summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bfdd/bfd.h16
-rw-r--r--bfdd/bfd_packet.c38
-rw-r--r--bfdd/bfdd_nb.c2
-rw-r--r--bfdd/bfdd_nb.h3
-rw-r--r--bfdd/bfdd_nb_config.c38
-rw-r--r--bgpd/bgp_attr.c15
-rw-r--r--bgpd/bgp_attr.h2
-rw-r--r--bgpd/bgp_clist.c67
-rw-r--r--bgpd/bgp_clist.h3
-rw-r--r--bgpd/bgp_ecommunity.c15
-rw-r--r--bgpd/bgp_ecommunity.h2
-rw-r--r--bgpd/bgp_evpn.c507
-rw-r--r--bgpd/bgp_evpn.h11
-rw-r--r--bgpd/bgp_evpn_private.h9
-rw-r--r--bgpd/bgp_evpn_vty.c200
-rw-r--r--bgpd/bgp_flowspec.c7
-rw-r--r--bgpd/bgp_fsm.c7
-rw-r--r--bgpd/bgp_fsm.h9
-rw-r--r--bgpd/bgp_label.c17
-rw-r--r--bgpd/bgp_label.h4
-rw-r--r--bgpd/bgp_labelpool.c31
-rw-r--r--bgpd/bgp_labelpool.h1
-rw-r--r--bgpd/bgp_mac.c19
-rw-r--r--bgpd/bgp_mplsvpn.c563
-rw-r--r--bgpd/bgp_mplsvpn.h76
-rw-r--r--bgpd/bgp_network.c5
-rw-r--r--bgpd/bgp_nexthop.c1
-rw-r--r--bgpd/bgp_nexthop.h5
-rw-r--r--bgpd/bgp_nht.c11
-rw-r--r--bgpd/bgp_packet.c1
-rw-r--r--bgpd/bgp_route.c313
-rw-r--r--bgpd/bgp_route.h40
-rw-r--r--bgpd/bgp_table.h1
-rw-r--r--bgpd/bgp_updgrp.c1
-rw-r--r--bgpd/bgp_updgrp_adv.c11
-rw-r--r--bgpd/bgp_updgrp_packet.c23
-rw-r--r--bgpd/bgp_vty.c119
-rw-r--r--bgpd/bgp_vty.h2
-rw-r--r--bgpd/bgp_zebra.c23
-rw-r--r--bgpd/bgp_zebra.h3
-rw-r--r--bgpd/bgpd.c80
-rw-r--r--bgpd/bgpd.h29
-rw-r--r--configure.ac2
-rw-r--r--debian/changelog10
-rw-r--r--doc/developer/building-docker.rst48
-rw-r--r--doc/developer/process-architecture.rst44
-rw-r--r--doc/developer/topotests.rst8
-rw-r--r--doc/developer/workflow.rst21
-rw-r--r--doc/user/bgp.rst93
-rw-r--r--docker/ubuntu-ci/Dockerfile125
-rwxr-xr-xdocker/ubuntu-ci/docker-start (renamed from docker/ubuntu18-ci/docker-start)0
-rw-r--r--docker/ubuntu18-ci/Dockerfile73
-rw-r--r--docker/ubuntu18-ci/README.md44
-rw-r--r--docker/ubuntu20-ci/Dockerfile78
-rw-r--r--docker/ubuntu20-ci/README.md16
-rwxr-xr-xdocker/ubuntu20-ci/docker-start8
-rw-r--r--docker/ubuntu22-ci/README.md57
-rw-r--r--isisd/isis_tlvs.c12
-rw-r--r--ldpd/control.c18
-rw-r--r--ldpd/init.c91
-rw-r--r--ldpd/ldp_debug.c9
-rw-r--r--ldpd/log.c12
-rw-r--r--ldpd/logmsg.c6
-rw-r--r--ldpd/notification.c73
-rw-r--r--ldpd/pfkey.c6
-rw-r--r--lib/mgmt_be_client.c136
-rw-r--r--lib/mgmt_be_client.h10
-rw-r--r--lib/mgmt_fe_client.c42
-rw-r--r--lib/mgmt_fe_client.h11
-rw-r--r--lib/mgmt_msg.c6
-rw-r--r--lib/northbound.c7
-rw-r--r--lib/northbound_cli.c13
-rw-r--r--lib/vty.c63
-rw-r--r--lib/vty.h11
-rw-r--r--mgmtd/mgmt_be_adapter.c48
-rw-r--r--mgmtd/mgmt_be_adapter.h54
-rw-r--r--mgmtd/mgmt_ds.c6
-rw-r--r--mgmtd/mgmt_fe_adapter.c63
-rw-r--r--mgmtd/mgmt_history.c2
-rw-r--r--mgmtd/mgmt_history.h2
-rw-r--r--mgmtd/mgmt_txn.c132
-rw-r--r--mgmtd/mgmt_txn.h14
-rw-r--r--mgmtd/mgmt_vty.c8
-rw-r--r--ospfd/ospf_interface.c18
-rw-r--r--ospfd/ospf_route.c2
-rw-r--r--ospfd/ospf_vty.c178
-rw-r--r--pbrd/pbr_zebra.c21
-rw-r--r--pimd/pim6_cmd.c2
-rw-r--r--pimd/pim_iface.c58
-rw-r--r--pimd/pim_iface.h2
-rw-r--r--pimd/pim_igmpv3.c11
-rw-r--r--pimd/pim_nb_config.c92
-rw-r--r--pimd/pim_register.c2
-rw-r--r--pimd/pim_tib.c2
-rw-r--r--pimd/pim_upstream.c4
-rw-r--r--redhat/frr.spec.in30
-rw-r--r--staticd/static_main.c4
-rw-r--r--tests/topotests/babel_topo1/r1/babeld.conf1
-rw-r--r--tests/topotests/babel_topo1/r2/babeld.conf1
-rw-r--r--tests/topotests/babel_topo1/r3/babeld.conf1
-rw-r--r--tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json152
-rw-r--r--tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py1118
-rw-r--r--tests/topotests/bgp_default_originate_timer/__init__.py0
-rw-r--r--tests/topotests/bgp_default_originate_timer/r1/bgpd.conf18
-rw-r--r--tests/topotests/bgp_default_originate_timer/r1/zebra.conf7
-rw-r--r--tests/topotests/bgp_default_originate_timer/r2/bgpd.conf6
-rw-r--r--tests/topotests/bgp_default_originate_timer/r2/zebra.conf4
-rw-r--r--tests/topotests/bgp_default_originate_timer/r3/bgpd.conf12
-rw-r--r--tests/topotests/bgp_default_originate_timer/r3/zebra.conf7
-rw-r--r--tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py123
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/bgpd.conf1
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/ospfd.conf13
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/zebra.conf7
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgp.l2vpn.evpn.vni.json19
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgpd.conf18
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/evpn.vni.json17
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/ospfd.conf9
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/zebra.conf8
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgp.l2vpn.evpn.vni.json19
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgpd.conf18
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/evpn.vni.json16
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/ospfd.conf9
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/zebra.conf6
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/bgpd.conf1
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/ospfd.conf1
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/zebra.conf3
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/bgpd.conf1
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/ospfd.conf1
-rw-r--r--tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/zebra.conf3
-rwxr-xr-xtests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/test_bgp_evpn_vxlan_macvrf_soo.py839
-rw-r--r--tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py6
-rw-r--r--tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py (renamed from tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py)0
-rw-r--r--tests/topotests/bgp_local_as_dotplus_private_remove/test_bgp_local_as_dotplus_private_remove.py63
-rw-r--r--tests/topotests/bgp_peer_group/r3/bgpd.conf12
-rw-r--r--tests/topotests/bgp_peer_group/test_bgp_peer-group.py21
-rw-r--r--tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py4
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/__init__.py0
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/h1/zebra.conf7
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/h2/zebra.conf6
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/h3/zebra.conf6
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r1/bgp_ipv4_routes.json49
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf29
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r1/zebra.conf10
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf31
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r2/ipv4_vpn_summary.json24
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r2/zebra.conf13
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf25
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r3/zebra.conf14
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/rr100/bgpd.conf29
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/rr100/zebra.conf7
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/rs200/bgpd.conf19
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/rs200/zebra.conf4
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py912
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf34
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json68
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_all.json175
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init.json (renamed from tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json)54
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf2.json148
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf3.json148
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf2
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf11
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf43
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_routes_all.json177
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_summary.json17
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf2
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf16
-rw-r--r--tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py397
-rw-r--r--tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py65
-rwxr-xr-xtests/topotests/conftest.py79
-rw-r--r--tests/topotests/lib/bgp.py8
-rw-r--r--tests/topotests/lib/common_config.py369
-rwxr-xr-xtests/topotests/lib/mcast-tester.py42
-rw-r--r--tests/topotests/lib/micronet_compat.py4
-rw-r--r--tests/topotests/lib/pim.py12
-rw-r--r--tests/topotests/lib/topogen.py13
-rw-r--r--tests/topotests/lib/topolog.py160
-rw-r--r--tests/topotests/lib/topotest.py10
-rw-r--r--tests/topotests/mgmt_config/r1/early-end-zebra.conf6
-rw-r--r--tests/topotests/mgmt_config/r1/early-end.conf8
-rw-r--r--tests/topotests/mgmt_config/r1/early-end2-zebra.conf7
-rw-r--r--tests/topotests/mgmt_config/r1/early-end2.conf9
-rw-r--r--tests/topotests/mgmt_config/r1/early-exit-zebra.conf6
-rw-r--r--tests/topotests/mgmt_config/r1/early-exit.conf8
-rw-r--r--tests/topotests/mgmt_config/r1/early-exit2-zebra.conf7
-rw-r--r--tests/topotests/mgmt_config/r1/early-exit2.conf9
-rw-r--r--tests/topotests/mgmt_config/r1/mgmtd.conf11
-rw-r--r--tests/topotests/mgmt_config/r1/normal-exit.conf8
-rw-r--r--tests/topotests/mgmt_config/r1/one-exit-zebra.conf3
-rw-r--r--tests/topotests/mgmt_config/r1/one-exit.conf3
-rw-r--r--tests/topotests/mgmt_config/r1/one-exit2-zebra.conf4
-rw-r--r--tests/topotests/mgmt_config/r1/one-exit2.conf4
-rw-r--r--tests/topotests/mgmt_config/r1/zebra.conf7
-rw-r--r--tests/topotests/mgmt_config/test_config.py385
-rw-r--r--tests/topotests/mgmt_startup/test_bigconf.py4
-rw-r--r--tests/topotests/mgmt_startup/test_cfgfile_var.py (renamed from tests/topotests/mgmt_startup/test_config.py)0
-rw-r--r--tests/topotests/mgmt_startup/test_late_bigconf.py20
-rw-r--r--tests/topotests/mgmt_startup/util.py4
-rw-r--r--tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py70
-rw-r--r--tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py160
-rw-r--r--tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py98
-rwxr-xr-xtests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py119
-rwxr-xr-xtests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py87
-rw-r--r--tests/topotests/rip_topo1/test_rip_topo1.py61
-rw-r--r--tests/topotests/ripng_topo1/test_ripng_topo1.py63
-rwxr-xr-xtools/frr-reload.py9
-rw-r--r--vrrpd/vrrp_vty.c1
-rw-r--r--vtysh/vtysh.c7
-rw-r--r--zebra/interface.c8
-rw-r--r--zebra/interface.h3
-rw-r--r--zebra/main.c11
-rw-r--r--zebra/rib.h7
-rw-r--r--zebra/rule_netlink.c4
-rw-r--r--zebra/zapi_msg.c16
-rw-r--r--zebra/zebra_dplane.c5
214 files changed, 8801 insertions, 2483 deletions
diff --git a/bfdd/bfd.h b/bfdd/bfd.h
index 5451e66c23..6c5a1e9216 100644
--- a/bfdd/bfd.h
+++ b/bfdd/bfd.h
@@ -32,6 +32,11 @@ DECLARE_MGROUP(BFDD);
DECLARE_MTYPE(BFDD_CONTROL);
DECLARE_MTYPE(BFDD_NOTIFICATION);
+/* bfd Authentication Type. */
+#define BFD_AUTH_NULL 0
+#define BFD_AUTH_SIMPLE 1
+#define BFD_AUTH_CRYPTOGRAPHIC 2
+
struct bfd_timers {
uint32_t desired_min_tx;
uint32_t required_min_rx;
@@ -61,6 +66,15 @@ struct bfd_pkt {
};
/*
+ * Format of authentication.
+ */
+struct bfd_auth {
+ uint8_t type;
+ uint8_t length;
+};
+
+
+/*
* Format of Echo packet.
*/
struct bfd_echo_pkt {
@@ -79,7 +93,7 @@ struct bfd_echo_pkt {
/* Macros for manipulating control packets */
-#define BFD_VERMASK 0x03
+#define BFD_VERMASK 0x07
#define BFD_DIAGMASK 0x1F
#define BFD_GETVER(diag) ((diag >> 5) & BFD_VERMASK)
#define BFD_SETVER(diag, val) ((diag) |= (val & BFD_VERMASK) << 5)
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index ea7a1038ae..0c72ee7581 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -768,6 +768,37 @@ static void cp_debug(bool mhop, struct sockaddr_any *peer,
mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr);
}
+static bool bfd_check_auth(const struct bfd_session *bfd,
+ const struct bfd_pkt *cp)
+{
+ if (CHECK_FLAG(cp->flags, BFD_ABIT)) {
+ /* RFC5880 4.1: Authentication Section is present. */
+ struct bfd_auth *auth = (struct bfd_auth *)(cp + 1);
+ uint16_t pkt_auth_type = ntohs(auth->type);
+
+ if (cp->len < BFD_PKT_LEN + sizeof(struct bfd_auth))
+ return false;
+
+ if (cp->len < BFD_PKT_LEN + auth->length)
+ return false;
+
+ switch (pkt_auth_type) {
+ case BFD_AUTH_NULL:
+ return false;
+ case BFD_AUTH_SIMPLE:
+ /* RFC5880 6.7: To be finished. */
+ return false;
+ case BFD_AUTH_CRYPTOGRAPHIC:
+ /* RFC5880 6.7: To be finished. */
+ return false;
+ default:
+ /* RFC5880 6.7: To be finished. */
+ return false;
+ }
+ }
+ return true;
+}
+
void bfd_recv_cb(struct event *t)
{
int sd = EVENT_FD(t);
@@ -932,6 +963,13 @@ void bfd_recv_cb(struct event *t)
bfd->discrs.remote_discr = ntohl(cp->discrs.my_discr);
+ /* Check authentication. */
+ if (!bfd_check_auth(bfd, cp)) {
+ cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
+ "Authentication failed");
+ return;
+ }
+
/* Save remote diagnostics before state switch. */
bfd->remote_diag = cp->diag & BFD_DIAGMASK;
diff --git a/bfdd/bfdd_nb.c b/bfdd/bfdd_nb.c
index 7135c50763..114fbc2bdd 100644
--- a/bfdd/bfdd_nb.c
+++ b/bfdd/bfdd_nb.c
@@ -74,7 +74,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.xpath = "/frr-bfdd:bfdd/bfd/profile/minimum-ttl",
.cbs = {
.modify = bfdd_bfd_profile_minimum_ttl_modify,
- .destroy = bfdd_bfd_profile_minimum_ttl_destroy,
.cli_show = bfd_cli_show_minimum_ttl,
}
},
@@ -361,7 +360,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/minimum-ttl",
.cbs = {
.modify = bfdd_bfd_sessions_multi_hop_minimum_ttl_modify,
- .destroy = bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy,
.cli_show = bfd_cli_show_minimum_ttl,
}
},
diff --git a/bfdd/bfdd_nb.h b/bfdd/bfdd_nb.h
index 7a0e724d28..b5b00b57e4 100644
--- a/bfdd/bfdd_nb.h
+++ b/bfdd/bfdd_nb.h
@@ -25,7 +25,6 @@ int bfdd_bfd_profile_required_receive_interval_modify(
int bfdd_bfd_profile_administrative_down_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_passive_mode_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args);
-int bfdd_bfd_profile_minimum_ttl_destroy(struct nb_cb_destroy_args *args);
int bfdd_bfd_profile_echo_mode_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_desired_echo_transmission_interval_modify(
struct nb_cb_modify_args *args);
@@ -128,8 +127,6 @@ int bfdd_bfd_sessions_multi_hop_administrative_down_modify(
struct nb_cb_modify_args *args);
int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify(
struct nb_cb_modify_args *args);
-int bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy(
- struct nb_cb_destroy_args *args);
struct yang_data *
bfdd_bfd_sessions_multi_hop_stats_local_discriminator_get_elem(
struct nb_cb_get_elem_args *args);
diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c
index e4e97404d8..8cf2f0a6f1 100644
--- a/bfdd/bfdd_nb_config.c
+++ b/bfdd/bfdd_nb_config.c
@@ -423,20 +423,6 @@ int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args)
return NB_OK;
}
-int bfdd_bfd_profile_minimum_ttl_destroy(struct nb_cb_destroy_args *args)
-{
- struct bfd_profile *bp;
-
- if (args->event != NB_EV_APPLY)
- return NB_OK;
-
- bp = nb_running_get_entry(args->dnode, NULL, true);
- bp->minimum_ttl = BFD_DEF_MHOP_TTL;
- bfd_profile_update(bp);
-
- return NB_OK;
-}
-
/*
* XPath: /frr-bfdd:bfdd/bfd/profile/echo-mode
*/
@@ -859,27 +845,3 @@ int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify(
return NB_OK;
}
-
-int bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy(
- struct nb_cb_destroy_args *args)
-{
- struct bfd_session *bs;
-
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- return NB_OK;
-
- case NB_EV_APPLY:
- break;
-
- case NB_EV_ABORT:
- return NB_OK;
- }
-
- bs = nb_running_get_entry(args->dnode, NULL, true);
- bs->peer_profile.minimum_ttl = BFD_DEF_MHOP_TTL;
- bfd_session_apply(bs);
-
- return NB_OK;
-}
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index ec9f12d61a..221605d985 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -5189,3 +5189,18 @@ enum bgp_attr_parse_ret bgp_attr_ignore(struct peer *peer, uint8_t type)
return withdraw ? BGP_ATTR_PARSE_WITHDRAW : BGP_ATTR_PARSE_PROCEED;
}
+
+bool route_matches_soo(struct bgp_path_info *pi, struct ecommunity *soo)
+{
+ struct attr *attr = pi->attr;
+ struct ecommunity *ecom;
+
+ if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES)))
+ return false;
+
+ ecom = attr->ecommunity;
+ if (!ecom || !ecom->size)
+ return false;
+
+ return soo_in_ecom(ecom, soo);
+}
diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h
index 1c7e88a4f9..6cd34d301c 100644
--- a/bgpd/bgp_attr.h
+++ b/bgpd/bgp_attr.h
@@ -637,4 +637,6 @@ bgp_attr_set_vnc_subtlvs(struct attr *attr,
#endif
}
+extern bool route_matches_soo(struct bgp_path_info *pi, struct ecommunity *soo);
+
#endif /* _QUAGGA_BGP_ATTR_H */
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c
index ac5cdd6acb..f3c308afb9 100644
--- a/bgpd/bgp_clist.c
+++ b/bgpd/bgp_clist.c
@@ -659,9 +659,6 @@ bool community_list_match(struct community *com, struct community_list *list)
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == COMMUNITY_LIST_STANDARD) {
if (community_include(entry->u.com, COMMUNITY_INTERNET))
return entry->direct == COMMUNITY_PERMIT;
@@ -681,9 +678,6 @@ bool lcommunity_list_match(struct lcommunity *lcom, struct community_list *list)
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) {
if (lcommunity_match(lcom, entry->u.lcom))
return entry->direct == COMMUNITY_PERMIT;
@@ -705,9 +699,6 @@ bool lcommunity_list_exact_match(struct lcommunity *lcom,
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) {
if (lcommunity_cmp(lcom, entry->u.lcom))
return entry->direct == COMMUNITY_PERMIT;
@@ -724,9 +715,6 @@ bool ecommunity_list_match(struct ecommunity *ecom, struct community_list *list)
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == EXTCOMMUNITY_LIST_STANDARD) {
if (ecommunity_match(ecom, entry->u.ecom))
return entry->direct == COMMUNITY_PERMIT;
@@ -746,9 +734,6 @@ bool community_list_exact_match(struct community *com,
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == COMMUNITY_LIST_STANDARD) {
if (community_include(entry->u.com, COMMUNITY_INTERNET))
return entry->direct == COMMUNITY_PERMIT;
@@ -781,28 +766,18 @@ struct community *community_list_match_delete(struct community *com,
val = community_val_get(com, i);
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any) {
- if (entry->direct == COMMUNITY_PERMIT) {
- com_index_to_delete[delete_index] = i;
- delete_index++;
- }
- break;
- }
-
- else if ((entry->style == COMMUNITY_LIST_STANDARD)
- && (community_include(entry->u.com,
- COMMUNITY_INTERNET)
- || community_include(entry->u.com, val))) {
+ if ((entry->style == COMMUNITY_LIST_STANDARD) &&
+ (community_include(entry->u.com,
+ COMMUNITY_INTERNET) ||
+ community_include(entry->u.com, val))) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
}
break;
- }
-
- else if ((entry->style == COMMUNITY_LIST_EXPANDED)
- && community_regexp_include(entry->reg, com,
- i)) {
+ } else if ((entry->style == COMMUNITY_LIST_EXPANDED) &&
+ community_regexp_include(entry->reg, com,
+ i)) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
@@ -836,12 +811,6 @@ static bool community_list_dup_check(struct community_list *list,
if (entry->direct != new->direct)
continue;
- if (entry->any != new->any)
- continue;
-
- if (entry->any)
- return true;
-
switch (entry->style) {
case COMMUNITY_LIST_STANDARD:
if (community_cmp(entry->u.com, new->u.com))
@@ -910,7 +879,6 @@ int community_list_set(struct community_list_handler *ch, const char *name,
entry = community_entry_new();
entry->direct = direct;
entry->style = style;
- entry->any = (str ? false : true);
entry->u.com = com;
entry->reg = regex;
entry->seq = seqnum;
@@ -987,16 +955,8 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom,
for (i = 0; i < lcom->size; i++) {
ptr = lcom->val + (i * LCOMMUNITY_SIZE);
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any) {
- if (entry->direct == COMMUNITY_PERMIT) {
- com_index_to_delete[delete_index] = i;
- delete_index++;
- }
- break;
- }
-
- else if ((entry->style == LARGE_COMMUNITY_LIST_STANDARD)
- && lcommunity_include(entry->u.lcom, ptr)) {
+ if ((entry->style == LARGE_COMMUNITY_LIST_STANDARD) &&
+ lcommunity_include(entry->u.lcom, ptr)) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
@@ -1004,9 +964,10 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom,
break;
}
- else if ((entry->style == LARGE_COMMUNITY_LIST_EXPANDED)
- && lcommunity_regexp_include(entry->reg, lcom,
- i)) {
+ else if ((entry->style ==
+ LARGE_COMMUNITY_LIST_EXPANDED) &&
+ lcommunity_regexp_include(entry->reg, lcom,
+ i)) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
@@ -1125,7 +1086,6 @@ int lcommunity_list_set(struct community_list_handler *ch, const char *name,
entry = community_entry_new();
entry->direct = direct;
entry->style = style;
- entry->any = (str ? false : true);
entry->u.lcom = lcom;
entry->reg = regex;
entry->seq = seqnum;
@@ -1246,7 +1206,6 @@ int extcommunity_list_set(struct community_list_handler *ch, const char *name,
entry = community_entry_new();
entry->direct = direct;
entry->style = style;
- entry->any = false;
if (ecom)
entry->config = ecommunity_ecom2str(
ecom, ECOMMUNITY_FORMAT_COMMUNITY_LIST, 0);
diff --git a/bgpd/bgp_clist.h b/bgpd/bgp_clist.h
index 7a9b28038c..8e5d637bab 100644
--- a/bgpd/bgp_clist.h
+++ b/bgpd/bgp_clist.h
@@ -65,9 +65,6 @@ struct community_entry {
/* Standard or expanded. */
uint8_t style;
- /* Any match. */
- bool any;
-
/* Sequence number. */
int64_t seq;
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index a555930137..29b2250747 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -1765,3 +1765,18 @@ struct ecommunity *ecommunity_replace_linkbw(as_t as, struct ecommunity *ecom,
return new;
}
+
+bool soo_in_ecom(struct ecommunity *ecom, struct ecommunity *soo)
+{
+ if (ecom && soo) {
+ if ((ecommunity_lookup(ecom, ECOMMUNITY_ENCODE_AS,
+ ECOMMUNITY_SITE_ORIGIN) ||
+ ecommunity_lookup(ecom, ECOMMUNITY_ENCODE_AS4,
+ ECOMMUNITY_SITE_ORIGIN) ||
+ ecommunity_lookup(ecom, ECOMMUNITY_ENCODE_IP,
+ ECOMMUNITY_SITE_ORIGIN)) &&
+ ecommunity_include(ecom, soo))
+ return true;
+ }
+ return false;
+}
diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h
index 94a178bbb6..d62dc2e84c 100644
--- a/bgpd/bgp_ecommunity.h
+++ b/bgpd/bgp_ecommunity.h
@@ -360,6 +360,8 @@ extern struct ecommunity *ecommunity_replace_linkbw(as_t as,
uint64_t cum_bw,
bool disable_ieee_floating);
+extern bool soo_in_ecom(struct ecommunity *ecom, struct ecommunity *soo);
+
static inline void ecommunity_strip_rts(struct ecommunity *ecom)
{
uint8_t subtype = ECOMMUNITY_ROUTE_TARGET;
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index fc8889c175..625b7e59dc 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -41,6 +41,7 @@
#include "bgpd/bgp_nht.h"
#include "bgpd/bgp_trace.h"
#include "bgpd/bgp_mpath.h"
+#include "bgpd/bgp_packet.h"
/*
* Definitions and external declarations.
@@ -48,6 +49,7 @@
DEFINE_QOBJ_TYPE(bgpevpn);
DEFINE_QOBJ_TYPE(bgp_evpn_es);
+DEFINE_MTYPE_STATIC(BGPD, BGP_EVPN_INFO, "BGP EVPN instance information");
DEFINE_MTYPE_STATIC(BGPD, VRF_ROUTE_TARGET, "L3 Route Target");
/*
@@ -1050,7 +1052,8 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf,
* type-2 routes.
*/
static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr,
- int add_l3_ecomm)
+ int add_l3_ecomm,
+ struct ecommunity *macvrf_soo)
{
struct ecommunity ecom_encap;
struct ecommunity ecom_sticky;
@@ -1147,6 +1150,11 @@ static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr,
attr, ecommunity_merge(bgp_attr_get_ecommunity(attr),
&ecom_na));
}
+
+ /* Add MAC-VRF SoO, if configured */
+ if (macvrf_soo)
+ bgp_attr_set_ecommunity(
+ attr, ecommunity_merge(attr->ecommunity, macvrf_soo));
}
/*
@@ -2068,6 +2076,7 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
int route_change;
bool old_is_sync = false;
bool mac_only = false;
+ struct ecommunity *macvrf_soo = NULL;
memset(&attr, 0, sizeof(attr));
@@ -2125,8 +2134,11 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
add_l3_ecomm = bgp_evpn_route_add_l3_ecomm_ok(
vpn, p, (attr.es_flags & ATTR_ES_IS_LOCAL) ? &attr.esi : NULL);
+ if (bgp->evpn_info)
+ macvrf_soo = bgp->evpn_info->soo;
+
/* Set up extended community. */
- build_evpn_route_extcomm(vpn, &attr, add_l3_ecomm);
+ build_evpn_route_extcomm(vpn, &attr, add_l3_ecomm, macvrf_soo);
/* First, create (or fetch) route node within the VNI.
* NOTE: There is no RD here.
@@ -2333,6 +2345,7 @@ void bgp_evpn_update_type2_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
struct prefix_evpn evp;
int route_change;
bool old_is_sync = false;
+ struct ecommunity *macvrf_soo = NULL;
if (CHECK_FLAG(local_pi->flags, BGP_PATH_REMOVED))
return;
@@ -2380,8 +2393,11 @@ void bgp_evpn_update_type2_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
vpn, &evp,
(attr.es_flags & ATTR_ES_IS_LOCAL) ? &attr.esi : NULL);
+ if (bgp->evpn_info)
+ macvrf_soo = bgp->evpn_info->soo;
+
/* Set up extended community. */
- build_evpn_route_extcomm(vpn, &attr, add_l3_ecomm);
+ build_evpn_route_extcomm(vpn, &attr, add_l3_ecomm, macvrf_soo);
seq = mac_mobility_seqnum(local_pi->attr);
if (bgp_debug_zebra(NULL)) {
@@ -2673,6 +2689,21 @@ int update_routes_for_vni(struct bgp *bgp, struct bgpevpn *vpn)
return 0;
}
+/* Update Type-2/3 Routes for L2VNI.
+ * Called by hash_iterate()
+ */
+static void update_routes_for_vni_hash(struct hash_bucket *bucket,
+ struct bgp *bgp)
+{
+ struct bgpevpn *vpn;
+
+ if (!bucket)
+ return;
+
+ vpn = (struct bgpevpn *)bucket->data;
+ update_routes_for_vni(bgp, vpn);
+}
+
/*
* Delete (and withdraw) local routes for specified VNI from the global
* table and per-VNI table. After this, remove all other routes from
@@ -2720,43 +2751,60 @@ static int bgp_evpn_mcast_grp_change(struct bgp *bgp, struct bgpevpn *vpn,
}
/*
- * There is a tunnel endpoint IP address change for this VNI, delete
- * prior type-3 route (if needed) and update.
+ * If there is a tunnel endpoint IP address (VTEP-IP) change for this VNI.
+ - Deletes tip_hash entry for old VTEP-IP
+ - Adds tip_hash entry/refcount for new VTEP-IP
+ - Deletes prior type-3 route for L2VNI (if needed)
+ - Updates originator_ip
* Note: Route re-advertisement happens elsewhere after other processing
* other changes.
*/
-static void handle_tunnel_ip_change(struct bgp *bgp, struct bgpevpn *vpn,
+static void handle_tunnel_ip_change(struct bgp *bgp_vrf, struct bgp *bgp_evpn,
+ struct bgpevpn *vpn,
struct in_addr originator_ip)
{
struct prefix_evpn p;
+ struct in_addr old_vtep_ip;
+
+ if (bgp_vrf) /* L3VNI */
+ old_vtep_ip = bgp_vrf->originator_ip;
+ else /* L2VNI */
+ old_vtep_ip = vpn->originator_ip;
- if (IPV4_ADDR_SAME(&vpn->originator_ip, &originator_ip))
+ /* TIP didn't change, nothing to do */
+ if (IPV4_ADDR_SAME(&old_vtep_ip, &originator_ip))
return;
- /* If VNI is not live, we only need to update the originator ip */
- if (!is_vni_live(vpn)) {
+ /* If L2VNI is not live, we only need to update the originator_ip.
+ * L3VNIs are updated immediately, so we can't bail out early.
+ */
+ if (!bgp_vrf && !is_vni_live(vpn)) {
vpn->originator_ip = originator_ip;
return;
}
/* Update the tunnel-ip hash */
- bgp_tip_del(bgp, &vpn->originator_ip);
- if (bgp_tip_add(bgp, &originator_ip))
+ bgp_tip_del(bgp_evpn, &old_vtep_ip);
+ if (bgp_tip_add(bgp_evpn, &originator_ip))
/* The originator_ip was not already present in the
* bgp martian next-hop table as a tunnel-ip, so we
* need to go back and filter routes matching the new
* martian next-hop.
*/
- bgp_filter_evpn_routes_upon_martian_nh_change(bgp);
+ bgp_filter_evpn_routes_upon_martian_change(bgp_evpn,
+ BGP_MARTIAN_TUN_IP);
- /* Need to withdraw type-3 route as the originator IP is part
- * of the key.
- */
- build_evpn_type3_prefix(&p, vpn->originator_ip);
- delete_evpn_route(bgp, vpn, &p);
+ if (!bgp_vrf) {
+ /* Need to withdraw type-3 route as the originator IP is part
+ * of the key.
+ */
+ build_evpn_type3_prefix(&p, vpn->originator_ip);
+ delete_evpn_route(bgp_evpn, vpn, &p);
+
+ vpn->originator_ip = originator_ip;
+ } else
+ bgp_vrf->originator_ip = originator_ip;
- /* Update the tunnel IP and re-advertise all routes for this VNI. */
- vpn->originator_ip = originator_ip;
return;
}
@@ -3271,6 +3319,9 @@ static int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
bgp_aggregate_decrement(bgp_vrf, bgp_dest_get_prefix(dest), pi, afi,
safi);
+ /* Force deletion */
+ SET_FLAG(dest->flags, BGP_NODE_PROCESS_CLEAR);
+
/* Mark entry for deletion */
bgp_path_info_delete(dest, pi);
@@ -3366,7 +3417,7 @@ static int uninstall_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
/*
* Given a route entry and a VRF, see if this route entry should be
- * imported into the VRF i.e., RTs match.
+ * imported into the VRF i.e., RTs match + Site-of-Origin check passes.
*/
static int is_route_matching_for_vrf(struct bgp *bgp_vrf,
struct bgp_path_info *pi)
@@ -3498,6 +3549,41 @@ static int is_route_matching_for_vni(struct bgp *bgp, struct bgpevpn *vpn,
return 0;
}
+static bool bgp_evpn_route_matches_macvrf_soo(struct bgp_path_info *pi,
+ const struct prefix_evpn *evp)
+{
+ struct bgp *bgp_evpn = bgp_get_evpn();
+ struct ecommunity *macvrf_soo;
+ bool ret = false;
+
+ if (!bgp_evpn->evpn_info)
+ return false;
+
+ /* We only stamp the mac-vrf soo on routes from our local L2VNI.
+ * No need to filter additional EVPN routes that originated outside
+ * the MAC-VRF/L2VNI.
+ */
+ if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE &&
+ evp->prefix.route_type != BGP_EVPN_IMET_ROUTE)
+ return false;
+
+ macvrf_soo = bgp_evpn->evpn_info->soo;
+ ret = route_matches_soo(pi, macvrf_soo);
+
+ if (ret && bgp_debug_zebra(NULL)) {
+ char *ecom_str;
+
+ ecom_str = ecommunity_ecom2str(macvrf_soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ zlog_debug(
+ "import of evpn prefix %pFX skipped, local mac-vrf soo %s",
+ evp, ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
+
+ return ret;
+}
+
/* This API will scan evpn routes for checking attribute's rmac
* macthes with bgp instance router mac. It avoid installing
* route into bgp vrf table and remote rmac in bridge table.
@@ -3583,8 +3669,9 @@ int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
return 0;
/* don't import hosts that are locally attached */
- if (install && bgp_evpn_skip_vrf_import_of_local_es(
- bgp_vrf, evp, pi, install))
+ if (install && (bgp_evpn_skip_vrf_import_of_local_es(
+ bgp_vrf, evp, pi, install) ||
+ bgp_evpn_route_matches_macvrf_soo(pi, evp)))
return 0;
if (install)
@@ -3713,30 +3800,35 @@ static int install_uninstall_routes_for_vni(struct bgp *bgp,
&& pi->sub_type == BGP_ROUTE_NORMAL))
continue;
- if (is_route_matching_for_vni(bgp, vpn, pi)) {
- if (install)
- ret = install_evpn_route_entry(
- bgp, vpn, evp, pi);
- else
- ret = uninstall_evpn_route_entry(
- bgp, vpn, evp, pi);
-
- if (ret) {
- flog_err(
- EC_BGP_EVPN_FAIL,
- "%u: Failed to %s EVPN %s route in VNI %u",
- bgp->vrf_id,
- install ? "install"
- : "uninstall",
- rtype == BGP_EVPN_MAC_IP_ROUTE
- ? "MACIP"
- : "IMET",
- vpn->vni);
-
- bgp_dest_unlock_node(rd_dest);
- bgp_dest_unlock_node(dest);
- return ret;
- }
+ if (!is_route_matching_for_vni(bgp, vpn, pi))
+ continue;
+
+ if (install) {
+ if (bgp_evpn_route_matches_macvrf_soo(
+ pi, evp))
+ continue;
+
+ ret = install_evpn_route_entry(bgp, vpn,
+ evp, pi);
+ } else
+ ret = uninstall_evpn_route_entry(
+ bgp, vpn, evp, pi);
+
+ if (ret) {
+ flog_err(
+ EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in VNI %u",
+ bgp->vrf_id,
+ install ? "install"
+ : "uninstall",
+ rtype == BGP_EVPN_MAC_IP_ROUTE
+ ? "MACIP"
+ : "IMET",
+ vpn->vni);
+
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+ return ret;
}
}
}
@@ -3942,6 +4034,12 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,
if (!ecom || !ecom->size)
return -1;
+ /* Filter routes carrying a Site-of-Origin that matches our
+ * local MAC-VRF SoO.
+ */
+ if (import && bgp_evpn_route_matches_macvrf_soo(pi, evp))
+ return 0;
+
/* An EVPN route belongs to a VNI or a VRF or an ESI based on the RTs
* attached to the route */
for (i = 0; i < ecom->size; i++) {
@@ -5489,6 +5587,46 @@ void bgp_evpn_handle_rd_change(struct bgp *bgp, struct bgpevpn *vpn,
update_advertise_vni_routes(bgp, vpn);
}
+/* "mac-vrf soo" vty handler
+ * Handle change to the global MAC-VRF Site-of-Origin:
+ * - Unimport routes with new SoO from VNI/VRF
+ * - Import routes with old SoO into VNI/VRF
+ * - Update SoO on local VNI routes + re-advertise
+ */
+void bgp_evpn_handle_global_macvrf_soo_change(struct bgp *bgp,
+ struct ecommunity *new_soo)
+{
+ struct ecommunity *old_soo;
+
+ old_soo = bgp->evpn_info->soo;
+
+ /* cleanup and bail out if old_soo == new_soo */
+ if (ecommunity_match(old_soo, new_soo)) {
+ ecommunity_free(&new_soo);
+ return;
+ }
+
+ /* set new_soo */
+ bgp->evpn_info->soo = new_soo;
+
+ /* Unimport routes matching the new_soo */
+ bgp_filter_evpn_routes_upon_martian_change(bgp, BGP_MARTIAN_SOO);
+
+ /* Reimport routes with old_soo and !new_soo.
+ */
+ bgp_reimport_evpn_routes_upon_martian_change(
+ bgp, BGP_MARTIAN_SOO, (void *)old_soo, (void *)new_soo);
+
+ /* Update locally originated routes for all L2VNIs */
+ hash_iterate(bgp->vnihash,
+ (void (*)(struct hash_bucket *,
+ void *))update_routes_for_vni_hash,
+ bgp);
+
+ /* clear old_soo */
+ ecommunity_free(&old_soo);
+}
+
/*
* Install routes for this VNI. Invoked upon change to Import RT.
*/
@@ -6056,8 +6194,12 @@ int bgp_evpn_unimport_route(struct bgp *bgp, afi_t afi, safi_t safi,
return install_uninstall_evpn_route(bgp, afi, safi, p, pi, 0);
}
-/* filter routes which have martian next hops */
-int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp)
+/* Refresh previously-discarded EVPN routes carrying "self" MAC-VRF SoO.
+ * Walk global EVPN rib + import remote routes with old_soo && !new_soo.
+ */
+void bgp_reimport_evpn_routes_upon_macvrf_soo_change(struct bgp *bgp,
+ struct ecommunity *old_soo,
+ struct ecommunity *new_soo)
{
afi_t afi;
safi_t safi;
@@ -6068,12 +6210,9 @@ int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp)
afi = AFI_L2VPN;
safi = SAFI_EVPN;
- /* Walk entire global routing table and evaluate routes which could be
- * imported into this VPN. Note that we cannot just look at the routes
- * for the VNI's RD -
- * remote routes applicable for this VNI could have any RD.
+ /* EVPN routes are a 2-level table: outer=prefix_rd, inner=prefix_evpn.
+ * A remote route could have any RD, so we need to walk them all.
*/
- /* EVPN routes are a 2-level table. */
for (rd_dest = bgp_table_top(bgp->rib[afi][safi]); rd_dest;
rd_dest = bgp_route_next(rd_dest)) {
table = bgp_dest_get_bgp_table_info(rd_dest);
@@ -6082,21 +6221,132 @@ int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp)
for (dest = bgp_table_top(table); dest;
dest = bgp_route_next(dest)) {
+ const struct prefix *p;
+ struct prefix_evpn *evp;
+
+ p = bgp_dest_get_prefix(dest);
+ evp = (struct prefix_evpn *)p;
+
+ /* On export we only add MAC-VRF SoO to RT-2/3, so we
+ * can skip evaluation of other RTs.
+ */
+ if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE &&
+ evp->prefix.route_type != BGP_EVPN_IMET_ROUTE)
+ continue;
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
+ bool old_soo_fnd = false;
+ bool new_soo_fnd = false;
- /* Consider "valid" remote routes applicable for
- * this VNI. */
+ /* Only consider routes learned from peers */
+ if (!(pi->type == ZEBRA_ROUTE_BGP &&
+ pi->sub_type == BGP_ROUTE_NORMAL))
+ continue;
+
+ if (!CHECK_FLAG(pi->flags, BGP_PATH_VALID))
+ continue;
+
+ old_soo_fnd = route_matches_soo(pi, old_soo);
+ new_soo_fnd = route_matches_soo(pi, new_soo);
+
+ if (old_soo_fnd && !new_soo_fnd) {
+ if (bgp_debug_update(pi->peer, p, NULL,
+ 1)) {
+ char attr_str[BUFSIZ] = {0};
+
+ bgp_dump_attr(pi->attr,
+ attr_str, BUFSIZ);
+
+ zlog_debug(
+ "mac-vrf soo changed: evaluating reimport of prefix %pBD with attr %s",
+ dest, attr_str);
+ }
+
+ bgp_evpn_import_route(bgp, afi, safi, p,
+ pi);
+ }
+ }
+ }
+ }
+}
+
+/* Filter learned (!local) EVPN routes carrying "self" attributes.
+ * Walk the Global EVPN loc-rib unimporting martian routes from the appropriate
+ * L2VNIs (MAC-VRFs) / L3VNIs (IP-VRFs), and deleting them from the Global
+ * loc-rib when applicable (based on martian_type).
+ * This function is the handler for new martian entries, which is triggered by
+ * events occurring on the local system,
+ * e.g.
+ * - New VTEP-IP
+ * + bgp_zebra_process_local_vni
+ * + bgp_zebra_process_local_l3vni
+ * - New MAC-VRF Site-of-Origin
+ * + bgp_evpn_handle_global_macvrf_soo_change
+ * This will likely be extended in the future to cover these events too:
+ * - New Interface IP
+ * + bgp_interface_address_add
+ * - New Interface MAC
+ * + bgp_ifp_up
+ * + bgp_ifp_create
+ * - New RMAC
+ * + bgp_zebra_process_local_l3vni
+ */
+void bgp_filter_evpn_routes_upon_martian_change(
+ struct bgp *bgp, enum bgp_martian_type martian_type)
+{
+ afi_t afi;
+ safi_t safi;
+ struct bgp_dest *rd_dest, *dest;
+ struct bgp_table *table;
+ struct bgp_path_info *pi;
+ struct ecommunity *macvrf_soo;
+
+ afi = AFI_L2VPN;
+ safi = SAFI_EVPN;
+ macvrf_soo = bgp->evpn_info->soo;
+
+ /* EVPN routes are a 2-level table: outer=prefix_rd, inner=prefix_evpn.
+ * A remote route could have any RD, so we need to walk them all.
+ */
+ for (rd_dest = bgp_table_top(bgp->rib[afi][safi]); rd_dest;
+ rd_dest = bgp_route_next(rd_dest)) {
+ table = bgp_dest_get_bgp_table_info(rd_dest);
+ if (!table)
+ continue;
+
+ for (dest = bgp_table_top(table); dest;
+ dest = bgp_route_next(dest)) {
+
+ for (pi = bgp_dest_get_bgp_path_info(dest); pi;
+ pi = pi->next) {
+ bool affected = false;
+ const struct prefix *p;
+
+ /* Only consider routes learned from peers */
if (!(pi->type == ZEBRA_ROUTE_BGP
&& pi->sub_type == BGP_ROUTE_NORMAL))
continue;
- if (bgp_nexthop_self(bgp, afi, pi->type,
- pi->sub_type, pi->attr,
- dest)) {
- const struct prefix *p =
- bgp_dest_get_prefix(dest);
+ p = bgp_dest_get_prefix(dest);
+
+ switch (martian_type) {
+ case BGP_MARTIAN_TUN_IP:
+ affected = bgp_nexthop_self(
+ bgp, afi, pi->type,
+ pi->sub_type, pi->attr, dest);
+ break;
+ case BGP_MARTIAN_SOO:
+ affected = route_matches_soo(
+ pi, macvrf_soo);
+ break;
+ case BGP_MARTIAN_IF_IP:
+ case BGP_MARTIAN_IF_MAC:
+ case BGP_MARTIAN_RMAC:
+ break;
+ }
+
+ if (affected) {
if (bgp_debug_update(pi->peer, p, NULL,
1)) {
char attr_str[BUFSIZ] = {0};
@@ -6106,21 +6356,116 @@ int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp)
sizeof(attr_str));
zlog_debug(
- "%u: prefix %pBD with attr %s - DENIED due to martian or self nexthop",
+ "%u: prefix %pBD with attr %s - DISCARDED due to Martian/%s",
bgp->vrf_id, dest,
- attr_str);
+ attr_str,
+ bgp_martian_type2str(
+ martian_type));
}
+
+
bgp_evpn_unimport_route(bgp, afi, safi,
p, pi);
- bgp_rib_remove(dest, pi, pi->peer, afi,
- safi);
+ /* For now, retain existing handling of
+ * tip_hash updates: (Self SoO routes
+ * are unimported from L2VNI/VRF but
+ * retained in global loc-rib, but Self
+ * IP/MAC routes are also deleted from
+ * global loc-rib).
+ * TODO: use consistent handling for all
+ * martian types
+ */
+ if (martian_type == BGP_MARTIAN_TUN_IP)
+ bgp_rib_remove(dest, pi,
+ pi->peer, afi,
+ safi);
}
}
}
}
+}
- return 0;
+/* Refresh previously-discarded EVPN routes carrying "self" attributes.
+ * This function is the handler for deleted martian entries, which is triggered
+ * by events occurring on the local system,
+ * e.g.
+ * - Del MAC-VRF Site-of-Origin
+ * + bgp_evpn_handle_global_macvrf_soo_change
+ * This will likely be extended in the future to cover these events too:
+ * - Del VTEP-IP
+ * + bgp_zebra_process_local_vni
+ * + bgp_zebra_process_local_l3vni
+ * - Del Interface IP
+ * + bgp_interface_address_delete
+ * - Del Interface MAC
+ * + bgp_ifp_down
+ * + bgp_ifp_destroy
+ * - Del RMAC
+ * + bgp_zebra_process_local_l3vni
+ */
+void bgp_reimport_evpn_routes_upon_martian_change(
+ struct bgp *bgp, enum bgp_martian_type martian_type, void *old_martian,
+ void *new_martian)
+{
+ struct listnode *node;
+ struct peer *peer;
+ safi_t safi;
+ afi_t afi;
+ struct ecommunity *old_soo, *new_soo;
+
+ afi = AFI_L2VPN;
+ safi = SAFI_EVPN;
+
+ /* Self-SoO routes are held in the global EVPN loc-rib, so we can
+ * reimport routes w/o triggering soft-reconfig/route-refresh.
+ */
+ if (martian_type == BGP_MARTIAN_SOO) {
+ old_soo = (struct ecommunity *)old_martian;
+ new_soo = (struct ecommunity *)new_martian;
+
+ /* If !old_soo, then we can skip the reimport because we
+ * wouldn't have filtered anything via the self-SoO import check
+ */
+ if (old_martian)
+ bgp_reimport_evpn_routes_upon_macvrf_soo_change(
+ bgp, old_soo, new_soo);
+
+ return;
+ }
+
+ /* Self-TIP/IP/MAC/RMAC routes are deleted from the global EVPN
+ * loc-rib, so we need to re-learn the routes via soft-reconfig/
+ * route-refresh.
+ */
+ for (ALL_LIST_ELEMENTS_RO(bgp->peer, node, peer)) {
+
+ if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ continue;
+
+ if (peer->status != Established)
+ continue;
+
+ if (CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_SOFT_RECONFIG)) {
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug(
+ "Processing EVPN Martian/%s change on peer %s (inbound, soft-reconfig)",
+ bgp_martian_type2str(martian_type),
+ peer->host);
+
+ bgp_soft_reconfig_in(peer, afi, safi);
+ } else {
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug(
+ "Processing EVPN Martian/%s change on peer %s",
+ bgp_martian_type2str(martian_type),
+ peer->host);
+ bgp_route_refresh_send(peer, afi, safi, 0,
+ REFRESH_IMMEDIATE, 0,
+ BGP_ROUTE_REFRESH_NORMAL);
+ }
+ }
}
/*
@@ -6269,10 +6614,14 @@ int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id,
/* associate the vrf with l3vni and related parameters */
bgp_vrf->l3vni = l3vni;
- bgp_vrf->originator_ip = originator_ip;
bgp_vrf->l3vni_svi_ifindex = svi_ifindex;
bgp_vrf->evpn_info->is_anycast_mac = is_anycast_mac;
+ /* Update tip_hash of the EVPN underlay BGP instance (bgp_evpn)
+ * if the VTEP-IP (originator_ip) has changed
+ */
+ handle_tunnel_ip_change(bgp_vrf, bgp_evpn, vpn, originator_ip);
+
/* copy anycast MAC from VRR MAC */
memcpy(&bgp_vrf->rmac, vrr_rmac, ETH_ALEN);
/* copy sys RMAC from SVI MAC */
@@ -6397,6 +6746,11 @@ int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
/* delete/withdraw all type-5 routes */
delete_withdraw_vrf_routes(bgp_vrf);
+ /* Tunnel is no longer active.
+ * Delete VTEP-IP from EVPN underlay's tip_hash.
+ */
+ bgp_tip_del(bgp_evpn, &bgp_vrf->originator_ip);
+
/* remove the l3vni from vrf instance */
bgp_vrf->l3vni = 0;
@@ -6461,8 +6815,8 @@ int bgp_evpn_local_vni_del(struct bgp *bgp, vni_t vni)
bgp_evpn_unlink_from_vni_svi_hash(bgp, vpn);
vpn->svi_ifindex = 0;
- /*
- * tunnel is no longer active, del tunnel ip address from tip_hash
+ /* Tunnel is no longer active.
+ * Delete VTEP-IP from EVPN underlay's tip_hash.
*/
bgp_tip_del(bgp, &vpn->originator_ip);
@@ -6486,6 +6840,7 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
{
struct bgpevpn *vpn;
struct prefix_evpn p;
+ struct bgp *bgp_evpn = bgp_get_evpn();
/* Lookup VNI. If present and no change, exit. */
vpn = bgp_evpn_lookup_vni(bgp, vni);
@@ -6558,7 +6913,7 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
/* If tunnel endpoint IP has changed, update (and delete prior
* type-3 route, if needed.)
*/
- handle_tunnel_ip_change(bgp, vpn, originator_ip);
+ handle_tunnel_ip_change(NULL, bgp, vpn, originator_ip);
/* Update all routes with new endpoint IP and/or export RT
* for VRFs
@@ -6578,14 +6933,17 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
/* Mark as "live" */
SET_FLAG(vpn->flags, VNI_FLAG_LIVE);
- /* tunnel is now active, add tunnel-ip to db */
+ /* Tunnel is newly active.
+ * Add TIP to tip_hash of the EVPN underlay instance (bgp_get_evpn()).
+ */
if (bgp_tip_add(bgp, &originator_ip))
/* The originator_ip was not already present in the
* bgp martian next-hop table as a tunnel-ip, so we
* need to go back and filter routes matching the new
* martian next-hop.
*/
- bgp_filter_evpn_routes_upon_martian_nh_change(bgp);
+ bgp_filter_evpn_routes_upon_martian_change(bgp_evpn,
+ BGP_MARTIAN_TUN_IP);
/*
* Create EVPN type-3 route and schedule for processing.
@@ -6679,6 +7037,11 @@ void bgp_evpn_cleanup(struct bgp *bgp)
list_delete(&bgp->vrf_export_rtl);
list_delete(&bgp->l2vnis);
+ if (bgp->evpn_info) {
+ ecommunity_free(&bgp->evpn_info->soo);
+ XFREE(MTYPE_BGP_EVPN_INFO, bgp->evpn_info);
+ }
+
if (bgp->vrf_prd_pretty)
XFREE(MTYPE_BGP, bgp->vrf_prd_pretty);
}
@@ -6712,6 +7075,8 @@ void bgp_evpn_init(struct bgp *bgp)
bgp->vrf_export_rtl->del = evpn_vrf_rt_del;
bgp->l2vnis = list_new();
bgp->l2vnis->cmp = vni_list_cmp;
+ bgp->evpn_info =
+ XCALLOC(MTYPE_BGP_EVPN_INFO, sizeof(struct bgp_evpn_info));
/* By default Duplicate Address Dection is enabled.
* Max-moves (N) 5, detection time (M) 180
* default action is warning-only
diff --git a/bgpd/bgp_evpn.h b/bgpd/bgp_evpn.h
index a034bfbd7e..076248c9f7 100644
--- a/bgpd/bgp_evpn.h
+++ b/bgpd/bgp_evpn.h
@@ -157,7 +157,16 @@ extern int bgp_evpn_import_route(struct bgp *bgp, afi_t afi, safi_t safi,
extern int bgp_evpn_unimport_route(struct bgp *bgp, afi_t afi, safi_t safi,
const struct prefix *p,
struct bgp_path_info *ri);
-extern int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp);
+extern void
+bgp_reimport_evpn_routes_upon_macvrf_soo_change(struct bgp *bgp,
+ struct ecommunity *old_soo,
+ struct ecommunity *new_soo);
+extern void bgp_reimport_evpn_routes_upon_martian_change(
+ struct bgp *bgp, enum bgp_martian_type martian_type, void *old_martian,
+ void *new_martian);
+extern void
+bgp_filter_evpn_routes_upon_martian_change(struct bgp *bgp,
+ enum bgp_martian_type martian_type);
extern int bgp_evpn_local_macip_del(struct bgp *bgp, vni_t vni,
struct ethaddr *mac, struct ipaddr *ip,
int state);
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index fd8d2c118f..8cee048b69 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -162,6 +162,13 @@ struct bgp_evpn_info {
/* EVPN enable - advertise svi macip routes */
int advertise_svi_macip;
+ /* MAC-VRF Site-of-Origin
+ * - added to all routes exported from L2VNI
+ * - Type-2/3 routes with matching SoO not imported into L2VNI
+ * - Type-2/5 routes with matching SoO not imported into L3VNI
+ */
+ struct ecommunity *soo;
+
/* PIP feature knob */
bool advertise_pip;
/* PIP IP (sys ip) */
@@ -680,6 +687,8 @@ extern void bgp_evpn_handle_autort_change(struct bgp *bgp);
extern void bgp_evpn_handle_vrf_rd_change(struct bgp *bgp_vrf, int withdraw);
extern void bgp_evpn_handle_rd_change(struct bgp *bgp, struct bgpevpn *vpn,
int withdraw);
+void bgp_evpn_handle_global_macvrf_soo_change(struct bgp *bgp,
+ struct ecommunity *new_soo);
extern int bgp_evpn_install_routes(struct bgp *bgp, struct bgpevpn *vpn);
extern int bgp_evpn_uninstall_routes(struct bgp *bgp, struct bgpevpn *vpn);
extern void bgp_evpn_map_vrf_to_its_rts(struct bgp *bgp_vrf);
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 66079cad22..3a5047f152 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -362,10 +362,11 @@ static void display_l3vni(struct vty *vty, struct bgp *bgp_vrf,
char *ecom_str;
struct listnode *node, *nnode;
struct vrf_route_target *l3rt;
+ struct bgp *bgp_evpn = NULL;
json_object *json_import_rtl = NULL;
json_object *json_export_rtl = NULL;
- char buf2[ETHER_ADDR_STRLEN];
+ bgp_evpn = bgp_get_evpn();
json_import_rtl = json_export_rtl = 0;
if (json) {
@@ -379,19 +380,26 @@ static void display_l3vni(struct vty *vty, struct bgp *bgp_vrf,
&bgp_vrf->vrf_prd);
json_object_string_addf(json, "originatorIp", "%pI4",
&bgp_vrf->originator_ip);
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ json_object_string_add(json, "siteOfOrigin", ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
json_object_string_add(json, "advertiseGatewayMacip", "n/a");
json_object_string_add(json, "advertiseSviMacIp", "n/a");
- json_object_string_add(json, "advertisePip",
- bgp_vrf->evpn_info->advertise_pip ?
- "Enabled" : "Disabled");
- json_object_string_addf(json, "sysIP", "%pI4",
- &bgp_vrf->evpn_info->pip_ip);
- json_object_string_add(json, "sysMac",
- prefix_mac2str(&bgp_vrf->evpn_info->pip_rmac,
- buf2, sizeof(buf2)));
- json_object_string_add(json, "rmac",
- prefix_mac2str(&bgp_vrf->rmac,
- buf2, sizeof(buf2)));
+ if (bgp_vrf && bgp_vrf->evpn_info) {
+ json_object_string_add(json, "advertisePip",
+ bgp_vrf->evpn_info->advertise_pip
+ ? "Enabled"
+ : "Disabled");
+ json_object_string_addf(json, "sysIP", "%pI4",
+ &bgp_vrf->evpn_info->pip_ip);
+ json_object_string_addf(json, "sysMac", "%pEA",
+ &bgp_vrf->evpn_info->pip_rmac);
+ }
+ json_object_string_addf(json, "rmac", "%pEA", &bgp_vrf->rmac);
} else {
vty_out(vty, "VNI: %d", bgp_vrf->l3vni);
vty_out(vty, " (known to the kernel)");
@@ -406,18 +414,26 @@ static void display_l3vni(struct vty *vty, struct bgp *bgp_vrf,
vty_out(vty, "\n");
vty_out(vty, " Originator IP: %pI4\n",
&bgp_vrf->originator_ip);
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, " MAC-VRF Site-of-Origin: %s\n",
+ ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
vty_out(vty, " Advertise-gw-macip : %s\n", "n/a");
vty_out(vty, " Advertise-svi-macip : %s\n", "n/a");
- vty_out(vty, " Advertise-pip: %s\n",
- bgp_vrf->evpn_info->advertise_pip ? "Yes" : "No");
- vty_out(vty, " System-IP: %pI4\n",
- &bgp_vrf->evpn_info->pip_ip);
- vty_out(vty, " System-MAC: %s\n",
- prefix_mac2str(&bgp_vrf->evpn_info->pip_rmac,
- buf2, sizeof(buf2)));
- vty_out(vty, " Router-MAC: %s\n",
- prefix_mac2str(&bgp_vrf->rmac,
- buf2, sizeof(buf2)));
+ if (bgp_vrf && bgp_vrf->evpn_info) {
+ vty_out(vty, " Advertise-pip: %s\n",
+ bgp_vrf->evpn_info->advertise_pip ? "Yes"
+ : "No");
+ vty_out(vty, " System-IP: %pI4\n",
+ &bgp_vrf->evpn_info->pip_ip);
+ vty_out(vty, " System-MAC: %pEA\n",
+ &bgp_vrf->evpn_info->pip_rmac);
+ }
+ vty_out(vty, " Router-MAC: %pEA\n", &bgp_vrf->rmac);
}
if (!json)
@@ -433,7 +449,7 @@ static void display_l3vni(struct vty *vty, struct bgp *bgp_vrf,
else
vty_out(vty, " %s\n", ecom_str);
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ ecommunity_strfree(&ecom_str);
}
if (json)
@@ -451,7 +467,7 @@ static void display_l3vni(struct vty *vty, struct bgp *bgp_vrf,
else
vty_out(vty, " %s\n", ecom_str);
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ ecommunity_strfree(&ecom_str);
}
if (json)
@@ -484,6 +500,13 @@ static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
&vpn->originator_ip);
json_object_string_addf(json, "mcastGroup", "%pI4",
&vpn->mcast_grp);
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ json_object_string_add(json, "siteOfOrigin", ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
/* per vni knob is enabled -- Enabled
* Global knob is enabled -- Active
* default -- Disabled
@@ -499,6 +522,7 @@ static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
json_object_string_add(json, "advertiseGatewayMacip",
"Disabled");
if (!vpn->advertise_svi_macip && bgp_evpn &&
+ bgp_evpn->evpn_info &&
bgp_evpn->evpn_info->advertise_svi_macip)
json_object_string_add(json, "advertiseSviMacIp",
"Active");
@@ -525,6 +549,14 @@ static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
vty_out(vty, "\n");
vty_out(vty, " Originator IP: %pI4\n", &vpn->originator_ip);
vty_out(vty, " Mcast group: %pI4\n", &vpn->mcast_grp);
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, " MAC-VRF Site-of-Origin: %s\n",
+ ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
if (!vpn->advertise_gw_macip &&
bgp_evpn && bgp_evpn->advertise_gw_macip)
vty_out(vty, " Advertise-gw-macip : %s\n",
@@ -536,6 +568,7 @@ static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
vty_out(vty, " Advertise-gw-macip : %s\n",
"Disabled");
if (!vpn->advertise_svi_macip && bgp_evpn &&
+ bgp_evpn->evpn_info &&
bgp_evpn->evpn_info->advertise_svi_macip)
vty_out(vty, " Advertise-svi-macip : %s\n",
"Active");
@@ -562,7 +595,7 @@ static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
else
vty_out(vty, " %s\n", ecom_str);
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ ecommunity_strfree(&ecom_str);
}
if (json)
@@ -580,7 +613,7 @@ static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
else
vty_out(vty, " %s\n", ecom_str);
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ ecommunity_strfree(&ecom_str);
}
if (json)
@@ -981,10 +1014,13 @@ static void show_l3vni_entry(struct vty *vty, struct bgp *bgp,
char *ecom_str;
struct listnode *node, *nnode;
struct vrf_route_target *l3rt;
+ struct bgp *bgp_evpn;
if (!bgp->l3vni)
return;
+ bgp_evpn = bgp_get_evpn();
+
if (json) {
json_vni = json_object_new_object();
json_import_rtl = json_object_new_array();
@@ -1041,7 +1077,7 @@ static void show_l3vni_entry(struct vty *vty, struct bgp *bgp,
vty_out(vty, " %-25s", rt_buf);
}
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ ecommunity_strfree(&ecom_str);
/* If there are multiple import RTs we break here and show only
* one */
@@ -1069,12 +1105,19 @@ static void show_l3vni_entry(struct vty *vty, struct bgp *bgp,
vty_out(vty, " %-25s", rt_buf);
}
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ ecommunity_strfree(&ecom_str);
/* If there are multiple export RTs we break here and show only
* one */
if (!json) {
- vty_out(vty, "%-37s", vrf_id_to_name(bgp->vrf_id));
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, " %-25s", ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
+ vty_out(vty, " %-37s", vrf_id_to_name(bgp->vrf_id));
break;
}
}
@@ -1083,11 +1126,18 @@ static void show_l3vni_entry(struct vty *vty, struct bgp *bgp,
char vni_str[VNI_STR_LEN];
json_object_object_add(json_vni, "exportRTs", json_export_rtl);
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ json_object_string_add(json_vni, "siteOfOrigin",
+ ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
snprintf(vni_str, sizeof(vni_str), "%u", bgp->l3vni);
json_object_object_add(json, vni_str, json_vni);
- } else {
+ } else
vty_out(vty, "\n");
- }
}
static void show_vni_entry(struct hash_bucket *bucket, void *args[])
@@ -1213,7 +1263,14 @@ static void show_vni_entry(struct hash_bucket *bucket, void *args[])
/* If there are multiple export RTs we break here and show only
* one */
if (!json) {
- vty_out(vty, "%-37s",
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, " %-25s", ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
+ vty_out(vty, " %-37s",
vrf_id_to_name(vpn->tenant_vrf_id));
break;
}
@@ -1223,11 +1280,18 @@ static void show_vni_entry(struct hash_bucket *bucket, void *args[])
char vni_str[VNI_STR_LEN];
json_object_object_add(json_vni, "exportRTs", json_export_rtl);
+ if (bgp_evpn && bgp_evpn->evpn_info) {
+ ecom_str = ecommunity_ecom2str(
+ bgp_evpn->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ json_object_string_add(json_vni, "siteOfOrigin",
+ ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
snprintf(vni_str, sizeof(vni_str), "%u", vpn->vni);
json_object_object_add(json, vni_str, json_vni);
- } else {
+ } else
vty_out(vty, "\n");
- }
}
static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd,
@@ -3276,8 +3340,9 @@ static void evpn_show_all_vnis(struct vty *vty, struct bgp *bgp,
if (!json) {
vty_out(vty, "Flags: * - Kernel\n");
- vty_out(vty, " %-10s %-4s %-21s %-25s %-25s %-37s\n", "VNI",
- "Type", "RD", "Import RT", "Export RT", "Tenant VRF");
+ vty_out(vty, " %-10s %-4s %-21s %-25s %-25s %-25s %-37s\n",
+ "VNI", "Type", "RD", "Import RT", "Export RT",
+ "MAC-VRF Site-of-Origin", "Tenant VRF");
}
/* print all L2 VNIS */
@@ -3923,6 +3988,58 @@ DEFPY(bgp_evpn_advertise_svi_ip_vni,
return CMD_SUCCESS;
}
+DEFPY(macvrf_soo_global, macvrf_soo_global_cmd,
+ "mac-vrf soo ASN:NN_OR_IP-ADDRESS:NN$soo",
+ "EVPN MAC-VRF\n"
+ "Site-of-Origin extended community\n"
+ "VPN extended community\n")
+{
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+ struct bgp *bgp_evpn = bgp_get_evpn();
+ struct ecommunity *ecomm_soo;
+
+ if (!bgp || !bgp_evpn || !bgp_evpn->evpn_info)
+ return CMD_WARNING;
+
+ if (bgp != bgp_evpn) {
+ vty_out(vty,
+ "%% Please configure MAC-VRF SoO in the EVPN underlay: %s\n",
+ bgp_evpn->name_pretty);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ ecomm_soo = ecommunity_str2com(soo, ECOMMUNITY_SITE_ORIGIN, 0);
+ if (!ecomm_soo) {
+ vty_out(vty, "%% Malformed SoO extended community\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ ecommunity_str(ecomm_soo);
+
+ bgp_evpn_handle_global_macvrf_soo_change(bgp_evpn, ecomm_soo);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_macvrf_soo_global, no_macvrf_soo_global_cmd,
+ "no mac-vrf soo [ASN:NN_OR_IP-ADDRESS:NN$soo]",
+ NO_STR
+ "EVPN MAC-VRF\n"
+ "Site-of-Origin extended community\n"
+ "VPN extended community\n")
+{
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+ struct bgp *bgp_evpn = bgp_get_evpn();
+
+ if (!bgp || !bgp_evpn || !bgp_evpn->evpn_info)
+ return CMD_WARNING;
+
+ if (bgp_evpn)
+ bgp_evpn_handle_global_macvrf_soo_change(bgp_evpn,
+ NULL /* new_soo */);
+
+ return CMD_SUCCESS;
+}
+
DEFUN_HIDDEN (bgp_evpn_advertise_vni_subnet,
bgp_evpn_advertise_vni_subnet_cmd,
"advertise-subnet",
@@ -7158,6 +7275,15 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi,
if (bgp->evpn_info->advertise_svi_macip)
vty_out(vty, " advertise-svi-ip\n");
+ if (bgp->evpn_info->soo) {
+ char *ecom_str;
+
+ ecom_str = ecommunity_ecom2str(bgp->evpn_info->soo,
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, " mac-vrf soo %s\n", ecom_str);
+ ecommunity_strfree(&ecom_str);
+ }
+
if (bgp->resolve_overlay_index)
vty_out(vty, " enable-resolve-overlay-index\n");
@@ -7390,6 +7516,8 @@ void bgp_ethernetvpn_init(void)
install_element(BGP_EVPN_NODE, &bgp_evpn_advertise_default_gw_cmd);
install_element(BGP_EVPN_NODE, &no_bgp_evpn_advertise_default_gw_cmd);
install_element(BGP_EVPN_NODE, &bgp_evpn_advertise_svi_ip_cmd);
+ install_element(BGP_EVPN_NODE, &macvrf_soo_global_cmd);
+ install_element(BGP_EVPN_NODE, &no_macvrf_soo_global_cmd);
install_element(BGP_EVPN_NODE, &bgp_evpn_advertise_type5_cmd);
install_element(BGP_EVPN_NODE, &no_bgp_evpn_advertise_type5_cmd);
install_element(BGP_EVPN_NODE, &bgp_evpn_default_originate_cmd);
diff --git a/bgpd/bgp_flowspec.c b/bgpd/bgp_flowspec.c
index 70bdbaf035..6165bf892e 100644
--- a/bgpd/bgp_flowspec.c
+++ b/bgpd/bgp_flowspec.c
@@ -189,13 +189,16 @@ int bgp_nlri_parse_flowspec(struct peer *peer, struct attr *attr,
zlog_info("%s", local_string);
}
/* Process the route. */
- if (!withdraw)
+ if (!withdraw) {
bgp_update(peer, &p, 0, attr, afi, safi,
ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
NULL, 0, 0, NULL);
- else
+ } else {
bgp_withdraw(peer, &p, 0, afi, safi, ZEBRA_ROUTE_BGP,
BGP_ROUTE_NORMAL, NULL, NULL, 0, NULL);
+ }
+
+ XFREE(MTYPE_TMP, temp);
}
return BGP_NLRI_PARSE_OK;
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index ad6906d092..4e8894cb44 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -46,13 +46,6 @@
DEFINE_HOOK(peer_backward_transition, (struct peer * peer), (peer));
DEFINE_HOOK(peer_status_changed, (struct peer * peer), (peer));
-enum bgp_fsm_state_progress {
- BGP_FSM_FAILURE_AND_DELETE = -2,
- BGP_FSM_FAILURE = -1,
- BGP_FSM_SUCCESS = 0,
- BGP_FSM_SUCCESS_STATE_TRANSFER = 1,
-};
-
/* Definition of display strings corresponding to FSM events. This should be
* kept consistent with the events defined in bgpd.h
*/
diff --git a/bgpd/bgp_fsm.h b/bgpd/bgp_fsm.h
index e3cfd0c893..daf31b266e 100644
--- a/bgpd/bgp_fsm.h
+++ b/bgpd/bgp_fsm.h
@@ -7,6 +7,13 @@
#ifndef _QUAGGA_BGP_FSM_H
#define _QUAGGA_BGP_FSM_H
+enum bgp_fsm_state_progress {
+ BGP_FSM_FAILURE_AND_DELETE = -2,
+ BGP_FSM_FAILURE = -1,
+ BGP_FSM_SUCCESS = 0,
+ BGP_FSM_SUCCESS_STATE_TRANSFER = 1,
+};
+
/* Macro for BGP read, write and timer thread. */
#define BGP_TIMER_ON(T, F, V) \
do { \
@@ -108,7 +115,7 @@
extern void bgp_fsm_nht_update(struct peer *peer, bool has_valid_nexthops);
extern void bgp_event(struct event *event);
extern int bgp_event_update(struct peer *, enum bgp_fsm_events event);
-extern int bgp_stop(struct peer *peer);
+extern enum bgp_fsm_state_progress bgp_stop(struct peer *peer);
extern void bgp_timer_set(struct peer *);
extern void bgp_routeadv_timer(struct event *event);
extern void bgp_fsm_change_status(struct peer *peer,
diff --git a/bgpd/bgp_label.c b/bgpd/bgp_label.c
index 0f3faeb297..30090e0590 100644
--- a/bgpd/bgp_label.c
+++ b/bgpd/bgp_label.c
@@ -470,3 +470,20 @@ int bgp_nlri_parse_label(struct peer *peer, struct attr *attr,
return BGP_NLRI_PARSE_OK;
}
+
+bool bgp_labels_same(const mpls_label_t *tbl_a, const uint32_t num_labels_a,
+ const mpls_label_t *tbl_b, const uint32_t num_labels_b)
+{
+ uint32_t i;
+
+ if (num_labels_a != num_labels_b)
+ return false;
+ if (num_labels_a == 0)
+ return true;
+
+ for (i = 0; i < num_labels_a; i++) {
+ if (tbl_a[i] != tbl_b[i])
+ return false;
+ }
+ return true;
+}
diff --git a/bgpd/bgp_label.h b/bgpd/bgp_label.h
index ac7fbb27fb..b54403ee89 100644
--- a/bgpd/bgp_label.h
+++ b/bgpd/bgp_label.h
@@ -26,6 +26,10 @@ extern mpls_label_t bgp_adv_label(struct bgp_dest *dest,
extern int bgp_nlri_parse_label(struct peer *peer, struct attr *attr,
struct bgp_nlri *packet);
+extern bool bgp_labels_same(const mpls_label_t *tbl_a,
+ const uint32_t num_labels_a,
+ const mpls_label_t *tbl_b,
+ const uint32_t num_labels_b);
static inline int bgp_labeled_safi(safi_t safi)
{
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index faddfc995f..137e88adea 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -843,6 +843,16 @@ DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
vty_out(vty, "%-18s %u\n", "nexthop",
lcb->label);
break;
+ case LP_TYPE_BGP_L3VPN_BIND:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "l3vpn-bind");
+ json_object_int_add(json_elem, "label",
+ lcb->label);
+ } else
+ vty_out(vty, "%-18s %u\n", "l3vpn-bind",
+ lcb->label);
+ break;
}
}
if (uj)
@@ -941,6 +951,15 @@ DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
vty_out(vty, "%-18s %u\n", "nexthop",
label);
break;
+ case LP_TYPE_BGP_L3VPN_BIND:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "l3vpn-bind");
+ json_object_int_add(json_elem, "label", label);
+ } else
+ vty_out(vty, "%-18s %u\n", "l3vpn-bind",
+ label);
+ break;
}
}
if (uj)
@@ -1020,6 +1039,13 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
else
vty_out(vty, "Nexthop\n");
break;
+ case LP_TYPE_BGP_L3VPN_BIND:
+ if (uj)
+ json_object_string_add(json_elem, "prefix",
+ "l3vpn-bind");
+ else
+ vty_out(vty, "L3VPN-BIND\n");
+ break;
}
}
if (uj)
@@ -1121,7 +1147,8 @@ static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
if (!detail)
continue;
vty_out(vty, " Paths:\n");
- LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
+ LIST_FOREACH (path, &(iter->paths),
+ mplsvpn.blnc.label_nh_thread) {
dest = path->net;
table = bgp_dest_table(dest);
assert(dest && table);
@@ -1703,7 +1730,7 @@ void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_DELETE,
blnc->label, blnc->nh->ifindex,
blnc->nh->vrf_id, ZEBRA_LSP_BGP,
- &blnc->nexthop);
+ &blnc->nexthop, 0, NULL);
bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
}
bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
diff --git a/bgpd/bgp_labelpool.h b/bgpd/bgp_labelpool.h
index b33527186e..9a110e6297 100644
--- a/bgpd/bgp_labelpool.h
+++ b/bgpd/bgp_labelpool.h
@@ -18,6 +18,7 @@
#define LP_TYPE_VRF 0x00000001
#define LP_TYPE_BGP_LU 0x00000002
#define LP_TYPE_NEXTHOP 0x00000003
+#define LP_TYPE_BGP_L3VPN_BIND 0x00000004
PREDECL_LIST(lp_fifo);
diff --git a/bgpd/bgp_mac.c b/bgpd/bgp_mac.c
index 6272bdb884..0398e4e8c1 100644
--- a/bgpd/bgp_mac.c
+++ b/bgpd/bgp_mac.c
@@ -279,15 +279,29 @@ static void bgp_mac_remove_ifp_internal(struct bgp_self_mac *bsm, char *ifname,
}
}
+/* Add/Update entry of the 'bgp mac hash' table.
+ * A rescan of the EVPN tables is only needed if
+ * a new hash bucket is allocated.
+ * Learning an existing mac on a new interface (or
+ * having an existing mac move from one interface to
+ * another) does not result in changes to self mac
+ * state, so we shouldn't trigger a rescan.
+ */
void bgp_mac_add_mac_entry(struct interface *ifp)
{
struct bgp_self_mac lookup;
struct bgp_self_mac *bsm;
struct bgp_self_mac *old_bsm;
char *ifname;
+ bool mac_added = false;
memcpy(&lookup.macaddr, &ifp->hw_addr, ETH_ALEN);
- bsm = hash_get(bm->self_mac_hash, &lookup, bgp_mac_hash_alloc);
+ bsm = hash_lookup(bm->self_mac_hash, &lookup);
+ if (!bsm) {
+ bsm = hash_get(bm->self_mac_hash, &lookup, bgp_mac_hash_alloc);
+ /* mac is new, rescan needs to be triggered */
+ mac_added = true;
+ }
/*
* Does this happen to be a move
@@ -318,7 +332,8 @@ void bgp_mac_add_mac_entry(struct interface *ifp)
listnode_add(bsm->ifp_list, ifname);
}
- bgp_mac_rescan_all_evpn_tables(&bsm->macaddr);
+ if (mac_added)
+ bgp_mac_rescan_all_evpn_tables(&bsm->macaddr);
}
void bgp_mac_del_mac_entry(struct interface *ifp)
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index dc9bd3cff5..068dc44bc6 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -39,6 +39,9 @@
#include "bgpd/rfapi/rfapi_backend.h"
#endif
+DEFINE_MTYPE_STATIC(BGPD, MPLSVPN_NH_LABEL_BIND_CACHE,
+ "BGP MPLSVPN nexthop label bind cache");
+
/*
* Definitions and external declarations.
*/
@@ -952,8 +955,6 @@ void transpose_sid(struct in6_addr *sid, uint32_t label, uint8_t offset,
static bool labels_same(struct bgp_path_info *bpi, mpls_label_t *label,
uint32_t n)
{
- uint32_t i;
-
if (!bpi->extra) {
if (!n)
return true;
@@ -961,14 +962,9 @@ static bool labels_same(struct bgp_path_info *bpi, mpls_label_t *label,
return false;
}
- if (n != bpi->extra->num_labels)
- return false;
-
- for (i = 0; i < n; ++i) {
- if (label[i] != bpi->extra->label[i])
- return false;
- }
- return true;
+ return bgp_labels_same((const mpls_label_t *)bpi->extra->label,
+ bpi->extra->num_labels,
+ (const mpls_label_t *)label, n);
}
/*
@@ -1345,14 +1341,18 @@ void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi)
if (!pi)
return;
- blnc = pi->label_nexthop_cache;
+ if (!CHECK_FLAG(pi->flags, BGP_PATH_MPLSVPN_LABEL_NH))
+ return;
+
+ blnc = pi->mplsvpn.blnc.label_nexthop_cache;
if (!blnc)
return;
- LIST_REMOVE(pi, label_nh_thread);
- pi->label_nexthop_cache->path_count--;
- pi->label_nexthop_cache = NULL;
+ LIST_REMOVE(pi, mplsvpn.blnc.label_nh_thread);
+ pi->mplsvpn.blnc.label_nexthop_cache->path_count--;
+ pi->mplsvpn.blnc.label_nexthop_cache = NULL;
+ UNSET_FLAG(pi->flags, BGP_PATH_MPLSVPN_LABEL_NH);
if (LIST_EMPTY(&(blnc->paths)))
bgp_label_per_nexthop_free(blnc);
@@ -1390,11 +1390,12 @@ static int bgp_mplsvpn_get_label_per_nexthop_cb(mpls_label_t label,
/* update paths */
if (blnc->label != MPLS_INVALID_LABEL)
- bgp_zebra_send_nexthop_label(
- ZEBRA_MPLS_LABELS_ADD, blnc->label, blnc->nh->ifindex,
- blnc->nh->vrf_id, ZEBRA_LSP_BGP, &blnc->nexthop);
+ bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_ADD, blnc->label,
+ blnc->nh->ifindex,
+ blnc->nh->vrf_id, ZEBRA_LSP_BGP,
+ &blnc->nexthop, 0, NULL);
- LIST_FOREACH (pi, &(blnc->paths), label_nh_thread) {
+ LIST_FOREACH (pi, &(blnc->paths), mplsvpn.blnc.label_nh_thread) {
if (!pi->net)
continue;
table = bgp_dest_table(pi->net);
@@ -1411,9 +1412,10 @@ static int bgp_mplsvpn_get_label_per_nexthop_cb(mpls_label_t label,
* - else allocate a new per label nexthop cache entry and request a
* label to zebra. Return MPLS_INVALID_LABEL
*/
-static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
- struct bgp_path_info *pi, struct bgp *to_bgp, struct bgp *from_bgp,
- afi_t afi, safi_t safi)
+static mpls_label_t
+_vpn_leak_from_vrf_get_per_nexthop_label(struct bgp_path_info *pi,
+ struct bgp *to_bgp,
+ struct bgp *from_bgp, afi_t afi)
{
struct bgp_nexthop_cache *bnc = pi->nexthop;
struct bgp_label_per_nexthop_cache *blnc;
@@ -1460,7 +1462,7 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
bgp_mplsvpn_get_label_per_nexthop_cb);
}
- if (pi->label_nexthop_cache == blnc)
+ if (pi->mplsvpn.blnc.label_nexthop_cache == blnc)
/* no change */
return blnc->label;
@@ -1469,9 +1471,10 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
bgp_mplsvpn_path_nh_label_unlink(pi);
/* updates NHT pi list reference */
- LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
- pi->label_nexthop_cache = blnc;
- pi->label_nexthop_cache->path_count++;
+ LIST_INSERT_HEAD(&(blnc->paths), pi, mplsvpn.blnc.label_nh_thread);
+ pi->mplsvpn.blnc.label_nexthop_cache = blnc;
+ pi->mplsvpn.blnc.label_nexthop_cache->path_count++;
+ SET_FLAG(pi->flags, BGP_PATH_MPLSVPN_LABEL_NH);
blnc->last_update = monotime(NULL);
/* then add or update the selected nexthop */
@@ -1484,7 +1487,7 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
bgp_zebra_send_nexthop_label(
ZEBRA_MPLS_LABELS_REPLACE, blnc->label,
bnc->nexthop->ifindex, bnc->nexthop->vrf_id,
- ZEBRA_LSP_BGP, &blnc->nexthop);
+ ZEBRA_LSP_BGP, &blnc->nexthop, 0, NULL);
}
}
@@ -1496,9 +1499,10 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
* - return the per VRF label when the per nexthop label is not supported
* Otherwise, find or request a per label nexthop.
*/
-static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
- afi_t afi, safi_t safi, struct bgp_path_info *pi, struct bgp *from_bgp,
- struct bgp *to_bgp)
+static mpls_label_t
+vpn_leak_from_vrf_get_per_nexthop_label(afi_t afi, struct bgp_path_info *pi,
+ struct bgp *from_bgp,
+ struct bgp *to_bgp)
{
struct bgp_path_info *bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
struct bgp *bgp_nexthop = NULL;
@@ -1556,8 +1560,8 @@ static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
bgp_nexthop = from_bgp;
nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
- nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi, safi,
- pi, NULL, 0, NULL);
+ nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi,
+ SAFI_UNICAST, pi, NULL, 0, NULL);
if (!nh_valid && is_bgp_static_route &&
!CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
@@ -1593,7 +1597,7 @@ static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
}
return _vpn_leak_from_vrf_get_per_nexthop_label(pi, to_bgp, from_bgp,
- afi, safi);
+ afi);
}
/* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
@@ -1792,7 +1796,7 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
/* per nexthop label mode */
label_val = vpn_leak_from_vrf_get_per_nexthop_label(
- afi, safi, path_vrf, from_bgp, to_bgp);
+ afi, path_vrf, from_bgp, to_bgp);
else
/* per VRF label mode */
label_val = from_bgp->vpn_policy[afi].tovpn_label;
@@ -2116,7 +2120,7 @@ static struct bgp *bgp_lookup_by_rd(struct bgp_path_info *bpi,
return NULL;
}
-static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
+static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vpn,
struct prefix_rd *prd)
@@ -2146,7 +2150,7 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
"%s: from vpn (%s) to vrf (%s), skipping: %s",
__func__, from_bgp->name_pretty,
to_bgp->name_pretty, debugmsg);
- return false;
+ return;
}
/*
@@ -2173,7 +2177,7 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
zlog_debug(
"from vpn (%s) to vrf (%s), skipping after no intersection of route targets",
from_bgp->name_pretty, to_bgp->name_pretty);
- return false;
+ return;
}
rd_buf[0] = '\0';
@@ -2190,7 +2194,7 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
zlog_debug(
"%s: skipping import, match RD (%s) of src VRF (%s) and the prefix (%pFX)",
__func__, rd_buf, to_bgp->name_pretty, p);
- return false;
+ return;
}
if (debug)
@@ -2301,7 +2305,7 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
to_bgp->vpn_policy[afi]
.rmap[BGP_VPN_POLICY_DIR_FROMVPN]
->name);
- return false;
+ return;
}
/*
* if route-map changed nexthop, don't nexthop-self on output
@@ -2363,17 +2367,60 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
num_labels, src_vrf, &nexthop_orig, nexthop_self_flag,
debug))
bgp_dest_unlock_node(bn);
+}
+
+/* Decide whether an incoming VPN route must be filtered under
+ * "no retain route-target all": walk every BGP instance and test
+ * the route's route-target extended communities against each
+ * instance's FROMVPN route-target import list.
+ *
+ * Returns false as soon as one instance with active VPN-to-VRF
+ * leaking would import the route (do not filter), true when no
+ * instance imports it (the route must be filtered).
+ */
+bool vpn_leak_to_vrf_no_retain_filter_check(struct bgp *from_bgp,
+					    struct attr *attr, afi_t afi)
+{
+	struct ecommunity *ecom_route_target = bgp_attr_get_ecommunity(attr);
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
+	struct listnode *node;
+	const char *debugmsg;
+	struct bgp *to_bgp;
+
+	/* Loop over BGP instances */
+	for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, to_bgp)) {
+		/* instances without VPN-to-VRF leaking cannot import */
+		if (!vpn_leak_from_vpn_active(to_bgp, afi, &debugmsg)) {
+			if (debug)
+				zlog_debug(
+					"%s: from vpn (%s) to vrf (%s) afi %s, skipping: %s",
+					__func__, from_bgp->name_pretty,
+					to_bgp->name_pretty, afi2str(afi),
+					debugmsg);
+			continue;
+		}
+
+		/* Check for intersection of route targets */
+		if (!ecommunity_include(
+			    to_bgp->vpn_policy[afi]
+				    .rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
+			    ecom_route_target)) {
+			if (debug)
+				zlog_debug(
+					"%s: from vpn (%s) to vrf (%s) afi %s %s, skipping after no intersection of route targets",
+					__func__, from_bgp->name_pretty,
+					to_bgp->name_pretty, afi2str(afi),
+					ecommunity_str(ecom_route_target));
+			continue;
+		}
+		/* at least one importer exists: keep the route */
+		return false;
+	}
+
+	if (debug)
+		zlog_debug(
+			"%s: from vpn (%s) afi %s %s, no import - must be filtered",
+			__func__, from_bgp->name_pretty, afi2str(afi),
+			ecommunity_str(ecom_route_target));
+	return true;
+}
-bool vpn_leak_to_vrf_update(struct bgp *from_bgp,
+void vpn_leak_to_vrf_update(struct bgp *from_bgp,
struct bgp_path_info *path_vpn,
struct prefix_rd *prd)
{
struct listnode *mnode, *mnnode;
struct bgp *bgp;
- bool leak_success = false;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
@@ -2385,11 +2432,10 @@ bool vpn_leak_to_vrf_update(struct bgp *from_bgp,
if (!path_vpn->extra
|| path_vpn->extra->bgp_orig != bgp) { /* no loop */
- leak_success |= vpn_leak_to_vrf_update_onevrf(
- bgp, from_bgp, path_vpn, prd);
+ vpn_leak_to_vrf_update_onevrf(bgp, from_bgp, path_vpn,
+ prd);
}
}
- return leak_success;
}
void vpn_leak_to_vrf_withdraw(struct bgp_path_info *path_vpn)
@@ -2509,6 +2555,51 @@ void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi)
}
}
+/* Remove, from 'vpn_from''s VPN RIB, the normal (received) routes that
+ * no BGP instance imports, after "retain route-target all" has been
+ * turned off.  Routes originated/leaked by 'to_bgp' itself and
+ * non-BGP_ROUTE_NORMAL entries are left untouched; each remaining
+ * route is kept only if vpn_leak_to_vrf_no_retain_filter_check()
+ * says some instance still imports it.
+ */
+void vpn_leak_no_retain(struct bgp *to_bgp, struct bgp *vpn_from, afi_t afi)
+{
+	struct bgp_dest *pdest;
+	safi_t safi = SAFI_MPLS_VPN;
+
+	assert(vpn_from);
+
+	/*
+	 * Walk vpn table
+	 */
+	for (pdest = bgp_table_top(vpn_from->rib[afi][safi]); pdest;
+	     pdest = bgp_route_next(pdest)) {
+		struct bgp_table *table;
+		struct bgp_dest *bn;
+		struct bgp_path_info *bpi;
+
+		/* This is the per-RD table of prefixes */
+		table = bgp_dest_get_bgp_table_info(pdest);
+
+		if (!table)
+			continue;
+
+		for (bn = bgp_table_top(table); bn; bn = bgp_route_next(bn)) {
+			for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
+			     bpi = bpi->next) {
+
+				/* skip routes leaked by to_bgp itself */
+				if (bpi->extra &&
+				    bpi->extra->bgp_orig == to_bgp)
+					continue;
+
+				if (bpi->sub_type != BGP_ROUTE_NORMAL)
+					continue;
+
+				if (!vpn_leak_to_vrf_no_retain_filter_check(
+					    vpn_from, bpi->attr, afi))
+					/* do not filter */
+					continue;
+
+				/* unimported everywhere: drop the route */
+				bgp_unlink_nexthop(bpi);
+				bgp_rib_remove(bn, bpi, bpi->peer, afi, safi);
+			}
+		}
+	}
+}
+
void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *vpn_from,
afi_t afi)
{
@@ -3713,6 +3804,7 @@ void vpn_leak_postchange_all(void)
*/
void bgp_vpn_leak_unimport(struct bgp *from_bgp)
{
+ struct bgp *bgp_default = bgp_get_default();
struct bgp *to_bgp;
const char *tmp_name;
char *vname;
@@ -3791,6 +3883,17 @@ void bgp_vpn_leak_unimport(struct bgp *from_bgp)
}
}
}
+
+ if (bgp_default &&
+ !CHECK_FLAG(bgp_default->af_flags[afi][SAFI_MPLS_VPN],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL)) {
+ /* 'from_bgp' instance will be deleted
+ * so force to unset importation to update VPN labels
+ */
+ UNSET_FLAG(from_bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT);
+ vpn_leak_no_retain(from_bgp, bgp_default, afi);
+ }
}
return;
}
@@ -3865,3 +3968,377 @@ void bgp_vpn_leak_export(struct bgp *from_bgp)
}
}
}
+
+/* The nexthop and original label values are compared to
+ * find in the tree the appropriate cache entry.
+ * The nexthop prefix is the primary key, the original label the
+ * secondary key.
+ */
+int bgp_mplsvpn_nh_label_bind_cmp(
+	const struct bgp_mplsvpn_nh_label_bind_cache *a,
+	const struct bgp_mplsvpn_nh_label_bind_cache *b)
+{
+	int ret;
+
+	/* Propagate the sign of prefix_cmp(): always returning 1 on any
+	 * prefix mismatch would make the comparator inconsistent
+	 * (a < b and b < a would both report "greater"), violating the
+	 * strict ordering the RB-tree relies on for lookups.
+	 */
+	ret = prefix_cmp(&a->nexthop, &b->nexthop);
+	if (ret)
+		return ret;
+	if (a->orig_label > b->orig_label)
+		return 1;
+	if (a->orig_label < b->orig_label)
+		return -1;
+	return 0;
+}
+
+/* Send to zebra the MPLS entry that swaps the locally allocated
+ * new_label with the original label stack towards the resolved
+ * nexthop.  The outgoing label stack is built from the resolving
+ * nexthop's LSP labels (if any, truncated to MPLS_MAX_LABELS) with
+ * the original VPN label appended last.
+ * 'cmd' is one of ZEBRA_MPLS_LABELS_{ADD,REPLACE,DELETE}.
+ * No-op when no nexthop has been resolved yet or the nexthop is a
+ * blackhole.
+ */
+static void bgp_mplsvpn_nh_label_bind_send_nexthop_label(
+	struct bgp_mplsvpn_nh_label_bind_cache *bmnc, int cmd)
+{
+	struct prefix pfx_nh, *p = NULL;
+	uint32_t num_labels = 0, lsp_num_labels;
+	mpls_label_t label[MPLS_MAX_LABELS];
+	struct nexthop *nh;
+	ifindex_t ifindex = IFINDEX_INTERNAL;
+	vrf_id_t vrf_id = VRF_DEFAULT;
+	uint32_t i;
+
+	if (bmnc->nh == NULL)
+		return;
+	nh = bmnc->nh;
+	switch (nh->type) {
+	case NEXTHOP_TYPE_IFINDEX:
+		/* directly connected: single label via the interface */
+		p = &bmnc->nexthop;
+		label[num_labels] = bmnc->orig_label;
+		num_labels += 1;
+		ifindex = nh->ifindex;
+		vrf_id = nh->vrf_id;
+		break;
+	case NEXTHOP_TYPE_IPV4:
+	case NEXTHOP_TYPE_IPV4_IFINDEX:
+	case NEXTHOP_TYPE_IPV6:
+	case NEXTHOP_TYPE_IPV6_IFINDEX:
+		/* build a host prefix from the gateway address */
+		if (nh->type == NEXTHOP_TYPE_IPV4 ||
+		    nh->type == NEXTHOP_TYPE_IPV4_IFINDEX) {
+			pfx_nh.family = AF_INET;
+			pfx_nh.prefixlen = IPV4_MAX_BITLEN;
+			IPV4_ADDR_COPY(&pfx_nh.u.prefix4, &nh->gate.ipv4);
+		} else {
+			pfx_nh.family = AF_INET6;
+			pfx_nh.prefixlen = IPV6_MAX_BITLEN;
+			IPV6_ADDR_COPY(&pfx_nh.u.prefix6, &nh->gate.ipv6);
+		}
+		p = &pfx_nh;
+		/* prepend the resolving LSP's labels, clamped so the
+		 * appended original label still fits in the stack
+		 */
+		if (nh->nh_label) {
+			if (nh->nh_label->num_labels >
+			    MPLS_MAX_LABELS - num_labels)
+				lsp_num_labels = MPLS_MAX_LABELS - num_labels;
+			else
+				lsp_num_labels = nh->nh_label->num_labels;
+			for (i = 0; i < lsp_num_labels; i++)
+				label[num_labels + i] = nh->nh_label->label[i];
+			num_labels += lsp_num_labels;
+		}
+		label[num_labels] = bmnc->orig_label;
+		num_labels += 1;
+		if (nh->type == NEXTHOP_TYPE_IPV4_IFINDEX ||
+		    nh->type == NEXTHOP_TYPE_IPV6_IFINDEX) {
+			ifindex = nh->ifindex;
+			vrf_id = nh->vrf_id;
+		}
+		break;
+	case NEXTHOP_TYPE_BLACKHOLE:
+		/* nothing to program for a blackhole nexthop */
+		return;
+	}
+	bgp_zebra_send_nexthop_label(cmd, bmnc->new_label, ifindex, vrf_id,
+				     ZEBRA_LSP_BGP, p, num_labels, &label[0]);
+}
+
+/* Release a nexthop label bind cache entry.
+ * If a label allocation is still pending with zebra, only unlink the
+ * entry from the tree and keep the memory alive: the label callback
+ * (bgp_mplsvpn_nh_label_bind_get_local_label_cb) still holds a
+ * pointer to it and performs the final free once it fires.
+ * Otherwise withdraw the programmed MPLS entry, return the label to
+ * the pool, unlink and free the entry.
+ */
+void bgp_mplsvpn_nh_label_bind_free(
+	struct bgp_mplsvpn_nh_label_bind_cache *bmnc)
+{
+	if (bmnc->allocation_in_progress) {
+		bmnc->allocation_in_progress = false;
+		bgp_mplsvpn_nh_label_bind_cache_del(
+			&bmnc->bgp_vpn->mplsvpn_nh_label_bind, bmnc);
+		return;
+	}
+	if (bmnc->new_label != MPLS_INVALID_LABEL) {
+		bgp_mplsvpn_nh_label_bind_send_nexthop_label(
+			bmnc, ZEBRA_MPLS_LABELS_DELETE);
+		bgp_lp_release(LP_TYPE_BGP_L3VPN_BIND, bmnc, bmnc->new_label);
+	}
+	bgp_mplsvpn_nh_label_bind_cache_del(
+		&bmnc->bgp_vpn->mplsvpn_nh_label_bind, bmnc);
+
+	/* free the nexthop duplicated with nexthop_dup() in
+	 * bgp_mplsvpn_nh_label_bind_register_local_label(); without
+	 * this it leaks on every cache entry teardown
+	 */
+	if (bmnc->nh)
+		nexthop_free(bmnc->nh);
+
+	XFREE(MTYPE_MPLSVPN_NH_LABEL_BIND_CACHE, bmnc);
+}
+
+/* Allocate a new nexthop label bind cache entry keyed on
+ * (nexthop prefix, original label), initialise it with an invalid
+ * local label and an empty path list, then insert it in 'tree'.
+ * Returns the newly inserted entry.
+ */
+struct bgp_mplsvpn_nh_label_bind_cache *
+bgp_mplsvpn_nh_label_bind_new(struct bgp_mplsvpn_nh_label_bind_cache_head *tree,
+			      struct prefix *p, mpls_label_t orig_label)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache *entry;
+
+	entry = XCALLOC(MTYPE_MPLSVPN_NH_LABEL_BIND_CACHE,
+			sizeof(struct bgp_mplsvpn_nh_label_bind_cache));
+	prefix_copy(&entry->nexthop, p);
+	entry->orig_label = orig_label;
+	/* no local label allocated yet */
+	entry->new_label = MPLS_INVALID_LABEL;
+	LIST_INIT(&(entry->paths));
+
+	bgp_mplsvpn_nh_label_bind_cache_add(tree, entry);
+
+	return entry;
+}
+
+/* Look up the cache entry matching (nexthop prefix, original label)
+ * in 'tree'.  Returns NULL when the tree is unset or no entry
+ * matches.
+ */
+struct bgp_mplsvpn_nh_label_bind_cache *bgp_mplsvpn_nh_label_bind_find(
+	struct bgp_mplsvpn_nh_label_bind_cache_head *tree, struct prefix *p,
+	mpls_label_t orig_label)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache lookup = {0};
+
+	if (!tree)
+		return NULL;
+
+	/* fill only the key fields used by the tree comparator */
+	prefix_copy(&lookup.nexthop, p);
+	lookup.orig_label = orig_label;
+
+	return bgp_mplsvpn_nh_label_bind_cache_find(tree, &lookup);
+}
+
+/* Called to check if the incoming l3vpn path entry
+ * has mpls label information.
+ * Returns false for SRv6 paths, for paths carrying a prefix-sid
+ * attribute with a valid label index, and for paths without a
+ * valid MPLS label in extra->label[0]; true otherwise.
+ */
+bool bgp_mplsvpn_path_uses_valid_mpls_label(struct bgp_path_info *pi)
+{
+	if (pi->attr && pi->attr->srv6_l3vpn)
+		/* srv6 sid */
+		return false;
+
+	if (pi->attr &&
+	    CHECK_FLAG(pi->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID)) &&
+	    pi->attr->label_index != BGP_INVALID_LABEL_INDEX)
+		/* prefix_sid attribute */
+		return false;
+
+	if (!pi->extra || !bgp_is_valid_label(&pi->extra->label[0]))
+		/* invalid MPLS label */
+		return false;
+	return true;
+}
+
+/* Return the locally bound label for 'pi', encoded as an LSE and
+ * marked valid, ready to be advertised.  Returns MPLS_INVALID_LABEL
+ * when the path has no bind cache entry or the allocation from the
+ * label pool has not completed yet.
+ */
+mpls_label_t bgp_mplsvpn_nh_label_bind_get_label(struct bgp_path_info *pi)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache *bind;
+	mpls_label_t lse;
+
+	bind = pi->mplsvpn.bmnc.nh_label_bind_cache;
+	if (bind == NULL || bind->new_label == MPLS_INVALID_LABEL)
+		/* allocation in progress
+		 * or path not eligible for local label
+		 */
+		return MPLS_INVALID_LABEL;
+
+	lse = mpls_lse_encode(bind->new_label, 0, 0, 1);
+	bgp_set_valid_label(&lse);
+
+	return lse;
+}
+
+/* Called upon reception of a ZAPI Message from zebra, about
+ * a new available label.
+ * Records the (de)allocated label in the cache entry, programs the
+ * swap LSP when a label was obtained, and re-runs bestpath on every
+ * path attached to the entry so the new label gets advertised.
+ * Always returns 0 (labelpool callback convention).
+ */
+static int bgp_mplsvpn_nh_label_bind_get_local_label_cb(mpls_label_t label,
+							void *context,
+							bool allocated)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache *bmnc = context;
+	struct bgp_table *table;
+	struct bgp_path_info *pi;
+
+	if (BGP_DEBUG(labelpool, LABELPOOL))
+		zlog_debug("%s: label=%u, allocated=%d, nexthop=%pFX, label %u",
+			   __func__, label, allocated, &bmnc->nexthop,
+			   bmnc->orig_label);
+	if (allocated)
+		/* update the entry with the new label */
+		bmnc->new_label = label;
+	else
+		/*
+		 * previously-allocated label is now invalid
+		 * eg: zebra deallocated the labels and notifies it
+		 */
+		bmnc->new_label = MPLS_INVALID_LABEL;
+
+	if (!bmnc->allocation_in_progress) {
+		/* the entry was released while the request was in flight
+		 * (bgp_mplsvpn_nh_label_bind_free cleared the flag and
+		 * unlinked it); finish the teardown now
+		 */
+		bgp_mplsvpn_nh_label_bind_free(bmnc);
+		return 0;
+	}
+	bmnc->allocation_in_progress = false;
+
+	if (bmnc->new_label != MPLS_INVALID_LABEL)
+		/*
+		 * Create the LSP : <local_label -> bmnc->orig_label,
+		 * via bmnc->prefix, interface bnc->nexthop->ifindex
+		 */
+		bgp_mplsvpn_nh_label_bind_send_nexthop_label(
+			bmnc, ZEBRA_MPLS_LABELS_ADD);
+
+	LIST_FOREACH (pi, &(bmnc->paths), mplsvpn.bmnc.nh_label_bind_thread) {
+		/* we can advertise it */
+		if (!pi->net)
+			continue;
+		table = bgp_dest_table(pi->net);
+		if (!table)
+			continue;
+		SET_FLAG(pi->net->flags, BGP_NODE_LABEL_CHANGED);
+		bgp_process(table->bgp, pi->net, table->afi, table->safi);
+	}
+
+	return 0;
+}
+
+/* Detach 'pi' from its nexthop label bind cache entry and release the
+ * entry when no path references it any more.  No-op when the path is
+ * not currently bound.
+ */
+void bgp_mplsvpn_path_nh_label_bind_unlink(struct bgp_path_info *pi)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache *bmnc;
+
+	if (!pi)
+		return;
+
+	if (!CHECK_FLAG(pi->flags, BGP_PATH_MPLSVPN_NH_LABEL_BIND))
+		return;
+
+	bmnc = pi->mplsvpn.bmnc.nh_label_bind_cache;
+
+	if (!bmnc)
+		return;
+
+	LIST_REMOVE(pi, mplsvpn.bmnc.nh_label_bind_thread);
+	pi->mplsvpn.bmnc.nh_label_bind_cache->path_count--;
+	pi->mplsvpn.bmnc.nh_label_bind_cache = NULL;
+	/* The path is no longer bound: the flag must be CLEARED here.
+	 * The previous code SET it again, leaving the flag raised with a
+	 * NULL cache pointer; cf. the matching UNSET_FLAG in
+	 * bgp_mplsvpn_path_nh_label_unlink().
+	 */
+	UNSET_FLAG(pi->flags, BGP_PATH_MPLSVPN_NH_LABEL_BIND);
+
+	if (LIST_EMPTY(&(bmnc->paths)))
+		bgp_mplsvpn_nh_label_bind_free(bmnc);
+}
+
+/* Bind 'pi' to the cache entry matching its (nexthop, received label)
+ * pair, creating the entry and requesting a local label from the pool
+ * if none exists yet.  Moves the path from any previous entry, then
+ * records/refreshes the resolved nexthop and reprograms the LSP when
+ * the nexthop changed and a label is already held.
+ * Caller must ensure pi->nexthop and pi->extra->label[0] are set.
+ */
+void bgp_mplsvpn_nh_label_bind_register_local_label(struct bgp *bgp,
+						    struct bgp_dest *dest,
+						    struct bgp_path_info *pi)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache *bmnc;
+	struct bgp_mplsvpn_nh_label_bind_cache_head *tree;
+
+	tree = &bgp->mplsvpn_nh_label_bind;
+	bmnc = bgp_mplsvpn_nh_label_bind_find(
+		tree, &pi->nexthop->prefix, decode_label(&pi->extra->label[0]));
+	if (!bmnc) {
+		bmnc = bgp_mplsvpn_nh_label_bind_new(
+			tree, &pi->nexthop->prefix,
+			decode_label(&pi->extra->label[0]));
+		bmnc->bgp_vpn = bgp;
+		bmnc->allocation_in_progress = true;
+		bgp_lp_get(LP_TYPE_BGP_L3VPN_BIND, bmnc,
+			   bgp_mplsvpn_nh_label_bind_get_local_label_cb);
+	}
+
+	if (pi->mplsvpn.bmnc.nh_label_bind_cache == bmnc)
+		/* no change */
+		return;
+
+	bgp_mplsvpn_path_nh_label_bind_unlink(pi);
+	/* NOTE(review): bmnc is always non-NULL here (found or freshly
+	 * created above), so this guard never fails.
+	 */
+	if (bmnc) {
+		/* updates NHT pi list reference */
+		LIST_INSERT_HEAD(&(bmnc->paths), pi,
+				 mplsvpn.bmnc.nh_label_bind_thread);
+		pi->mplsvpn.bmnc.nh_label_bind_cache = bmnc;
+		pi->mplsvpn.bmnc.nh_label_bind_cache->path_count++;
+		SET_FLAG(pi->flags, BGP_PATH_MPLSVPN_NH_LABEL_BIND);
+		bmnc->last_update = monotime(NULL);
+	}
+
+	/* Add or update the selected nexthop */
+	if (!bmnc->nh)
+		bmnc->nh = nexthop_dup(pi->nexthop->nexthop, NULL);
+	else if (!nexthop_same(pi->nexthop->nexthop, bmnc->nh)) {
+		nexthop_free(bmnc->nh);
+		bmnc->nh = nexthop_dup(pi->nexthop->nexthop, NULL);
+		if (bmnc->new_label != MPLS_INVALID_LABEL)
+			/* nexthop changed: reprogram the swap LSP */
+			bgp_mplsvpn_nh_label_bind_send_nexthop_label(
+				bmnc, ZEBRA_MPLS_LABELS_REPLACE);
+	}
+}
+
+/* Dump the nexthop label bind cache of 'bgp' to the vty: one line per
+ * entry (nexthop, labels, path count, interface, last update time),
+ * plus the attached paths when 'detail' is set.
+ */
+static void show_bgp_mplsvpn_nh_label_bind_internal(struct vty *vty,
+						    struct bgp *bgp,
+						    bool detail)
+{
+	struct bgp_mplsvpn_nh_label_bind_cache_head *tree;
+	struct bgp_mplsvpn_nh_label_bind_cache *iter;
+	afi_t afi;
+	safi_t safi;
+	struct bgp_dest *dest;
+	struct bgp_path_info *path;
+	struct bgp *bgp_path;
+	struct bgp_table *table;
+	time_t tbuf;
+
+	vty_out(vty, "Current BGP mpls-vpn nexthop label bind cache, %s\n",
+		bgp->name_pretty);
+
+	tree = &bgp->mplsvpn_nh_label_bind;
+	frr_each (bgp_mplsvpn_nh_label_bind_cache, tree, iter) {
+		if (iter->nexthop.family == AF_INET)
+			vty_out(vty, " %pI4", &iter->nexthop.u.prefix4);
+		else
+			vty_out(vty, " %pI6", &iter->nexthop.u.prefix6);
+		vty_out(vty, ", label %u, local label %u #paths %u\n",
+			iter->orig_label, iter->new_label, iter->path_count);
+		if (iter->nh)
+			vty_out(vty, "  interface %s\n",
+				ifindex2ifname(iter->nh->ifindex,
+					       iter->nh->vrf_id));
+		/* convert the monotonic timestamp to wall-clock time */
+		tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
+		vty_out(vty, "  Last update: %s", ctime(&tbuf));
+		if (!detail)
+			continue;
+		vty_out(vty, "  Paths:\n");
+		LIST_FOREACH (path, &(iter->paths),
+			      mplsvpn.bmnc.nh_label_bind_thread) {
+			dest = path->net;
+			table = bgp_dest_table(dest);
+			assert(dest && table);
+			afi = family2afi(bgp_dest_get_prefix(dest)->family);
+			safi = table->safi;
+			bgp_path = table->bgp;
+
+			vty_out(vty, "    %d/%d %pBD %s flags 0x%x\n", afi,
+				safi, dest, bgp_path->name_pretty, path->flags);
+		}
+	}
+}
+
+
+/* CLI handler: "show bgp [<view|vrf> NAME] mplsvpn-nh-label-bind
+ * [detail]".  Resolves the target instance (default instance when no
+ * vrf/view is given) and delegates the display to
+ * show_bgp_mplsvpn_nh_label_bind_internal().
+ */
+DEFUN(show_bgp_mplsvpn_nh_label_bind, show_bgp_mplsvpn_nh_label_bind_cmd,
+      "show bgp [<view|vrf> VIEWVRFNAME] mplsvpn-nh-label-bind [detail]",
+      SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
+      "BGP mplsvpn nexthop label binding entries\n"
+      "Show detailed information\n")
+{
+	int idx = 0;
+	char *vrf = NULL;
+	struct bgp *bgp;
+	bool detail = false;
+
+	if (argv_find(argv, argc, "vrf", &idx)) {
+		vrf = argv[++idx]->arg;
+		bgp = bgp_lookup_by_name(vrf);
+	} else
+		bgp = bgp_get_default();
+
+	/* silently succeed when the instance does not exist */
+	if (!bgp)
+		return CMD_SUCCESS;
+
+	if (argv_find(argv, argc, "detail", &idx))
+		detail = true;
+
+	show_bgp_mplsvpn_nh_label_bind_internal(vty, bgp, detail);
+	return CMD_SUCCESS;
+}
+
+/* Register the "show bgp ... mplsvpn-nh-label-bind" vty command. */
+void bgp_mplsvpn_nexthop_init(void)
+{
+	install_element(VIEW_NODE, &show_bgp_mplsvpn_nh_label_bind_cmd);
+}
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 75758edcc2..a7f31854f0 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -56,10 +56,17 @@ extern void vpn_leak_from_vrf_update_all(struct bgp *to_bgp,
extern void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi);
+extern void vpn_leak_no_retain(struct bgp *to_bgp, struct bgp *vpn_from,
+ afi_t afi);
+
extern void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi);
-extern bool vpn_leak_to_vrf_update(struct bgp *from_bgp,
+extern bool vpn_leak_to_vrf_no_retain_filter_check(struct bgp *from_bgp,
+ struct attr *attr,
+ afi_t afi);
+
+extern void vpn_leak_to_vrf_update(struct bgp *from_bgp,
struct bgp_path_info *path_vpn,
struct prefix_rd *prd);
@@ -325,4 +332,71 @@ extern void vpn_handle_router_id_update(struct bgp *bgp, bool withdraw,
extern void bgp_vpn_leak_unimport(struct bgp *from_bgp);
extern void bgp_vpn_leak_export(struct bgp *from_bgp);
+extern bool bgp_mplsvpn_path_uses_valid_mpls_label(struct bgp_path_info *pi);
+extern int
+bgp_mplsvpn_nh_label_bind_cmp(const struct bgp_mplsvpn_nh_label_bind_cache *a,
+ const struct bgp_mplsvpn_nh_label_bind_cache *b);
+extern void bgp_mplsvpn_path_nh_label_bind_unlink(struct bgp_path_info *pi);
+extern void bgp_mplsvpn_nh_label_bind_register_local_label(
+ struct bgp *bgp, struct bgp_dest *dest, struct bgp_path_info *pi);
+mpls_label_t bgp_mplsvpn_nh_label_bind_get_label(struct bgp_path_info *pi);
+
+/* used to bind a local label to the (label, nexthop) values
+ * from an incoming BGP mplsvpn update
+ */
+struct bgp_mplsvpn_nh_label_bind_cache {
+
+	/* RB-tree entry. */
+	struct bgp_mplsvpn_nh_label_bind_cache_item entry;
+
+	/* The nexthop and the vpn label are the key of the list.
+	 * Only received BGP MPLSVPN updates may use that structure.
+	 * orig_label is the original label received from the BGP Update.
+	 */
+	struct prefix nexthop;
+	mpls_label_t orig_label;
+
+	/* resolved interface for the paths */
+	struct nexthop *nh;
+
+	/* number of mplsvpn path */
+	unsigned int path_count;
+
+	/* back pointer to bgp instance */
+	struct bgp *bgp_vpn;
+
+	/* MPLS label allocated value.
+	 * When the next-hop is changed because of 'next-hop-self' or
+	 * because it is an eBGP peer, the redistributed orig_label value
+	 * is unmodified, unless the 'l3vpn-multi-domain-switching'
+	 * is enabled: a new_label value is allocated:
+	 * - The new_label value is sent in the advertised BGP update,
+	 *   instead of the label value.
+	 * - An MPLS entry is set to swap <new_label> with <orig_label>.
+	 */
+	mpls_label_t new_label;
+
+	/* list of path_vrfs using it */
+	LIST_HEAD(mplsvpn_nh_label_bind_path_lists, bgp_path_info) paths;
+
+	/* monotime() timestamp of the last binding update */
+	time_t last_update;
+
+	/* set while a label request to the pool is outstanding; the
+	 * allocation callback performs deferred teardown when the
+	 * entry was freed in the meantime
+	 */
+	bool allocation_in_progress;
+};
+
+DECLARE_RBTREE_UNIQ(bgp_mplsvpn_nh_label_bind_cache,
+ struct bgp_mplsvpn_nh_label_bind_cache, entry,
+ bgp_mplsvpn_nh_label_bind_cmp);
+
+void bgp_mplsvpn_nh_label_bind_free(
+ struct bgp_mplsvpn_nh_label_bind_cache *bmnc);
+
+struct bgp_mplsvpn_nh_label_bind_cache *
+bgp_mplsvpn_nh_label_bind_new(struct bgp_mplsvpn_nh_label_bind_cache_head *tree,
+ struct prefix *p, mpls_label_t orig_label);
+struct bgp_mplsvpn_nh_label_bind_cache *bgp_mplsvpn_nh_label_bind_find(
+ struct bgp_mplsvpn_nh_label_bind_cache_head *tree, struct prefix *p,
+ mpls_label_t orig_label);
+void bgp_mplsvpn_nexthop_init(void);
+
#endif /* _QUAGGA_BGP_MPLSVPN_H */
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index e235a61f59..73fe00c7ab 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -810,8 +810,9 @@ int bgp_getsockname(struct peer *peer)
&peer->nexthop, peer)) {
flog_err(
EC_BGP_NH_UPD,
- "%s: nexthop_set failed, resetting connection - intf %s",
- peer->host,
+ "%s: nexthop_set failed, local: %pSUp remote: %pSUp update_if: %s resetting connection - intf %s",
+ peer->host, peer->su_local, peer->su_remote,
+ peer->update_if ? peer->update_if : "(None)",
peer->nexthop.ifp ? peer->nexthop.ifp->name
: "(Unknown)");
return -1;
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index c878512389..a854ca0fe4 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -121,6 +121,7 @@ static void bgp_nexthop_cache_reset(struct bgp_nexthop_cache_head *tree)
struct bgp_path_info *path = LIST_FIRST(&(bnc->paths));
bgp_mplsvpn_path_nh_label_unlink(path);
+ bgp_mplsvpn_path_nh_label_bind_unlink(path);
path_nh_map(path, bnc, false);
}
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 95e2f9165b..47b6464085 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -104,11 +104,6 @@ struct tip_addr {
int refcnt;
};
-struct bgp_addrv6 {
- struct in6_addr addrv6;
- struct list *ifp_name_list;
-};
-
/* Forward declaration(s). */
struct peer;
struct update_subgroup;
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index d7b1429881..ba5b0c7a7d 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -151,6 +151,7 @@ void bgp_unlink_nexthop(struct bgp_path_info *path)
struct bgp_nexthop_cache *bnc = path->nexthop;
bgp_mplsvpn_path_nh_label_unlink(path);
+ bgp_mplsvpn_path_nh_label_bind_unlink(path);
if (!bnc)
return;
@@ -467,7 +468,12 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra &&
pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop)
return bgp_isvalid_nexthop_for_mpls(bnc, pi);
+ else if (safi == SAFI_MPLS_VPN && pi &&
+ pi->sub_type != BGP_ROUTE_IMPORTED)
+ /* avoid not redistributing mpls vpn routes */
+ return 1;
else
+ /* mpls-vpn routes with BGP_ROUTE_IMPORTED subtype */
return (bgp_isvalid_nexthop(bnc));
}
@@ -1190,7 +1196,12 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
bnc_is_valid_nexthop =
bgp_isvalid_nexthop_for_mpls(bnc, path) ? true
: false;
+ } else if (safi == SAFI_MPLS_VPN &&
+ path->sub_type != BGP_ROUTE_IMPORTED) {
+ /* avoid not redistributing mpls vpn routes */
+ bnc_is_valid_nexthop = true;
} else {
+ /* mpls-vpn routes with BGP_ROUTE_IMPORTED subtype */
if (bgp_update_martian_nexthop(
bnc->bgp, afi, safi, path->type,
path->sub_type, path->attr, dest)) {
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 9469a0778f..0cac58ade1 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -1080,6 +1080,7 @@ void bgp_notify_io_invalid(struct peer *peer, uint8_t code, uint8_t sub_code,
* @param orf_type Outbound Route Filtering type
* @param when_to_refresh Whether to refresh immediately or defer
* @param remove Whether to remove ORF for specified AFI/SAFI
+ * @param subtype BGP enhanced route refresh optional subtypes
*/
void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,
uint8_t orf_type, uint8_t when_to_refresh,
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 67a02249c8..0b821acfae 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -298,6 +298,29 @@ struct bgp_path_info *bgp_path_info_unlock(struct bgp_path_info *path)
return path;
}
+/* Tell whether the nexthop of 'pi' will be rewritten when advertised
+ * to peer 'to' on the MPLS VPN address-family.
+ * Returns false when the nexthop is left untouched:
+ * - iBGP-learned path reflected to an iBGP peer without
+ *   "next-hop-self force",
+ * - iBGP peer without "next-hop-self",
+ * - peer configured with "nexthop unchanged".
+ * Returns true otherwise (note: the ordering of the checks is
+ * significant; the first test only fires when BOTH sides are iBGP).
+ */
+bool bgp_path_info_nexthop_changed(struct bgp_path_info *pi, struct peer *to,
+				   afi_t afi)
+{
+	if (pi->peer->sort == BGP_PEER_IBGP && to->sort == BGP_PEER_IBGP &&
+	    !CHECK_FLAG(to->af_flags[afi][SAFI_MPLS_VPN],
+			PEER_FLAG_FORCE_NEXTHOP_SELF))
+		/* IBGP RR with no nexthop self force configured */
+		return false;
+
+	if (to->sort == BGP_PEER_IBGP &&
+	    !CHECK_FLAG(to->af_flags[afi][SAFI_MPLS_VPN],
+			PEER_FLAG_NEXTHOP_SELF))
+		/* IBGP RR with no nexthop self configured */
+		return false;
+
+	if (CHECK_FLAG(to->af_flags[afi][SAFI_MPLS_VPN],
+		       PEER_FLAG_NEXTHOP_UNCHANGED))
+		/* IBGP or EBGP with nexthop attribute unchanged */
+		return false;
+
+	return true;
+}
+
/* This function sets flag BGP_NODE_SELECT_DEFER based on condition */
static int bgp_dest_set_defer_flag(struct bgp_dest *dest, bool delete)
{
@@ -1996,6 +2019,7 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
int samepeer_safe = 0; /* for synthetic mplsvpns routes */
bool nh_reset = false;
uint64_t cum_bw;
+ mpls_label_t label;
if (DISABLE_BGP_ANNOUNCE)
return false;
@@ -2081,7 +2105,7 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
/* If it's labeled safi, make sure the route has a valid label. */
if (safi == SAFI_LABELED_UNICAST) {
- mpls_label_t label = bgp_adv_label(dest, pi, peer, afi, safi);
+ label = bgp_adv_label(dest, pi, peer, afi, safi);
if (!bgp_is_valid_label(&label)) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug("u%" PRIu64 ":s%" PRIu64
@@ -2090,6 +2114,29 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
p, &label);
return false;
}
+ } else if (safi == SAFI_MPLS_VPN &&
+ CHECK_FLAG(pi->flags, BGP_PATH_MPLSVPN_NH_LABEL_BIND) &&
+ pi->mplsvpn.bmnc.nh_label_bind_cache && peer &&
+ pi->peer != peer && pi->sub_type != BGP_ROUTE_IMPORTED &&
+ pi->sub_type != BGP_ROUTE_STATIC &&
+ bgp_mplsvpn_path_uses_valid_mpls_label(pi) &&
+ bgp_path_info_nexthop_changed(pi, peer, afi)) {
+ /* Redistributed mpls vpn route between distinct
+ * peers from 'pi->peer' to 'to',
+ * and an mpls label is used in this path,
+ * and there is a nh label bind entry,
+ * then get appropriate mpls local label
+ * and check its validity
+ */
+ label = bgp_mplsvpn_nh_label_bind_get_label(pi);
+ if (!bgp_is_valid_label(&label)) {
+ if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
+ zlog_debug("u%" PRIu64 ":s%" PRIu64
+ " %pFX is filtered - no valid label",
+ subgrp->update_group->id, subgrp->id,
+ p);
+ return false;
+ }
}
/* Do not send back route to sender. */
@@ -2924,7 +2971,7 @@ void subgroup_process_announce_selected(struct update_subgroup *subgrp,
* is pending (BGP_NODE_FIB_INSTALL_PENDING), do not advertise the
* route
*/
- advertise = bgp_check_advertise(bgp, dest);
+ advertise = bgp_check_advertise(bgp, dest, safi);
if (selected) {
if (subgroup_announce_check(dest, selected, subgrp, p, &attr,
@@ -2933,7 +2980,7 @@ void subgroup_process_announce_selected(struct update_subgroup *subgrp,
* in FIB, then it is advertised
*/
if (advertise) {
- if (!bgp_check_withdrawal(bgp, dest)) {
+ if (!bgp_check_withdrawal(bgp, dest, safi)) {
struct attr *adv_attr =
bgp_attr_intern(&attr);
@@ -3097,6 +3144,112 @@ need_null_label:
return true;
}
+/* Right now, since we only deal with per-prefix labels, it is not
+ * necessary to do this upon changes to best path. Exceptions:
+ * - label index has changed -> recalculate resulting label
+ * - path_info sub_type changed -> switch to/from null label value
+ * - no valid label (due to removed static label binding) -> get new one
+ */
+static void bgp_lu_handle_label_allocation(struct bgp *bgp,
+ struct bgp_dest *dest,
+ struct bgp_path_info *new_select,
+ struct bgp_path_info *old_select,
+ afi_t afi)
+{
+ mpls_label_t mpls_label_null;
+
+ if (bgp->allocate_mpls_labels[afi][SAFI_UNICAST]) {
+ if (new_select) {
+ if (!old_select ||
+ bgp_label_index_differs(new_select, old_select) ||
+ new_select->sub_type != old_select->sub_type ||
+ !bgp_is_valid_label(&dest->local_label)) {
+ /* control label imposition for local
+ * routes, aggregate and redistributed
+ * routes
+ */
+ mpls_label_null = MPLS_LABEL_IMPLICIT_NULL;
+ if (bgp_lu_need_null_label(bgp, new_select, afi,
+ &mpls_label_null)) {
+ if (CHECK_FLAG(
+ dest->flags,
+ BGP_NODE_REGISTERED_FOR_LABEL) ||
+ CHECK_FLAG(
+ dest->flags,
+ BGP_NODE_LABEL_REQUESTED))
+ bgp_unregister_for_label(dest);
+ dest->local_label = mpls_lse_encode(
+ mpls_label_null, 0, 0, 1);
+ bgp_set_valid_label(&dest->local_label);
+ } else
+ bgp_register_for_label(dest,
+ new_select);
+ }
+ } else if (CHECK_FLAG(dest->flags,
+ BGP_NODE_REGISTERED_FOR_LABEL) ||
+ CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) {
+ bgp_unregister_for_label(dest);
+ }
+ } else if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL) ||
+ CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) {
+ bgp_unregister_for_label(dest);
+ }
+}
+
+static struct interface *
+bgp_label_get_resolved_nh_iface(const struct bgp_path_info *pi)
+{
+ struct nexthop *nh;
+
+ if (pi->nexthop == NULL || pi->nexthop->nexthop == NULL ||
+ !CHECK_FLAG(pi->nexthop->flags, BGP_NEXTHOP_VALID))
+ /* next-hop is not valid */
+ return NULL;
+
+ nh = pi->nexthop->nexthop;
+ if (nh->ifindex == IFINDEX_INTERNAL &&
+ nh->type != NEXTHOP_TYPE_IPV4_IFINDEX &&
+ nh->type != NEXTHOP_TYPE_IPV6_IFINDEX)
+ /* next-hop does not contain valid interface */
+ return NULL;
+
+ return if_lookup_by_index(nh->ifindex, nh->vrf_id);
+}
+
+static void
+bgp_mplsvpn_handle_label_allocation(struct bgp *bgp, struct bgp_dest *dest,
+ struct bgp_path_info *new_select,
+ struct bgp_path_info *old_select, afi_t afi)
+{
+ struct interface *ifp;
+ struct bgp_interface *bgp_ifp;
+
+ if (bgp->allocate_mpls_labels[afi][SAFI_MPLS_VPN] && new_select) {
+ ifp = bgp_label_get_resolved_nh_iface(new_select);
+ if (ifp)
+ bgp_ifp = (struct bgp_interface *)(ifp->info);
+ else
+ bgp_ifp = NULL;
+ if (bgp_ifp &&
+ CHECK_FLAG(bgp_ifp->flags,
+ BGP_INTERFACE_MPLS_L3VPN_SWITCHING) &&
+ bgp_mplsvpn_path_uses_valid_mpls_label(new_select) &&
+ new_select->sub_type != BGP_ROUTE_IMPORTED &&
+ new_select->sub_type != BGP_ROUTE_STATIC)
+ bgp_mplsvpn_nh_label_bind_register_local_label(
+ bgp, dest, new_select);
+ else
+ bgp_mplsvpn_path_nh_label_bind_unlink(new_select);
+ } else {
+ if (new_select)
+ /* no mpls vpn allocation */
+ bgp_mplsvpn_path_nh_label_bind_unlink(new_select);
+ else if (old_select)
+ /* unlink old selection if any */
+ bgp_mplsvpn_path_nh_label_bind_unlink(old_select);
+ }
+}
+
/*
* old_select = The old best path
* new_select = the new best path
@@ -3123,7 +3276,6 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
struct bgp_path_info *old_select;
struct bgp_path_info_pair old_and_new;
int debug = 0;
- mpls_label_t mpls_label_null;
if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)) {
if (dest)
@@ -3174,49 +3326,18 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
old_select = old_and_new.old;
new_select = old_and_new.new;
- /* Do we need to allocate or free labels?
- * Right now, since we only deal with per-prefix labels, it is not
- * necessary to do this upon changes to best path. Exceptions:
- * - label index has changed -> recalculate resulting label
- * - path_info sub_type changed -> switch to/from null label value
- * - no valid label (due to removed static label binding) -> get new one
- */
- if (bgp->allocate_mpls_labels[afi][safi]) {
- if (new_select) {
- if (!old_select
- || bgp_label_index_differs(new_select, old_select)
- || new_select->sub_type != old_select->sub_type
- || !bgp_is_valid_label(&dest->local_label)) {
- /* control label imposition for local routes,
- * aggregate and redistributed routes
- */
- mpls_label_null = MPLS_LABEL_IMPLICIT_NULL;
- if (bgp_lu_need_null_label(bgp, new_select, afi,
- &mpls_label_null)) {
- if (CHECK_FLAG(
- dest->flags,
- BGP_NODE_REGISTERED_FOR_LABEL)
- || CHECK_FLAG(
- dest->flags,
- BGP_NODE_LABEL_REQUESTED))
- bgp_unregister_for_label(dest);
- dest->local_label = mpls_lse_encode(
- mpls_label_null, 0, 0, 1);
- bgp_set_valid_label(&dest->local_label);
- } else
- bgp_register_for_label(dest,
- new_select);
- }
- } else if (CHECK_FLAG(dest->flags,
- BGP_NODE_REGISTERED_FOR_LABEL)
- || CHECK_FLAG(dest->flags,
- BGP_NODE_LABEL_REQUESTED)) {
- bgp_unregister_for_label(dest);
- }
- } else if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)
- || CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) {
- bgp_unregister_for_label(dest);
- }
+ if (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST)
+ /* label unicast path :
+ * Do we need to allocate or free labels?
+ */
+ bgp_lu_handle_label_allocation(bgp, dest, new_select,
+ old_select, afi);
+ else if (safi == SAFI_MPLS_VPN)
+ /* mpls vpn path:
+ * Do we need to allocate or free labels?
+ */
+ bgp_mplsvpn_handle_label_allocation(bgp, dest, new_select,
+ old_select, afi);
if (debug)
zlog_debug(
@@ -3227,10 +3348,11 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
/* If best route remains the same and this is not due to user-initiated
* clear, see exactly what needs to be done.
*/
- if (old_select && old_select == new_select
- && !CHECK_FLAG(dest->flags, BGP_NODE_USER_CLEAR)
- && !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED)
- && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
+ if (old_select && old_select == new_select &&
+ !CHECK_FLAG(dest->flags, BGP_NODE_USER_CLEAR) &&
+ !CHECK_FLAG(dest->flags, BGP_NODE_PROCESS_CLEAR) &&
+ !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) &&
+ !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
if (bgp_zebra_has_route_changed(old_select)) {
#ifdef ENABLE_BGP_VNC
vnc_import_bgp_add_route(bgp, p, old_select);
@@ -3284,18 +3406,20 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
*/
UNSET_FLAG(dest->flags, BGP_NODE_USER_CLEAR);
+ /* If the process wants to force deletion this flag will be set
+ */
+ UNSET_FLAG(dest->flags, BGP_NODE_PROCESS_CLEAR);
+
/* bestpath has changed; bump version */
if (old_select || new_select) {
bgp_bump_version(dest);
- if (!bgp->t_rmap_def_originate_eval) {
- bgp_lock(bgp);
+ if (!bgp->t_rmap_def_originate_eval)
event_add_timer(
bm->master,
update_group_refresh_default_originate_route_map,
- bgp, RMAP_DEFAULT_ORIGINATE_EVAL_TIMER,
+ bgp, bgp->rmap_def_originate_eval_timer,
&bgp->t_rmap_def_originate_eval);
- }
}
if (old_select)
@@ -4028,7 +4152,6 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
afi_t nh_afi;
bool force_evpn_import = false;
safi_t orig_safi = safi;
- bool leak_success = true;
int allowas_in = 0;
if (frrtrace_enabled(frr_bgp, process_update)) {
@@ -4172,6 +4295,16 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
goto filtered;
}
+ if ((afi == AFI_IP || afi == AFI_IP6) && safi == SAFI_MPLS_VPN &&
+ bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT &&
+ !CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
+ vpn_leak_to_vrf_no_retain_filter_check(bgp, attr, afi)) {
+ reason =
+ "no import. Filtered by no bgp retain route-target all";
+ goto filtered;
+ }
+
/* If the route has Node Target Extended Communities, check
* if it's allowed to be installed locally.
*/
@@ -4623,10 +4756,12 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
/* Nexthop reachability check - for unicast and
* labeled-unicast.. */
- if (((afi == AFI_IP || afi == AFI_IP6)
- && (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST))
- || (safi == SAFI_EVPN &&
- bgp_evpn_is_prefix_nht_supported(p))) {
+ if (((afi == AFI_IP || afi == AFI_IP6) &&
+ (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST ||
+ (safi == SAFI_MPLS_VPN &&
+ pi->sub_type != BGP_ROUTE_IMPORTED))) ||
+ (safi == SAFI_EVPN &&
+ bgp_evpn_is_prefix_nht_supported(p))) {
if (safi != SAFI_EVPN && peer->sort == BGP_PEER_EBGP
&& peer->ttl == BGP_DEFAULT_TTL
&& !CHECK_FLAG(peer->flags,
@@ -4647,10 +4782,14 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, nh_afi,
safi, pi, NULL, connected,
bgp_nht_param_prefix) ||
- CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD))
+ CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)) {
+ if (accept_own)
+ bgp_path_info_set_flag(
+ dest, pi, BGP_PATH_ACCEPT_OWN);
+
bgp_path_info_set_flag(dest, pi,
BGP_PATH_VALID);
- else {
+ } else {
if (BGP_DEBUG(nht, NHT)) {
zlog_debug("%s(%pI4): NH unresolved",
__func__,
@@ -4660,10 +4799,13 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
BGP_PATH_VALID);
}
} else {
+ /* case mpls-vpn routes with accept-own community
+ * (which have the BGP_ROUTE_IMPORTED subtype)
+ * case other afi/safi not supporting nexthop tracking
+ */
if (accept_own)
bgp_path_info_set_flag(dest, pi,
BGP_PATH_ACCEPT_OWN);
-
bgp_path_info_set_flag(dest, pi, BGP_PATH_VALID);
}
@@ -4715,7 +4857,7 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
}
if ((SAFI_MPLS_VPN == safi)
&& (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
- leak_success = vpn_leak_to_vrf_update(bgp, pi, prd);
+ vpn_leak_to_vrf_update(bgp, pi, prd);
}
#ifdef ENABLE_BGP_VNC
@@ -4730,13 +4872,6 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
type, sub_type, NULL);
}
#endif
- if ((safi == SAFI_MPLS_VPN) &&
- !CHECK_FLAG(bgp->af_flags[afi][safi],
- BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
- !leak_success) {
- bgp_unlink_nexthop(pi);
- bgp_path_info_delete(dest, pi);
- }
return;
} // End of implicit withdraw
@@ -4793,9 +4928,11 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
}
/* Nexthop reachability check. */
- if (((afi == AFI_IP || afi == AFI_IP6)
- && (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST))
- || (safi == SAFI_EVPN && bgp_evpn_is_prefix_nht_supported(p))) {
+ if (((afi == AFI_IP || afi == AFI_IP6) &&
+ (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST ||
+ (safi == SAFI_MPLS_VPN &&
+ new->sub_type != BGP_ROUTE_IMPORTED))) ||
+ (safi == SAFI_EVPN && bgp_evpn_is_prefix_nht_supported(p))) {
if (safi != SAFI_EVPN && peer->sort == BGP_PEER_EBGP
&& peer->ttl == BGP_DEFAULT_TTL
&& !CHECK_FLAG(peer->flags,
@@ -4810,18 +4947,25 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
if (bgp_find_or_add_nexthop(bgp, bgp, nh_afi, safi, new, NULL,
connected, bgp_nht_param_prefix) ||
- CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD))
+ CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)) {
+ if (accept_own)
+ bgp_path_info_set_flag(dest, new,
+ BGP_PATH_ACCEPT_OWN);
+
bgp_path_info_set_flag(dest, new, BGP_PATH_VALID);
- else {
+ } else {
if (BGP_DEBUG(nht, NHT))
zlog_debug("%s(%pI4): NH unresolved", __func__,
&attr_new->nexthop);
bgp_path_info_unset_flag(dest, new, BGP_PATH_VALID);
}
} else {
+ /* case mpls-vpn routes with accept-own community
+ * (which have the BGP_ROUTE_IMPORTED subtype)
+ * case other afi/safi not supporting nexthop tracking
+ */
if (accept_own)
bgp_path_info_set_flag(dest, new, BGP_PATH_ACCEPT_OWN);
-
bgp_path_info_set_flag(dest, new, BGP_PATH_VALID);
}
@@ -4878,7 +5022,7 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
}
if ((SAFI_MPLS_VPN == safi)
&& (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
- leak_success = vpn_leak_to_vrf_update(bgp, new, prd);
+ vpn_leak_to_vrf_update(bgp, new, prd);
}
#ifdef ENABLE_BGP_VNC
if (SAFI_MPLS_VPN == safi) {
@@ -4892,13 +5036,6 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
sub_type, NULL);
}
#endif
- if ((safi == SAFI_MPLS_VPN) &&
- !CHECK_FLAG(bgp->af_flags[afi][safi],
- BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
- !leak_success) {
- bgp_unlink_nexthop(new);
- bgp_path_info_delete(dest, new);
- }
return;
@@ -7754,7 +7891,7 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
/* If suppress fib is enabled and route not installed
* in FIB, skip the route
*/
- if (!bgp_check_advertise(bgp, dest))
+ if (!bgp_check_advertise(bgp, dest, safi))
continue;
match = 0;
@@ -8268,7 +8405,7 @@ void bgp_aggregate_increment(struct bgp *bgp, const struct prefix *p,
/* If suppress fib is enabled and route not installed
* in FIB, do not update the aggregate route
*/
- if (!bgp_check_advertise(bgp, pi->net))
+ if (!bgp_check_advertise(bgp, pi->net, safi))
return;
child = bgp_node_get(table, p);
@@ -11790,7 +11927,7 @@ int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi,
"\nDisplayed %ld routes and %ld total paths\n",
output_cum, total_cum);
} else {
- if (use_json && output_cum == 0)
+ if (use_json && output_cum == 0 && json_header_depth == 0)
vty_out(vty, "{}\n");
}
return CMD_SUCCESS;
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 311d181f90..ccfd9d00d8 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -246,6 +246,22 @@ struct bgp_path_info_extra {
struct bgp_path_mh_info *mh_info;
};
+struct bgp_mplsvpn_label_nh {
+ /* For nexthop per label linked list */
+ LIST_ENTRY(bgp_path_info) label_nh_thread;
+
+ /* Back pointer to the bgp label per nexthop structure */
+ struct bgp_label_per_nexthop_cache *label_nexthop_cache;
+};
+
+struct bgp_mplsvpn_nh_label_bind {
+ /* For mplsvpn nexthop label bind linked list */
+ LIST_ENTRY(bgp_path_info) nh_label_bind_thread;
+
+ /* Back pointer to the bgp mplsvpn nexthop label bind structure */
+ struct bgp_mplsvpn_nh_label_bind_cache *nh_label_bind_cache;
+};
+
struct bgp_path_info {
/* For linked list. */
struct bgp_path_info *next;
@@ -298,6 +314,8 @@ struct bgp_path_info {
#define BGP_PATH_ANNC_NH_SELF (1 << 14)
#define BGP_PATH_LINK_BW_CHG (1 << 15)
#define BGP_PATH_ACCEPT_OWN (1 << 16)
+#define BGP_PATH_MPLSVPN_LABEL_NH (1 << 17)
+#define BGP_PATH_MPLSVPN_NH_LABEL_BIND (1 << 18)
/* BGP route type. This can be static, RIP, OSPF, BGP etc. */
uint8_t type;
@@ -320,11 +338,10 @@ struct bgp_path_info {
uint32_t addpath_rx_id;
struct bgp_addpath_info_data tx_addpath;
- /* For nexthop per label linked list */
- LIST_ENTRY(bgp_path_info) label_nh_thread;
-
- /* Back pointer to the bgp label per nexthop structure */
- struct bgp_label_per_nexthop_cache *label_nexthop_cache;
+ union {
+ struct bgp_mplsvpn_label_nh blnc;
+ struct bgp_mplsvpn_nh_label_bind bmnc;
+ } mplsvpn;
};
/* Structure used in BGP path selection */
@@ -592,8 +609,12 @@ static inline void prep_for_rmap_apply(struct bgp_path_info *dst_pi,
}
}
-static inline bool bgp_check_advertise(struct bgp *bgp, struct bgp_dest *dest)
+static inline bool bgp_check_advertise(struct bgp *bgp, struct bgp_dest *dest,
+ safi_t safi)
{
+ if (!bgp_fibupd_safi(safi))
+ return true;
+
return (!(BGP_SUPPRESS_FIB_ENABLED(bgp) &&
CHECK_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING) &&
(!bgp_option_check(BGP_OPT_NO_FIB))));
@@ -605,11 +626,12 @@ static inline bool bgp_check_advertise(struct bgp *bgp, struct bgp_dest *dest)
* This function assumes that bgp_check_advertise was already returned
* as good to go.
*/
-static inline bool bgp_check_withdrawal(struct bgp *bgp, struct bgp_dest *dest)
+static inline bool bgp_check_withdrawal(struct bgp *bgp, struct bgp_dest *dest,
+ safi_t safi)
{
struct bgp_path_info *pi, *selected = NULL;
- if (!BGP_SUPPRESS_FIB_ENABLED(bgp))
+ if (!bgp_fibupd_safi(safi) || !BGP_SUPPRESS_FIB_ENABLED(bgp))
return false;
for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
@@ -695,6 +717,8 @@ extern struct bgp_dest *bgp_afi_node_get(struct bgp_table *table, afi_t afi,
struct prefix_rd *prd);
extern struct bgp_path_info *bgp_path_info_lock(struct bgp_path_info *path);
extern struct bgp_path_info *bgp_path_info_unlock(struct bgp_path_info *path);
+extern bool bgp_path_info_nexthop_changed(struct bgp_path_info *pi,
+ struct peer *to, afi_t afi);
extern struct bgp_path_info *
bgp_get_imported_bpi_ultimate(struct bgp_path_info *info);
extern void bgp_path_info_add(struct bgp_dest *dest, struct bgp_path_info *pi);
diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h
index 9027af5ba3..91941315f7 100644
--- a/bgpd/bgp_table.h
+++ b/bgpd/bgp_table.h
@@ -100,6 +100,7 @@ struct bgp_node {
#define BGP_NODE_FIB_INSTALLED (1 << 6)
#define BGP_NODE_LABEL_REQUESTED (1 << 7)
#define BGP_NODE_SOFT_RECONFIG (1 << 8)
+#define BGP_NODE_PROCESS_CLEAR (1 << 9)
struct bgp_addpath_node_data tx_addpath;
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index 5cb081ba61..0fe6180bea 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -2110,7 +2110,6 @@ void update_group_refresh_default_originate_route_map(struct event *thread)
update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
reason);
EVENT_OFF(bgp->t_rmap_def_originate_eval);
- bgp_unlock(bgp);
}
/*
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index eea5280410..68fd11a042 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -355,6 +355,7 @@ static void subgroup_coalesce_timer(struct event *thread)
{
struct update_subgroup *subgrp;
struct bgp *bgp;
+ safi_t safi;
subgrp = EVENT_ARG(thread);
if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
@@ -365,7 +366,7 @@ static void subgroup_coalesce_timer(struct event *thread)
subgrp->v_coalesce = 0;
bgp = SUBGRP_INST(subgrp);
subgroup_announce_route(subgrp);
-
+ safi = SUBGRP_SAFI(subgrp);
/* While the announce_route() may kick off the route advertisement timer
* for
@@ -376,7 +377,8 @@ static void subgroup_coalesce_timer(struct event *thread)
* announce, this is the method currently employed to trigger the EOR.
*/
if (!bgp_update_delay_active(SUBGRP_INST(subgrp)) &&
- !(BGP_SUPPRESS_FIB_ENABLED(bgp))) {
+ !(bgp_fibupd_safi(safi) && BGP_SUPPRESS_FIB_ENABLED(bgp))) {
+
struct peer_af *paf;
struct peer *peer;
@@ -597,7 +599,8 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest,
* the flag PEER_STATUS_ADV_DELAY which will allow
* more routes to be sent in the update message
*/
- if (BGP_SUPPRESS_FIB_ENABLED(bgp)) {
+ if (bgp_fibupd_safi(safi) &&
+ BGP_SUPPRESS_FIB_ENABLED(bgp)) {
adv_peer = PAF_PEER(paf);
if (!bgp_adv_fifo_count(
&subgrp->sync->withdraw))
@@ -1068,7 +1071,7 @@ void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi,
/* If suppress fib is enabled, the route will be advertised when
* FIB status is received
*/
- if (!bgp_check_advertise(bgp, dest))
+ if (!bgp_check_advertise(bgp, dest, safi))
return;
update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx);
diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c
index e04d5ae245..49b7f51286 100644
--- a/bgpd/bgp_updgrp_packet.c
+++ b/bgpd/bgp_updgrp_packet.c
@@ -789,6 +789,29 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp)
safi);
label_pnt = &label;
num_labels = 1;
+ } else if (safi == SAFI_MPLS_VPN && path &&
+ CHECK_FLAG(path->flags,
+ BGP_PATH_MPLSVPN_NH_LABEL_BIND) &&
+ path->mplsvpn.bmnc.nh_label_bind_cache &&
+ path->peer && path->peer != peer &&
+ path->sub_type != BGP_ROUTE_IMPORTED &&
+ path->sub_type != BGP_ROUTE_STATIC &&
+ bgp_mplsvpn_path_uses_valid_mpls_label(
+ path) &&
+ bgp_path_info_nexthop_changed(path, peer,
+ afi)) {
+ /* Redistributed mpls vpn route between distinct
+ * peers from 'pi->peer' to 'to',
+ * and an mpls label is used in this path,
+ * and there is a nh label bind entry,
+ * then get appropriate mpls local label. When
+ * called here, 'get_label()' returns a valid
+ * label.
+ */
+ label = bgp_mplsvpn_nh_label_bind_get_label(
+ path);
+ label_pnt = &label;
+ num_labels = 1;
} else if (path && path->extra) {
label_pnt = &path->extra->label[0];
num_labels = path->extra->num_labels;
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index b524df2a1d..d03398c802 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -7959,6 +7959,26 @@ DEFPY (bgp_condadv_period,
return CMD_SUCCESS;
}
+DEFPY (bgp_def_originate_eval,
+ bgp_def_originate_eval_cmd,
+ "[no$no] bgp default-originate timer (0-3600)$timer",
+ NO_STR
+ BGP_STR
+ "Control default-originate\n"
+ "Set period to rescan BGP table to check if default-originate condition is met\n"
+ "Period between BGP table scans, in seconds; default 5\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+ bgp->rmap_def_originate_eval_timer =
+ no ? RMAP_DEFAULT_ORIGINATE_EVAL_TIMER : timer;
+
+ if (bgp->t_rmap_def_originate_eval)
+ EVENT_OFF(bgp->t_rmap_def_originate_eval);
+
+ return CMD_SUCCESS;
+}
+
DEFPY (neighbor_advertise_map,
neighbor_advertise_map_cmd,
"[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor advertise-map RMAP_NAME$advertise_str <exist-map|non-exist-map>$exist RMAP_NAME$condition_str",
@@ -9973,6 +9993,7 @@ DEFPY (bgp_imexport_vpn,
bool yes = true;
int flag;
enum vpn_policy_direction dir;
+ struct bgp *bgp_default = bgp_get_default();
if (argv_find(argv, argc, "no", &idx))
yes = false;
@@ -10008,14 +10029,18 @@ DEFPY (bgp_imexport_vpn,
SET_FLAG(bgp->af_flags[afi][safi], flag);
if (!previous_state) {
/* trigger export current vrf */
- vpn_leak_postchange(dir, afi, bgp_get_default(), bgp);
+ vpn_leak_postchange(dir, afi, bgp_default, bgp);
}
} else {
if (previous_state) {
/* trigger un-export current vrf */
- vpn_leak_prechange(dir, afi, bgp_get_default(), bgp);
+ vpn_leak_prechange(dir, afi, bgp_default, bgp);
}
UNSET_FLAG(bgp->af_flags[afi][safi], flag);
+ if (previous_state && bgp_default &&
+ !CHECK_FLAG(bgp_default->af_flags[afi][SAFI_MPLS_VPN],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL))
+ vpn_leak_no_retain(bgp, bgp_default, afi);
}
hook_call(bgp_snmp_init_stats, bgp);
@@ -18668,6 +18693,12 @@ int bgp_config_write(struct vty *vty)
" bgp conditional-advertisement timer %u\n",
bgp->condition_check_period);
+ /* default-originate timer configuration */
+ if (bgp->rmap_def_originate_eval_timer !=
+ RMAP_DEFAULT_ORIGINATE_EVAL_TIMER)
+ vty_out(vty, " bgp default-originate timer %u\n",
+ bgp->rmap_def_originate_eval_timer);
+
/* peer-group */
for (ALL_LIST_ELEMENTS(bgp->group, node, nnode, group)) {
bgp_config_write_peer_global(vty, bgp, group->conf);
@@ -19017,6 +19048,12 @@ static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
vty_out(vty, " mpls bgp forwarding\n");
write++;
}
+ if (CHECK_FLAG(iifp->flags,
+ BGP_INTERFACE_MPLS_L3VPN_SWITCHING)) {
+ vty_out(vty,
+ " mpls bgp l3vpn-multi-domain-switching\n");
+ write++;
+ }
if_vty_config_end(vty);
}
@@ -19067,6 +19104,35 @@ DEFPY(mpls_bgp_forwarding, mpls_bgp_forwarding_cmd,
return CMD_SUCCESS;
}
+DEFPY(mpls_bgp_l3vpn_multi_domain_switching,
+ mpls_bgp_l3vpn_multi_domain_switching_cmd,
+ "[no$no] mpls bgp l3vpn-multi-domain-switching",
+ NO_STR MPLS_STR BGP_STR
+ "Bind a local MPLS label to incoming L3VPN updates\n")
+{
+ bool check;
+ struct bgp_interface *iifp;
+
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ iifp = ifp->info;
+ if (!iifp) {
+ vty_out(vty, "Interface %s not available\n", ifp->name);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ check = CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_L3VPN_SWITCHING);
+ if (check == !no)
+ return CMD_SUCCESS;
+ if (no)
+ UNSET_FLAG(iifp->flags, BGP_INTERFACE_MPLS_L3VPN_SWITCHING);
+ else
+ SET_FLAG(iifp->flags, BGP_INTERFACE_MPLS_L3VPN_SWITCHING);
+ /* trigger a nht update on eBGP sessions */
+ if (if_is_operative(ifp))
+ bgp_nht_ifp_up(ifp);
+
+ return CMD_SUCCESS;
+}
+
DEFPY (bgp_inq_limit,
bgp_inq_limit_cmd,
"bgp input-queue-limit (1-4294967295)$limit",
@@ -19126,6 +19192,8 @@ static void bgp_vty_if_init(void)
/* "mpls bgp forwarding" commands. */
install_element(INTERFACE_NODE, &mpls_bgp_forwarding_cmd);
+ install_element(INTERFACE_NODE,
+ &mpls_bgp_l3vpn_multi_domain_switching_cmd);
}
void bgp_vty_init(void)
@@ -20330,6 +20398,9 @@ void bgp_vty_init(void)
install_element(BGP_VPNV4_NODE, &neighbor_advertise_map_cmd);
install_element(BGP_VPNV6_NODE, &neighbor_advertise_map_cmd);
+ /* bgp default-originate timer */
+ install_element(BGP_NODE, &bgp_def_originate_eval_cmd);
+
/* neighbor maximum-prefix-out commands. */
install_element(BGP_NODE, &neighbor_maximum_prefix_out_cmd);
install_element(BGP_NODE, &no_neighbor_maximum_prefix_out_cmd);
@@ -20943,16 +21014,13 @@ static const char *community_list_config_str(struct community_entry *entry)
{
const char *str;
- if (entry->any)
- str = "";
- else {
- if (entry->style == COMMUNITY_LIST_STANDARD)
- str = community_str(entry->u.com, false, false);
- else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD)
- str = lcommunity_str(entry->u.lcom, false, false);
- else
- str = entry->config;
- }
+ if (entry->style == COMMUNITY_LIST_STANDARD)
+ str = community_str(entry->u.com, false, false);
+ else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD)
+ str = lcommunity_str(entry->u.lcom, false, false);
+ else
+ str = entry->config;
+
return str;
}
@@ -20975,13 +21043,8 @@ static void community_list_show(struct vty *vty, struct community_list *list)
: "expanded",
list->name);
}
- if (entry->any)
- vty_out(vty, " %s\n",
- community_direct_str(entry->direct));
- else
- vty_out(vty, " %s %s\n",
- community_direct_str(entry->direct),
- community_list_config_str(entry));
+ vty_out(vty, " %s %s\n", community_direct_str(entry->direct),
+ community_list_config_str(entry));
}
}
@@ -21340,13 +21403,8 @@ static void lcommunity_list_show(struct vty *vty, struct community_list *list)
: "expanded",
list->name);
}
- if (entry->any)
- vty_out(vty, " %s\n",
- community_direct_str(entry->direct));
- else
- vty_out(vty, " %s %s\n",
- community_direct_str(entry->direct),
- community_list_config_str(entry));
+ vty_out(vty, " %s %s\n", community_direct_str(entry->direct),
+ community_list_config_str(entry));
}
}
@@ -21642,13 +21700,8 @@ static void extcommunity_list_show(struct vty *vty, struct community_list *list)
: "expanded",
list->name);
}
- if (entry->any)
- vty_out(vty, " %s\n",
- community_direct_str(entry->direct));
- else
- vty_out(vty, " %s %s\n",
- community_direct_str(entry->direct),
- community_list_config_str(entry));
+ vty_out(vty, " %s %s\n", community_direct_str(entry->direct),
+ community_list_config_str(entry));
}
}
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index 826723b92d..a105b6de3f 100644
--- a/bgpd/bgp_vty.h
+++ b/bgpd/bgp_vty.h
@@ -145,7 +145,7 @@ extern void bgp_config_write_wpkt_quanta(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_rpkt_quanta(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_listen(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_coalesce_time(struct vty *vty, struct bgp *bgp);
-extern int bgp_vty_return(struct vty *vty, int ret);
+extern int bgp_vty_return(struct vty *vty, enum bgp_create_error_code ret);
extern bool bgp_config_inprocess(void);
extern struct peer *peer_and_group_lookup_vty(struct vty *vty,
const char *peer_str);
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 1965cd2704..6513df33fa 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -2984,9 +2984,9 @@ static int bgp_zebra_process_local_l3vni(ZAPI_CALLBACK_ARGS)
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug(
- "Rx L3-VNI ADD VRF %s VNI %u RMAC svi-mac %pEA vrr-mac %pEA filter %s svi-if %u",
- vrf_id_to_name(vrf_id), l3vni, &svi_rmac,
- &vrr_rmac,
+ "Rx L3-VNI ADD VRF %s VNI %u Originator-IP %pI4 RMAC svi-mac %pEA vrr-mac %pEA filter %s svi-if %u",
+ vrf_id_to_name(vrf_id), l3vni, &originator_ip,
+ &svi_rmac, &vrr_rmac,
filter ? "prefix-routes-only" : "none",
svi_ifindex);
@@ -3914,10 +3914,13 @@ int bgp_zebra_srv6_manager_release_locator_chunk(const char *name)
void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
ifindex_t ifindex, vrf_id_t vrf_id,
- enum lsp_types_t ltype, struct prefix *p)
+ enum lsp_types_t ltype, struct prefix *p,
+ uint32_t num_labels,
+ mpls_label_t out_labels[])
{
struct zapi_labels zl = {};
struct zapi_nexthop *znh;
+ int i = 0;
zl.type = ltype;
zl.local_label = label;
@@ -3935,8 +3938,16 @@ void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
: NEXTHOP_TYPE_IPV6_IFINDEX;
znh->ifindex = ifindex;
znh->vrf_id = vrf_id;
- znh->label_num = 0;
-
+ if (num_labels == 0)
+ znh->label_num = 0;
+ else {
+ if (num_labels > MPLS_MAX_LABELS)
+ znh->label_num = MPLS_MAX_LABELS;
+ else
+ znh->label_num = num_labels;
+ for (i = 0; i < znh->label_num; i++)
+ znh->labels[i] = out_labels[i];
+ }
/* vrf_id is DEFAULT_VRF */
zebra_send_mpls_labels(zclient, cmd, &zl);
}
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 7c85d86b31..7c60b542f8 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -121,5 +121,6 @@ extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name);
extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
ifindex_t index, vrf_id_t vrfid,
enum lsp_types_t ltype,
- struct prefix *p);
+ struct prefix *p, uint32_t num_labels,
+ mpls_label_t out_labels[]);
#endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 5ce5abae4b..27d9c49efb 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -80,7 +80,6 @@
#include "bgp_trace.h"
DEFINE_MTYPE_STATIC(BGPD, PEER_TX_SHUTDOWN_MSG, "Peer shutdown message (TX)");
-DEFINE_MTYPE_STATIC(BGPD, BGP_EVPN_INFO, "BGP EVPN instance information");
DEFINE_QOBJ_TYPE(bgp_master);
DEFINE_QOBJ_TYPE(bgp);
DEFINE_QOBJ_TYPE(peer);
@@ -2320,6 +2319,7 @@ int peer_activate(struct peer *peer, afi_t afi, safi_t safi)
struct listnode *node, *nnode;
struct peer *tmp_peer;
struct bgp *bgp;
+ safi_t safi_check;
/* Nothing to do if we've already activated this peer */
if (peer->afc[afi][safi])
@@ -2350,16 +2350,22 @@ int peer_activate(struct peer *peer, afi_t afi, safi_t safi)
}
/* If this is the first peer to be activated for this
- * afi/labeled-unicast recalc bestpaths to trigger label allocation */
- if (ret != BGP_ERR_PEER_SAFI_CONFLICT && safi == SAFI_LABELED_UNICAST
- && !bgp->allocate_mpls_labels[afi][SAFI_UNICAST]) {
+ * afi/labeled-unicast or afi/mpls-vpn, recalc bestpaths to trigger
+ * label allocation */
+ if (safi == SAFI_LABELED_UNICAST)
+ safi_check = SAFI_UNICAST;
+ else
+ safi_check = safi;
+ if (ret != BGP_ERR_PEER_SAFI_CONFLICT &&
+ (safi == SAFI_LABELED_UNICAST || safi == SAFI_MPLS_VPN) &&
+ !bgp->allocate_mpls_labels[afi][safi_check]) {
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug(
- "peer(s) are now active for labeled-unicast, allocate MPLS labels");
-
- bgp->allocate_mpls_labels[afi][SAFI_UNICAST] = 1;
- bgp_recalculate_afi_safi_bestpaths(bgp, afi, SAFI_UNICAST);
+ "peer(s) are now active for %s, allocate MPLS labels",
+ safi2str(safi));
+ bgp->allocate_mpls_labels[afi][safi_check] = 1;
+ bgp_recalculate_afi_safi_bestpaths(bgp, afi, safi_check);
}
if (safi == SAFI_FLOWSPEC) {
@@ -2425,6 +2431,7 @@ int peer_deactivate(struct peer *peer, afi_t afi, safi_t safi)
struct peer *tmp_peer;
struct listnode *node, *nnode;
struct bgp *bgp;
+ safi_t safi_check;
/* Nothing to do if we've already de-activated this peer */
if (!peer->afc[afi][safi])
@@ -2446,17 +2453,22 @@ int peer_deactivate(struct peer *peer, afi_t afi, safi_t safi)
bgp = peer->bgp;
/* If this is the last peer to be deactivated for this
- * afi/labeled-unicast recalc bestpaths to trigger label deallocation */
- if (safi == SAFI_LABELED_UNICAST
- && bgp->allocate_mpls_labels[afi][SAFI_UNICAST]
- && !bgp_afi_safi_peer_exists(bgp, afi, safi)) {
+ * afi/labeled-unicast or afi/mpls-vpn, recalc bestpaths to trigger
+ * label deallocation */
+ if (safi == SAFI_LABELED_UNICAST)
+ safi_check = SAFI_UNICAST;
+ else
+ safi_check = safi;
+ if ((safi == SAFI_LABELED_UNICAST || safi == SAFI_MPLS_VPN) &&
+ bgp->allocate_mpls_labels[afi][safi_check] &&
+ !bgp_afi_safi_peer_exists(bgp, afi, safi)) {
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug(
- "peer(s) are no longer active for labeled-unicast, deallocate MPLS labels");
-
- bgp->allocate_mpls_labels[afi][SAFI_UNICAST] = 0;
- bgp_recalculate_afi_safi_bestpaths(bgp, afi, SAFI_UNICAST);
+ "peer(s) are no longer active for %s, deallocate MPLS labels",
+ safi2str(safi));
+ bgp->allocate_mpls_labels[afi][safi_check] = 0;
+ bgp_recalculate_afi_safi_bestpaths(bgp, afi, safi_check);
}
return ret;
}
@@ -3331,6 +3343,7 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp_addpath_init_bgp_data(&bgp->tx_addpath);
bgp->fast_convergence = false;
bgp->llgr_stale_time = BGP_DEFAULT_LLGR_STALE_TIME;
+ bgp->rmap_def_originate_eval_timer = RMAP_DEFAULT_ORIGINATE_EVAL_TIMER;
#ifdef ENABLE_BGP_VNC
if (inst_type != BGP_INSTANCE_TYPE_VRF) {
@@ -3361,6 +3374,8 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp_label_per_nexthop_cache_init(
&bgp->mpls_labels_per_nexthop[afi]);
+ bgp_mplsvpn_nh_label_bind_cache_init(&bgp->mplsvpn_nh_label_bind);
+
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
@@ -3403,8 +3418,6 @@ static struct bgp *bgp_create(as_t *as, const char *name,
/* assign a unique rd id for auto derivation of vrf's RD */
bf_assign_index(bm->rd_idspace, bgp->vrf_rd_id);
- bgp->evpn_info = XCALLOC(MTYPE_BGP_EVPN_INFO,
- sizeof(struct bgp_evpn_info));
bgp_evpn_init(bgp);
bgp_evpn_vrf_es_init(bgp);
bgp_pbr_init(bgp);
@@ -3704,11 +3717,8 @@ void bgp_instance_down(struct bgp *bgp)
struct listnode *next;
/* Stop timers. */
- if (bgp->t_rmap_def_originate_eval) {
+ if (bgp->t_rmap_def_originate_eval)
EVENT_OFF(bgp->t_rmap_def_originate_eval);
- bgp_unlock(bgp); /* TODO - This timer is started with a lock -
- why? */
- }
/* Bring down peers, so corresponding routes are purged. */
for (ALL_LIST_ELEMENTS(bgp->peer, node, next, peer)) {
@@ -3811,11 +3821,8 @@ int bgp_delete(struct bgp *bgp)
vpn_leak_zebra_vrf_label_withdraw(bgp, AFI_IP6);
/* Stop timers. */
- if (bgp->t_rmap_def_originate_eval) {
+ if (bgp->t_rmap_def_originate_eval)
EVENT_OFF(bgp->t_rmap_def_originate_eval);
- bgp_unlock(bgp); /* TODO - This timer is started with a lock -
- why? */
- }
/* Inform peers we're going down. */
for (ALL_LIST_ELEMENTS(bgp->peer, node, next, peer))
@@ -3977,7 +3984,6 @@ void bgp_free(struct bgp *bgp)
bgp_evpn_cleanup(bgp);
bgp_pbr_cleanup(bgp);
bgp_srv6_cleanup(bgp);
- XFREE(MTYPE_BGP_EVPN_INFO, bgp->evpn_info);
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
enum vpn_policy_direction dir;
@@ -8259,6 +8265,7 @@ void bgp_init(unsigned short instance)
bgp_lp_vty_init();
bgp_label_per_nexthop_init();
+ bgp_mplsvpn_nexthop_init();
cmd_variable_handler_register(bgp_viewvrf_var_handlers);
}
@@ -8311,6 +8318,7 @@ struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp,
int ret;
struct peer *peer;
union sockunion su;
+ struct peer_group *group;
/* Get peer sockunion. */
ret = str2sockunion(ip_str, &su);
@@ -8320,6 +8328,11 @@ struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp,
peer = peer_lookup_by_hostname(bgp, ip_str);
if (!peer) {
+ group = peer_group_lookup(bgp, ip_str);
+ peer = listnode_head(group->peer);
+ }
+
+ if (!peer) {
if (use_json) {
json_object *json_no = NULL;
json_no = json_object_new_object();
@@ -8396,3 +8409,16 @@ static ssize_t printfrr_bp(struct fbuf *buf, struct printfrr_eargs *ea,
return bprintfrr(buf, "%s(%s)", peer->host,
peer->hostname ? peer->hostname : "Unknown");
}
+
+const struct message bgp_martian_type_str[] = {
+ {BGP_MARTIAN_IF_IP, "Self Interface IP"},
+ {BGP_MARTIAN_TUN_IP, "Self Tunnel IP"},
+ {BGP_MARTIAN_IF_MAC, "Self Interface MAC"},
+ {BGP_MARTIAN_RMAC, "Self RMAC"},
+ {BGP_MARTIAN_SOO, "Self Site-of-Origin"},
+ {0}};
+
+const char *bgp_martian_type2str(enum bgp_martian_type mt)
+{
+ return lookup_msg(bgp_martian_type_str, mt, "Unknown Martian Type");
+}
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 3da867a441..95bc07d167 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -331,6 +331,9 @@ struct as_confed {
char *as_pretty;
};
+struct bgp_mplsvpn_nh_label_bind_cache;
+PREDECL_RBTREE_UNIQ(bgp_mplsvpn_nh_label_bind_cache);
+
/* BGP instance structure. */
struct bgp {
/* AS number of this BGP instance. */
@@ -578,6 +581,9 @@ struct bgp {
struct bgp_label_per_nexthop_cache_head
mpls_labels_per_nexthop[AFI_MAX];
+ /* Tree for mplsvpn next-hop label bind cache */
+ struct bgp_mplsvpn_nh_label_bind_cache_head mplsvpn_nh_label_bind;
+
/* Allocate hash entries to store policy routing information
* The hash are used to host pbr rules somewhere.
* Actually, pbr will only be used by flowspec
@@ -601,6 +607,7 @@ struct bgp {
/* timer to re-evaluate neighbor default-originate route-maps */
struct event *t_rmap_def_originate_eval;
+ uint16_t rmap_def_originate_eval_timer;
#define RMAP_DEFAULT_ORIGINATE_EVAL_TIMER 5
/* BGP distance configuration. */
@@ -810,6 +817,8 @@ DECLARE_QOBJ_TYPE(bgp);
struct bgp_interface {
#define BGP_INTERFACE_MPLS_BGP_FORWARDING (1 << 0)
+/* L3VPN multi domain switching */
+#define BGP_INTERFACE_MPLS_L3VPN_SWITCHING (1 << 1)
uint32_t flags;
};
@@ -2107,6 +2116,26 @@ enum peer_change_type {
peer_change_reset_out,
};
+/* Enumeration of martian ("self") entry types.
+ * Routes carrying fields that match a self entry are considered martians
+ * and should be handled accordingly, i.e. dropped or import-filtered.
+ * Note:
+ * These "martians" are separate from routes optionally allowed via
+ * 'bgp allow-martian-nexthop'. The optionally allowed martians are
+ * simply prefixes caught by ipv4_martian(), i.e. routes outside
+ * the non-reserved IPv4 Unicast address space.
+ */
+enum bgp_martian_type {
+ BGP_MARTIAN_IF_IP, /* bgp->address_hash */
+ BGP_MARTIAN_TUN_IP, /* bgp->tip_hash */
+ BGP_MARTIAN_IF_MAC, /* bgp->self_mac_hash */
+ BGP_MARTIAN_RMAC, /* bgp->rmac */
+ BGP_MARTIAN_SOO, /* bgp->evpn_info->macvrf_soo */
+};
+
+extern const struct message bgp_martian_type_str[];
+extern const char *bgp_martian_type2str(enum bgp_martian_type mt);
+
extern struct bgp_master *bm;
extern unsigned int multipath_num;
diff --git a/configure.ac b/configure.ac
index 0120c517c6..47ee44a7df 100644
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ([2.69])
-AC_INIT([frr], [9.0-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [9.1-dev], [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST([PACKAGE_URL])
PACKAGE_FULLNAME="FRRouting"
diff --git a/debian/changelog b/debian/changelog
index 008a97c7d5..5c0429d69d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,14 +1,14 @@
-frr (9.0~dev-1) UNRELEASED; urgency=medium
+frr (9.1~dev-1) UNRELEASED; urgency=medium
- * FRR Dev 9.0
+ * FRR Dev 9.1
- -- Donatas Abraitis <donatas@opensourcerouting.org> Tue, 07 Feb 2023 16:00:00 +0500
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Tue, 06 Jun 2023 12:00:00 -0600
-frr (8.5-1) UNRELEASED; urgency=medium
+frr (8.5-0) unstable; urgency=medium
* New upstream release FRR 8.5
- -- Donatas Abraitis <donatas@opensourcerouting.org> Tue, 07 Feb 2023 16:00:00 +0500
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Fri, 10 Mar 2023 02:00:00 -0600
frr (8.4.2-1) unstable; urgency=medium
diff --git a/doc/developer/building-docker.rst b/doc/developer/building-docker.rst
index 4cf356049e..3b1542b223 100644
--- a/doc/developer/building-docker.rst
+++ b/doc/developer/building-docker.rst
@@ -16,8 +16,8 @@ The following platform images are used to support Travis CI and can also
be used to reproduce topotest failures when the docker host is Ubuntu
(tested on 18.04 and 20.04):
-* Ubuntu 18.04
* Ubuntu 20.04
+* Ubuntu 22.04
The following platform images may also be built, but these simply install a
binary package from an existing repository and do not perform source builds:
@@ -130,57 +130,75 @@ No script, multi-arch (ex. amd64, arm64)::
-Building Ubuntu 18.04 Image
+Building Ubuntu 20.04 Image
---------------------------
Build image (from project root directory)::
- docker build -t frr-ubuntu18:latest -f docker/ubuntu18-ci/Dockerfile .
+ docker build -t frr-ubuntu20:latest --build-arg=UBUNTU_VERSION=20.04 -f docker/ubuntu-ci/Dockerfile .
+
+Running Full Topotest::
+
+ docker run --init -it --privileged --name frr -v /lib/modules:/lib/modules \
+ frr-ubuntu20:latest bash -c 'cd ~/frr/tests/topotests ; sudo pytest -nauto --dist=loadfile'
+
+Extract results from the above run into `run-results` dir and analyze::
+
+ tests/topotests/analyze.py -C frr -Ar run-results
Start the container::
- docker run -d --privileged --name frr-ubuntu18 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu18:latest
+ docker run -d --init --privileged --name frr-ubuntu20 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu20:latest
Running a topotest (when the docker host is Ubuntu)::
- docker exec frr-ubuntu18 bash -c 'cd ~/frr/tests/topotests/ospf-topo1 ; sudo pytest test_ospf_topo1.py'
+ docker exec frr-ubuntu20 bash -c 'cd ~/frr/tests/topotests/ospf-topo1 ; sudo pytest test_ospf_topo1.py'
Starting an interactive bash session::
- docker exec -it frr-ubuntu18 bash
+ docker exec -it frr-ubuntu20 bash
Stopping an removing a container::
- docker stop frr-ubuntu18 ; docker rm frr-ubuntu18
+ docker stop frr-ubuntu20 ; docker rm frr-ubuntu20
Removing the built image::
- docker rmi frr-ubuntu18:latest
+ docker rmi frr-ubuntu20:latest
-Building Ubuntu 20.04 Image
+Building Ubuntu 22.04 Image
---------------------------
Build image (from project root directory)::
- docker build -t frr-ubuntu20:latest -f docker/ubuntu20-ci/Dockerfile .
+ docker build -t frr-ubuntu22:latest -f docker/ubuntu-ci/Dockerfile .
+
+Running Full Topotest::
+
+ docker run --init -it --privileged --name frr -v /lib/modules:/lib/modules \
+ frr-ubuntu22:latest bash -c 'cd ~/frr/tests/topotests ; sudo pytest -nauto --dist=loadfile'
+
+Extract results from the above run into `run-results` dir and analyze::
+
+ tests/topotests/analyze.py -C frr -Ar run-results
Start the container::
- docker run -d --privileged --name frr-ubuntu20 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu20:latest
+ docker run -d --init --privileged --name frr-ubuntu22 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu22:latest
Running a topotest (when the docker host is Ubuntu)::
- docker exec frr-ubuntu20 bash -c 'cd ~/frr/tests/topotests/ospf-topo1 ; sudo pytest test_ospf_topo1.py'
+ docker exec frr-ubuntu22 bash -c 'cd ~/frr/tests/topotests/ospf-topo1 ; sudo pytest test_ospf_topo1.py'
Starting an interactive bash session::
- docker exec -it frr-ubuntu20 bash
+ docker exec -it frr-ubuntu22 bash
Stopping an removing a container::
- docker stop frr-ubuntu20 ; docker rm frr-ubuntu20
+ docker stop frr-ubuntu22 ; docker rm frr-ubuntu22
Removing the built image::
- docker rmi frr-ubuntu20:latest
+ docker rmi frr-ubuntu22:latest
diff --git a/doc/developer/process-architecture.rst b/doc/developer/process-architecture.rst
index 33ef278c4d..06ee6a3c37 100644
--- a/doc/developer/process-architecture.rst
+++ b/doc/developer/process-architecture.rst
@@ -46,7 +46,8 @@ implemented in FRR. This doc should be expanded and broken off into its own
section. For now it provides basic information necessary to understand the
interplay between the event system and kernel threads.
-The core event system is implemented in :file:`lib/thread.[ch]`. The primary
+The core event system is implemented in :file:`lib/event.c` and
+:file:`lib/frrevent.h`. The primary
structure is ``struct event_loop``, hereafter referred to as a
``threadmaster``. A ``threadmaster`` is a global state object, or context, that
holds all the tasks currently pending execution as well as statistics on tasks
@@ -57,41 +58,41 @@ execute. At initialization, a daemon will typically create one
fetch each task and execute it.
These tasks have various types corresponding to their general action. The types
-are given by integer macros in :file:`event.h` and are:
+are given by integer macros in :file:`frrevent.h` and are:
-``THREAD_READ``
+``EVENT_READ``
Task which waits for a file descriptor to become ready for reading and then
executes.
-``THREAD_WRITE``
+``EVENT_WRITE``
Task which waits for a file descriptor to become ready for writing and then
executes.
-``THREAD_TIMER``
+``EVENT_TIMER``
Task which executes after a certain amount of time has passed since it was
scheduled.
-``THREAD_EVENT``
+``EVENT_EVENT``
Generic task that executes with high priority and carries an arbitrary
integer indicating the event type to its handler. These are commonly used to
implement the finite state machines typically found in routing protocols.
-``THREAD_READY``
+``EVENT_READY``
Type used internally for tasks on the ready queue.
-``THREAD_UNUSED``
+``EVENT_UNUSED``
Type used internally for ``struct event`` objects that aren't being used.
The event system pools ``struct event`` to avoid heap allocations; this is
the type they have when they're in the pool.
-``THREAD_EXECUTE``
+``EVENT_EXECUTE``
Just before a task is run its type is changed to this. This is used to show
``X`` as the type in the output of :clicmd:`show thread cpu`.
The programmer never has to work with these types explicitly. Each type of task
is created and queued via special-purpose functions (actually macros, but
irrelevant for the time being) for the specific type. For example, to add a
-``THREAD_READ`` task, you would call
+``EVENT_READ`` task, you would call
::
@@ -113,9 +114,9 @@ sockets needed for peerings or IPC.
To retrieve the next task to run the program calls ``event_fetch()``.
``event_fetch()`` internally computes which task to execute next based on
-rudimentary priority logic. Events (type ``THREAD_EVENT``) execute with the
+rudimentary priority logic. Events (type ``EVENT_EVENT``) execute with the
highest priority, followed by expired timers and finally I/O tasks (type
-``THREAD_READ`` and ``THREAD_WRITE``). When scheduling a task a function and an
+``EVENT_READ`` and ``EVENT_WRITE``). When scheduling a task a function and an
arbitrary argument are provided. The task returned from ``event_fetch()`` is
then executed with ``event_call()``.
@@ -135,23 +136,23 @@ Mapping the general names used in the figure to specific FRR functions:
- ``task`` is ``struct event *``
- ``fetch`` is ``event_fetch()``
-- ``exec()`` is ``event_call``
+- ``exec()`` is ``event_call()``
- ``cancel()`` is ``event_cancel()``
- ``schedule()`` is any of the various task-specific ``event_add_*`` functions
Adding tasks is done with various task-specific function-like macros. These
-macros wrap underlying functions in :file:`thread.c` to provide additional
+macros wrap underlying functions in :file:`event.c` to provide additional
information added at compile time, such as the line number the task was
scheduled from, that can be accessed at runtime for debugging, logging and
informational purposes. Each task type has its own specific scheduling function
-that follow the naming convention ``event_add_<type>``; see :file:`event.h`
+that follow the naming convention ``event_add_<type>``; see :file:`frrevent.h`
for details.
There are some gotchas to keep in mind:
- I/O tasks are keyed off the file descriptor associated with the I/O
operation. This means that for any given file descriptor, only one of each
- type of I/O task (``THREAD_READ`` and ``THREAD_WRITE``) can be scheduled. For
+ type of I/O task (``EVENT_READ`` and ``EVENT_WRITE``) can be scheduled. For
example, scheduling two write tasks one after the other will overwrite the
first task with the second, resulting in total loss of the first task and
difficult bugs.
@@ -209,7 +210,8 @@ Kernel Thread Wrapper
The basis for the integration of pthreads and the event system is a lightweight
wrapper for both systems implemented in :file:`lib/frr_pthread.[ch]`. The
header provides a core datastructure, ``struct frr_pthread``, that encapsulates
-structures from both POSIX threads and :file:`thread.[ch]`. In particular, this
+structures from both POSIX threads and :file:`event.c`, :file:`frrevent.h`.
+In particular, this
datastructure has a pointer to a ``threadmaster`` that runs within the pthread.
It also has fields for a name as well as start and stop functions that have
signatures similar to the POSIX arguments for ``pthread_create()``.
@@ -217,18 +219,18 @@ signatures similar to the POSIX arguments for ``pthread_create()``.
Calling ``frr_pthread_new()`` creates and registers a new ``frr_pthread``. The
returned structure has a pre-initialized ``threadmaster``, and its ``start``
and ``stop`` functions are initialized to defaults that will run a basic event
-loop with the given threadmaster. Calling ``frr_pthread_run`` starts the thread
+loop with the given threadmaster. Calling ``frr_pthread_run()`` starts the thread
with the ``start`` function. From there, the model is the same as the regular
event model. To schedule tasks on a particular pthread, simply use the regular
-:file:`thread.c` functions as usual and provide the ``threadmaster`` pointed to
+:file:`event.c` functions as usual and provide the ``threadmaster`` pointed to
from the ``frr_pthread``. As part of implementing the wrapper, the
-:file:`thread.c` functions were made thread-safe. Consequently, it is safe to
+:file:`event.c` functions were made thread-safe. Consequently, it is safe to
schedule events on a ``threadmaster`` belonging both to the calling thread as
well as *any other pthread*. This serves as the basis for inter-thread
communication and boils down to a slightly more complicated method of message
passing, where the messages are the regular task events as used in the
event-driven model. The only difference is thread cancellation, which requires
-calling ``event_cancel_async()`` instead of ``event_cancel`` to cancel a task
+calling ``event_cancel_async()`` instead of ``event_cancel()`` to cancel a task
currently scheduled on a ``threadmaster`` belonging to a different pthread.
This is necessary to avoid race conditions in the specific case where one
pthread wants to guarantee that a task on another pthread is cancelled before
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 773691e698..1c2d6b3bdb 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -46,12 +46,6 @@ Installing Topotest Requirements
# To enable the gRPC topotest install:
python3 -m pip install grpcio grpcio-tools
- # Install Socat tool to run PIMv6 tests,
- # Socat code can be taken from below url,
- # which has latest changes done for PIMv6,
- # join and traffic:
- https://github.com/opensourcerouting/socat/
-
Enable Coredumps
""""""""""""""""
@@ -459,7 +453,7 @@ the tests log directory e.g.,::
-rw------- 1 root root 45172 Apr 19 05:30 capture-r2-r2-eth0.pcap
-rw------- 1 root root 48412 Apr 19 05:30 capture-sw1.pcap
...
--
+
Viewing Live Daemon Logs
""""""""""""""""""""""""
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 65befaccba..0b386bf09f 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -1331,10 +1331,23 @@ frr-format plugin
Using the plugin also changes the string for ``PRI[udx]64`` from the
system value to ``%L[udx]`` (normally ``%ll[udx]`` or ``%l[udx]``.)
-Additionally, the FRR codebase is regularly scanned with Coverity.
-Unfortunately Coverity does not have the ability to handle scanning pull
-requests, but after code is merged it will send an email notifying project
-members with Coverity access of newly introduced defects.
+Additionally, the FRR codebase is regularly scanned for static analysis
+errors with Coverity and pull request changes are scanned as part of the
+Continuous Integration (CI) process. Developers can scan their commits for
+Coverity static analysis errors prior to submission using the
+``scan-build`` command. To use this command, the ``clang-tools`` package must
+be installed. For example, this can be accomplished on Ubuntu with the
+``sudo apt-get install clang-tools`` command. Then, touch the files you want scanned and
+invoke the ``scan-build`` command. For example::
+
+ cd ~/GitHub/frr
+ touch ospfd/ospf_flood.c ospfd/ospf_vty.c ospfd/ospf_opaque.c
+ cd build
+ scan-build make -j32
+
+The results of the scan, including any static analysis errors, will appear inline.
+Additionally, there will be a directory in /tmp containing the Coverity
+reports (e.g., scan-build-2023-06-09-120100-473730-1).
Executing non-installed dynamic binaries
----------------------------------------
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 7737101947..ff8ff5a96c 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -455,7 +455,7 @@ Administrative Distance Metrics
.. _bgp-requires-policy:
Require policy on EBGP
--------------------------------
+----------------------
.. clicmd:: bgp ebgp-requires-policy
@@ -1733,6 +1733,12 @@ Configuring Peers
and will not be displayed as part of a `show run`. The no form
of the command turns off this ability.
+.. clicmd:: bgp default-originate timer (0-3600)
+
+ Set the period to rerun the default-originate route-map scanner process. The
+ default is 5 seconds. With a full routing table, it might be useful to increase
+ this setting to avoid scanning the whole BGP table aggressively.
+
.. clicmd:: bgp default ipv4-unicast
This command allows the user to specify that the IPv4 Unicast address
@@ -2971,6 +2977,14 @@ by issuing the following command under the interface configuration context.
This configuration will install VPN prefixes originated from an e-bgp session,
and with the next-hop directly connected.
+.. clicmd:: mpls bgp l3vpn-multi-domain-switching
+
+Redistribute labeled L3VPN routes from one AS to a neighboring AS (RFC-4364 option
+B, or within the same AS when the iBGP peer uses ``next-hop-self`` to rewrite
+the next-hop attribute). The labeled L3VPN routes received on this interface are
+re-advertised with local labels and an MPLS table swap entry is set to bind
+the local label to the received label.
+
.. _bgp-l3vpn-srv6:
L3VPN SRv6
@@ -3227,6 +3241,77 @@ Example configuration:
exit-address-family
!
+.. _bgp-evpn-mac-vrf-site-of-origin:
+
+EVPN MAC-VRF Site-of-Origin
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In some EVPN deployments it is useful to associate a logical VTEP's Layer 2
+domain (MAC-VRF) with a Site-of-Origin "site" identifier. This provides a
+BGP topology-independent means of marking and import-filtering EVPN routes
+originated from a particular L2 domain. One situation where this is valuable
+is when deploying EVPN using anycast VTEPs, i.e. Active/Active MLAG, as it
+can be used to avoid ownership conflicts between the two control planes
+(EVPN vs MLAG).
+
+Example Use Case (MLAG Anycast VTEPs):
+
+During normal operation, an MLAG VTEP will advertise EVPN routes for attached
+hosts using a shared anycast IP as the BGP next-hop. It is expected for its
+MLAG peer to drop routes originated by the MLAG Peer since they have a Martian
+(self) next-hop. However, prior to the anycast IP being assigned to the local
+system, the anycast BGP next-hop will not be considered a Martian (self) IP.
+This results in a timing window where hosts that are locally attached to the
+MLAG pair's L2 domain can be learned both as "local" (via MLAG) or "remote"
+(via an EVPN route with a non-local next-hop). This can trigger erroneous MAC
+Mobility events, as the host "moves" between one MLAG Peer's Unique VTEP-IP
+and the shared anycast VTEP-IP, which causes unnecessary control plane and
+data plane events to propagate throughout the EVPN domain.
+By associating the MAC-VRF of both MLAG VTEPs with the same site identifier,
+EVPN routes originated by one MLAG VTEP will be ignored by its MLAG peer, ensuring
+that only the MLAG control plane attempts to take ownership of local hosts.
+
+The EVPN MAC-VRF Site-of-Origin feature works by influencing two behaviors:
+
+1. All EVPN routes originating from the local MAC-VRF will have a
+ Site-of-Origin extended community added to the route, matching the
+ configured value.
+2. EVPN routes will be subjected to a "self SoO" check during MAC-VRF
+ or IP-VRF import processing. If the EVPN route is found to carry a
+ Site-of-Origin extended community whose value matches the locally
+ configured MAC-VRF Site-of-Origin, the route will be maintained in
+ the global EVPN RIB ("show bgp l2vpn evpn route") but will not be
+ imported into the corresponding MAC-VRF ("show bgp vni") or IP-VRF
+ ("show bgp [vrf <vrfname>] [ipv4 | ipv6 [unicast]]").
+
+The import filtering described in item (2) is constrained just to Type-2
+(MAC-IP) and Type-3 (IMET) EVPN routes.
+
+The EVPN MAC-VRF Site-of-Origin can be configured using a single CLI command
+under ``address-family l2vpn evpn`` of the EVPN underlay BGP instance.
+
+.. clicmd:: [no] mac-vrf soo <site-of-origin-string>
+
+Example configuration:
+
+.. code-block:: frr
+
+ router bgp 100
+ neighbor 192.168.0.1 remote-as 101
+ !
+ address-family ipv4 l2vpn evpn
+ neighbor 192.168.0.1 activate
+ advertise-all-vni
+ mac-vrf soo 100.64.0.0:777
+ exit-address-family
+
+This configuration ensures:
+
+1. EVPN routes originated from a local L2VNI will have a Site-of-Origin
+ extended community with the value ``100.64.0.0:777``
+2. Received EVPN routes carrying a Site-of-Origin extended community with the
+ value ``100.64.0.0:777`` will not be imported into a local MAC-VRF (L2VNI)
+ or IP-VRF (L3VNI).
+
.. _bgp-evpn-mh:
EVPN Multihoming
@@ -3886,6 +3971,12 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
Total number of neighbors 1
exit1#
+If PfxRcd and/or PfxSnt is shown as ``(Policy)``, that means that the EBGP
+default policy is turned on, but you don't have any filters applied for
+incoming/outgoing directions.
+
+.. seealso:: :ref:`bgp-requires-policy`
+
.. clicmd:: show bgp [afi] [safi] [all] [wide|json]
.. clicmd:: show bgp vrfs [<VRFNAME$vrf_name>] [json]
diff --git a/docker/ubuntu-ci/Dockerfile b/docker/ubuntu-ci/Dockerfile
new file mode 100644
index 0000000000..ada7a48d37
--- /dev/null
+++ b/docker/ubuntu-ci/Dockerfile
@@ -0,0 +1,125 @@
+ARG UBUNTU_VERSION=22.04
+FROM ubuntu:$UBUNTU_VERSION
+
+ARG DEBIAN_FRONTEND=noninteractive
+ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
+
+# Update and install build requirements.
+RUN apt update && apt upgrade -y && \
+ # Basic build requirements from documentation
+ apt-get install -y \
+ autoconf \
+ automake \
+ bison \
+ build-essential \
+ flex \
+ git \
+ install-info \
+ libc-ares-dev \
+ libcap-dev \
+ libelf-dev \
+ libjson-c-dev \
+ libpam0g-dev \
+ libreadline-dev \
+ libsnmp-dev \
+ libtool \
+ make \
+ perl \
+ pkg-config \
+ python3-dev \
+ python3-sphinx \
+ texinfo \
+ && \
+ # Protobuf build requirements
+ apt-get install -y \
+ libprotobuf-c-dev \
+ protobuf-c-compiler \
+ && \
+ # Libyang2 extra build requirements
+ apt-get install -y \
+ cmake \
+ libpcre2-dev \
+ && \
+ # Runtime/triage/testing requirements
+ apt-get install -y \
+ curl \
+ gdb \
+ iproute2 \
+ iputils-ping \
+ liblua5.3-dev \
+ libssl-dev \
+ lua5.3 \
+ net-tools \
+ python2 \
+ python3-pip \
+ snmp \
+ snmp-mibs-downloader \
+ snmpd \
+ sudo \
+ time \
+ tshark \
+ valgrind \
+ yodl \
+ && \
+ download-mibs && \
+ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/iana/IANA-IPPM-METRICS-REGISTRY-MIB -O /usr/share/snmp/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB && \
+ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/SNMPv2-PDU -O /usr/share/snmp/mibs/ietf/SNMPv2-PDU && \
+ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/IPATM-IPMC-MIB -O /usr/share/snmp/mibs/ietf/IPATM-IPMC-MIB && \
+ curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output /tmp/get-pip.py && \
+ python2 /tmp/get-pip.py && \
+ rm -f /tmp/get-pip.py && \
+ python3 -m pip install wheel && \
+ python3 -m pip install pytest && \
+ python3 -m pip install pytest-sugar && \
+ python3 -m pip install pytest-xdist && \
+ python3 -m pip install "scapy>=2.4.2" && \
+ python3 -m pip install xmltodict && \
+ python3 -m pip install grpcio grpcio-tools && \
+ python2 -m pip install 'exabgp<4.0.0'
+
+RUN groupadd -r -g 92 frr && \
+ groupadd -r -g 85 frrvty && \
+ adduser --system --ingroup frr --home /home/frr \
+ --gecos "FRR suite" --shell /bin/bash frr && \
+ usermod -a -G frrvty frr && \
+ useradd -d /var/run/exabgp/ -s /bin/false exabgp && \
+ echo 'frr ALL = NOPASSWD: ALL' | tee /etc/sudoers.d/frr && \
+ mkdir -p /home/frr && chown frr.frr /home/frr
+
+USER frr:frr
+
+# build and install libyang2
+RUN cd && pwd && ls -al && \
+ git clone https://github.com/CESNET/libyang.git && \
+ cd libyang && \
+ git checkout v2.1.80 && \
+ mkdir build; cd build && \
+ cmake -DCMAKE_INSTALL_PREFIX:PATH=/usr \
+ -DCMAKE_BUILD_TYPE:String="Release" .. && \
+ make -j $(nproc) && \
+ sudo make install
+
+COPY --chown=frr:frr . /home/frr/frr/
+
+RUN cd ~/frr && \
+ ./bootstrap.sh && \
+ ./configure \
+ --prefix=/usr \
+ --localstatedir=/var/run/frr \
+ --sbindir=/usr/lib/frr \
+ --sysconfdir=/etc/frr \
+ --enable-sharpd \
+ --enable-multipath=64 \
+ --enable-user=frr \
+ --enable-group=frr \
+ --enable-vty-group=frrvty \
+ --enable-snmp=agentx \
+ --enable-scripting \
+ --with-pkg-extra-version=-my-manual-build && \
+ make -j $(nproc) && \
+ sudo make install
+
+RUN cd ~/frr && make check || true
+
+COPY docker/ubuntu-ci/docker-start /usr/sbin/docker-start
+CMD ["/usr/sbin/docker-start"]
diff --git a/docker/ubuntu18-ci/docker-start b/docker/ubuntu-ci/docker-start
index 9a45c722f1..9a45c722f1 100755
--- a/docker/ubuntu18-ci/docker-start
+++ b/docker/ubuntu-ci/docker-start
diff --git a/docker/ubuntu18-ci/Dockerfile b/docker/ubuntu18-ci/Dockerfile
deleted file mode 100644
index dab8606739..0000000000
--- a/docker/ubuntu18-ci/Dockerfile
+++ /dev/null
@@ -1,73 +0,0 @@
-FROM ubuntu:18.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
-# Update Ubuntu Software repository
-RUN apt update && \
- apt-get install -y \
- git autoconf automake libtool make libreadline-dev texinfo \
- pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
- libc-ares-dev python3-dev python3-sphinx \
- install-info build-essential libsnmp-dev perl libcap-dev \
- libelf-dev libprotobuf-c-dev protobuf-c-compiler \
- sudo gdb iputils-ping time \
- python-pip net-tools iproute2 && \
- python3 -m pip install wheel && \
- python3 -m pip install pytest && \
- python3 -m pip install pytest-xdist && \
- python3 -m pip install "scapy>=2.4.2" && \
- python3 -m pip install xmltodict && \
- python2 -m pip install 'exabgp<4.0.0'
-
-RUN groupadd -r -g 92 frr && \
- groupadd -r -g 85 frrvty && \
- adduser --system --ingroup frr --home /home/frr \
- --gecos "FRR suite" --shell /bin/bash frr && \
- usermod -a -G frrvty frr && \
- useradd -d /var/run/exabgp/ -s /bin/false exabgp && \
- echo 'frr ALL = NOPASSWD: ALL' | tee /etc/sudoers.d/frr && \
- mkdir -p /home/frr && chown frr.frr /home/frr
-
-#for libyang 2
-RUN apt-get install -y cmake libpcre2-dev
-
-USER frr:frr
-
-# build and install libyang2
-RUN cd && pwd && ls -al && \
- git clone https://github.com/CESNET/libyang.git && \
- cd libyang && \
- git checkout v2.0.0 && \
- mkdir build; cd build && \
- cmake -DCMAKE_INSTALL_PREFIX:PATH=/usr \
- -DCMAKE_BUILD_TYPE:String="Release" .. && \
- make -j $(nproc) && \
- sudo make install
-
-COPY --chown=frr:frr . /home/frr/frr/
-
-RUN cd && ls -al && ls -al frr
-
-RUN cd ~/frr && \
- ./bootstrap.sh && \
- ./configure \
- --prefix=/usr \
- --localstatedir=/var/run/frr \
- --sbindir=/usr/lib/frr \
- --sysconfdir=/etc/frr \
- --enable-vtysh \
- --enable-pimd \
- --enable-sharpd \
- --enable-multipath=64 \
- --enable-user=frr \
- --enable-group=frr \
- --enable-vty-group=frrvty \
- --enable-snmp=agentx \
- --with-pkg-extra-version=-my-manual-build && \
- make -j $(nproc) && \
- sudo make install
-
-RUN cd ~/frr && make check || true
-
-COPY docker/ubuntu18-ci/docker-start /usr/sbin/docker-start
-CMD ["/usr/sbin/docker-start"]
diff --git a/docker/ubuntu18-ci/README.md b/docker/ubuntu18-ci/README.md
deleted file mode 100644
index 4e8ab891e6..0000000000
--- a/docker/ubuntu18-ci/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Ubuntu 18.04
-
-This builds an ubuntu 18.04 container for dev / test
-
-# Build
-
-```
-docker build -t frr-ubuntu18:latest -f docker/ubuntu18-ci/Dockerfile .
-```
-
-# Running
-
-```
-docker run -d --privileged --name frr-ubuntu18 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu18:latest
-```
-
-# make check
-
-```
-docker exec frr-ubuntu18 bash -c 'cd ~/frr ; make check'
-```
-
-# interactive bash
-```
-docker exec -it frr-ubuntu18 bash
-```
-
-# topotest -- when Host O/S is Ubuntu only
-
-```
-docker exec frr-ubuntu18 bash -c 'cd ~/frr/tests/topotests/ospf-topo1 ; sudo pytest test_ospf_topo1.py'
-```
-
-# stop & remove container
-
-```
-docker stop frr-ubuntu18 ; docker rm frr-ubuntu18
-```
-
-# remove image
-
-```
-docker rmi frr-ubuntu18:latest
-```
diff --git a/docker/ubuntu20-ci/Dockerfile b/docker/ubuntu20-ci/Dockerfile
deleted file mode 100644
index 7e49910a72..0000000000
--- a/docker/ubuntu20-ci/Dockerfile
+++ /dev/null
@@ -1,78 +0,0 @@
-FROM ubuntu:20.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
-# Update Ubuntu Software repository
-RUN apt update && \
- apt-get install -y \
- git autoconf automake libtool make libreadline-dev texinfo \
- pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
- libc-ares-dev python3-dev python3-sphinx \
- install-info build-essential libsnmp-dev perl \
- libcap-dev python2 libelf-dev libprotobuf-c-dev protobuf-c-compiler \
- sudo gdb curl iputils-ping time \
- lua5.3 liblua5.3-dev \
- net-tools iproute2 && \
- curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output /tmp/get-pip.py && \
- python2 /tmp/get-pip.py && \
- rm -f /tmp/get-pip.py && \
- python3 -m pip install wheel && \
- python3 -m pip install pytest && \
- python3 -m pip install pytest-xdist && \
- python3 -m pip install "scapy>=2.4.2" && \
- python3 -m pip install xmltodict && \
- python2 -m pip install 'exabgp<4.0.0'
-
-RUN groupadd -r -g 92 frr && \
- groupadd -r -g 85 frrvty && \
- adduser --system --ingroup frr --home /home/frr \
- --gecos "FRR suite" --shell /bin/bash frr && \
- usermod -a -G frrvty frr && \
- useradd -d /var/run/exabgp/ -s /bin/false exabgp && \
- echo 'frr ALL = NOPASSWD: ALL' | tee /etc/sudoers.d/frr && \
- mkdir -p /home/frr && chown frr.frr /home/frr
-
-#for libyang 2
-RUN apt-get install -y cmake libpcre2-dev
-
-USER frr:frr
-
-# build and install libyang2
-RUN cd && pwd && ls -al && \
- git clone https://github.com/CESNET/libyang.git && \
- cd libyang && \
- git checkout v2.0.0 && \
- mkdir build; cd build && \
- cmake -DCMAKE_INSTALL_PREFIX:PATH=/usr \
- -DCMAKE_BUILD_TYPE:String="Release" .. && \
- make -j $(nproc) && \
- sudo make install
-
-COPY --chown=frr:frr . /home/frr/frr/
-
-RUN cd && ls -al && ls -al frr
-
-RUN cd ~/frr && \
- ./bootstrap.sh && \
- ./configure \
- --prefix=/usr \
- --localstatedir=/var/run/frr \
- --sbindir=/usr/lib/frr \
- --sysconfdir=/etc/frr \
- --enable-vtysh \
- --enable-pimd \
- --enable-sharpd \
- --enable-multipath=64 \
- --enable-user=frr \
- --enable-group=frr \
- --enable-vty-group=frrvty \
- --enable-snmp=agentx \
- --enable-scripting \
- --with-pkg-extra-version=-my-manual-build && \
- make -j $(nproc) && \
- sudo make install
-
-RUN cd ~/frr && make check || true
-
-COPY docker/ubuntu20-ci/docker-start /usr/sbin/docker-start
-CMD ["/usr/sbin/docker-start"]
diff --git a/docker/ubuntu20-ci/README.md b/docker/ubuntu20-ci/README.md
index 11138c6507..536f8e2e35 100644
--- a/docker/ubuntu20-ci/README.md
+++ b/docker/ubuntu20-ci/README.md
@@ -5,13 +5,25 @@ This builds an ubuntu 20.04 container for dev / test
# Build
```
-docker build -t frr-ubuntu20:latest -f docker/ubuntu20-ci/Dockerfile .
+docker build -t frr-ubuntu20:latest --build-arg=UBUNTU_VERSION=20.04 -f docker/ubuntu-ci/Dockerfile .
+```
+
+# Running Full Topotest
+
+```
+docker run --init -it --privileged --name frr-ubuntu20 -v /lib/modules:/lib/modules frr-ubuntu20:latest bash -c 'cd ~/frr/tests/topotests ; sudo pytest -nauto --dist=loadfile'
+```
+
+# Extract results from the above run into `run-results` dir and analyze
+
+```
+tests/topotests/analyze.py -C frr -Ar run-results
```
# Running
```
-docker run -d --privileged --name frr-ubuntu20 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu20:latest
+docker run -d --init --privileged --name frr-ubuntu20 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu20:latest
```
# make check
diff --git a/docker/ubuntu20-ci/docker-start b/docker/ubuntu20-ci/docker-start
deleted file mode 100755
index 9a45c722f1..0000000000
--- a/docker/ubuntu20-ci/docker-start
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-if [ $(uname -a | grep -ci Ubuntu) -ge 1 ]; then
- #for topotests under ubuntu host
- sudo modprobe mpls-router mpls-iptunnel
- sudo /etc/init.d/openvswitch-switch start
-fi
-while true ; do sleep 365d ; done
diff --git a/docker/ubuntu22-ci/README.md b/docker/ubuntu22-ci/README.md
new file mode 100644
index 0000000000..403abbf5bb
--- /dev/null
+++ b/docker/ubuntu22-ci/README.md
@@ -0,0 +1,57 @@
+# Ubuntu 22.04
+
+This builds an ubuntu 22.04 container for dev / test
+
+# Build
+
+```
+docker build -t frr-ubuntu22:latest -f docker/ubuntu-ci/Dockerfile .
+```
+
+# Running Full Topotest
+
+```
+docker run --init -it --privileged --name frr -v /lib/modules:/lib/modules frr-ubuntu22:latest bash -c 'cd ~/frr/tests/topotests ; sudo pytest -nauto --dist=loadfile'
+```
+
+# Extract results from the above run into `run-results` dir and analyze
+
+```
+tests/topotests/analyze.py -C frr -Ar run-results
+```
+
+# Running
+
+```
+docker run -d --init --privileged --name frr --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu22:latest
+```
+
+# make check
+
+```
+docker exec frr bash -c 'cd ~/frr ; make check'
+```
+
+# interactive bash
+
+```
+docker exec -it frr bash
+```
+
+# topotest -- when Host O/S is Ubuntu only
+
+```
+docker exec frr bash -c 'cd ~/frr/tests/topotests/ospf-topo1 ; sudo pytest test_ospf_topo1.py'
+```
+
+# stop & remove container
+
+```
+docker stop frr ; docker rm frr
+```
+
+# remove image
+
+```
+docker rmi frr-ubuntu22:latest
+```
diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c
index 4ad877ce0f..e871ae8c4f 100644
--- a/isisd/isis_tlvs.c
+++ b/isisd/isis_tlvs.c
@@ -1133,7 +1133,7 @@ static int unpack_item_ext_subtlv_asla(uint16_t mtid, uint8_t subtlv_len,
uint8_t uabm_flag_len;
uint8_t sabm[ASLA_APP_IDENTIFIER_BIT_LENGTH] = {0};
uint8_t uabm[ASLA_APP_IDENTIFIER_BIT_LENGTH] = {0};
- uint8_t readable;
+ uint8_t readable = subtlv_len;
uint8_t subsubtlv_type;
uint8_t subsubtlv_len;
size_t nb_groups;
@@ -1156,15 +1156,23 @@ static int unpack_item_ext_subtlv_asla(uint16_t mtid, uint8_t subtlv_len,
asla->standard_apps_length = ASLA_APPS_LENGTH_MASK & sabm_flag_len;
asla->user_def_apps_length = ASLA_APPS_LENGTH_MASK & uabm_flag_len;
+ readable -= ISIS_SUBSUBTLV_HDR_SIZE;
+ if (readable <
+ asla->standard_apps_length + asla->user_def_apps_length) {
+ TLV_SIZE_MISMATCH(log, indent, "ASLA");
+ return -1;
+ }
+
for (int i = 0; i < asla->standard_apps_length; i++)
sabm[i] = stream_getc(s);
for (int i = 0; i < asla->user_def_apps_length; i++)
uabm[i] = stream_getc(s);
+ readable -= (asla->standard_apps_length + asla->user_def_apps_length);
+
asla->standard_apps = sabm[0];
asla->user_def_apps = uabm[0];
- readable = subtlv_len - 4;
while (readable > 0) {
if (readable < ISIS_SUBSUBTLV_HDR_SIZE) {
TLV_SIZE_MISMATCH(log, indent, "ASLA Sub TLV");
diff --git a/ldpd/control.c b/ldpd/control.c
index 6bb5204d13..db52d46325 100644
--- a/ldpd/control.c
+++ b/ldpd/control.c
@@ -106,8 +106,7 @@ static void control_accept(struct event *thread)
*/
if (errno == ENFILE || errno == EMFILE)
accept_pause();
- else if (errno != EWOULDBLOCK && errno != EINTR &&
- errno != ECONNABORTED)
+ else if (errno != EWOULDBLOCK && errno != EINTR && errno != ECONNABORTED)
log_warn("%s: accept", __func__);
return;
}
@@ -192,8 +191,7 @@ static void control_dispatch_imsg(struct event *thread)
c->iev.ev_read = NULL;
- if (((n = imsg_read(&c->iev.ibuf)) == -1 && errno != EAGAIN) ||
- n == 0) {
+ if (((n = imsg_read(&c->iev.ibuf)) == -1 && errno != EAGAIN) || n == 0) {
control_close(fd);
return;
}
@@ -217,12 +215,10 @@ static void control_dispatch_imsg(struct event *thread)
/* ignore */
break;
case IMSG_CTL_SHOW_INTERFACE:
- if (imsg.hdr.len == IMSG_HEADER_SIZE +
- sizeof(ifidx)) {
+ if (imsg.hdr.len == IMSG_HEADER_SIZE + sizeof(ifidx)) {
memcpy(&ifidx, imsg.data, sizeof(ifidx));
ldpe_iface_ctl(c, ifidx);
- imsg_compose_event(&c->iev, IMSG_CTL_END, 0,
- 0, -1, NULL, 0);
+ imsg_compose_event(&c->iev, IMSG_CTL_END, 0, 0, -1, NULL, 0);
}
break;
case IMSG_CTL_SHOW_DISCOVERY:
@@ -242,8 +238,7 @@ static void control_dispatch_imsg(struct event *thread)
ldpe_nbr_ctl(c);
break;
case IMSG_CTL_CLEAR_NBR:
- if (imsg.hdr.len != IMSG_HEADER_SIZE +
- sizeof(struct ctl_nbr))
+ if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(struct ctl_nbr))
break;
nbr_clear_ctl(imsg.data);
@@ -255,8 +250,7 @@ static void control_dispatch_imsg(struct event *thread)
/* ignore */
break;
default:
- log_debug("%s: error handling imsg %d", __func__,
- imsg.hdr.type);
+ log_debug("%s: error handling imsg %d", __func__, imsg.hdr.type);
break;
}
imsg_free(&imsg);
diff --git a/ldpd/init.c b/ldpd/init.c
index 15d653b747..f0cb98e5c0 100644
--- a/ldpd/init.c
+++ b/ldpd/init.c
@@ -31,13 +31,13 @@ send_init(struct nbr *nbr)
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_INIT, size);
- err |= gen_init_prms_tlv(buf, nbr);
- err |= gen_cap_dynamic_tlv(buf);
- err |= gen_cap_twcard_tlv(buf, 1);
- err |= gen_cap_unotif_tlv(buf, 1);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_INIT, size));
+ SET_FLAG(err, gen_init_prms_tlv(buf, nbr));
+ SET_FLAG(err, gen_cap_dynamic_tlv(buf));
+ SET_FLAG(err, gen_cap_twcard_tlv(buf, 1));
+ SET_FLAG(err, gen_cap_unotif_tlv(buf, 1));
if (err) {
ibuf_free(buf);
return;
@@ -121,62 +121,56 @@ recv_init(struct nbr *nbr, char *buf, uint16_t len)
return (-1);
case TLV_TYPE_DYNAMIC_CAP:
if (tlv_len != CAP_TLV_DYNAMIC_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_DYNAMIC) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_DYNAMIC)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_DYNAMIC;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_DYNAMIC);
- nbr->flags |= F_NBR_CAP_DYNAMIC;
+ SET_FLAG(nbr->flags, F_NBR_CAP_DYNAMIC);
log_debug("%s: lsr-id %pI4 announced the Dynamic Capability Announcement capability", __func__,
&nbr->id);
break;
case TLV_TYPE_TWCARD_CAP:
if (tlv_len != CAP_TLV_TWCARD_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_TWCARD) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_TWCARD;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD);
- nbr->flags |= F_NBR_CAP_TWCARD;
+ SET_FLAG(nbr->flags, F_NBR_CAP_TWCARD);
log_debug("%s: lsr-id %pI4 announced the Typed Wildcard FEC capability", __func__, &nbr->id);
break;
case TLV_TYPE_UNOTIF_CAP:
if (tlv_len != CAP_TLV_UNOTIF_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_UNOTIF) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_UNOTIF;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF);
- nbr->flags |= F_NBR_CAP_UNOTIF;
+ SET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF);
log_debug("%s: lsr-id %pI4 announced the Unrecognized Notification capability", __func__,
&nbr->id);
break;
default:
- if (!(ntohs(tlv.type) & UNKNOWN_FLAG))
+ if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG))
send_notification_rtlvs(nbr, S_UNSSUPORTDCAP,
msg.id, msg.type, tlv_type, tlv_len, buf);
/* ignore unknown tlv */
@@ -217,16 +211,16 @@ send_capability(struct nbr *nbr, uint16_t capability, int enable)
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_CAPABILITY, size);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_CAPABILITY, size));
switch (capability) {
case TLV_TYPE_TWCARD_CAP:
- err |= gen_cap_twcard_tlv(buf, enable);
+ SET_FLAG(err, gen_cap_twcard_tlv(buf, enable));
break;
case TLV_TYPE_UNOTIF_CAP:
- err |= gen_cap_unotif_tlv(buf, enable);
+ SET_FLAG(err, gen_cap_unotif_tlv(buf, enable));
break;
case TLV_TYPE_DYNAMIC_CAP:
/*
@@ -288,52 +282,47 @@ recv_capability(struct nbr *nbr, char *buf, uint16_t len)
switch (tlv_type) {
case TLV_TYPE_TWCARD_CAP:
if (tlv_len != CAP_TLV_TWCARD_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_TWCARD) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_TWCARD;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD);
memcpy(&reserved, buf, sizeof(reserved));
enable = reserved & STATE_BIT;
if (enable)
- nbr->flags |= F_NBR_CAP_TWCARD;
+ SET_FLAG(nbr->flags, F_NBR_CAP_TWCARD);
else
- nbr->flags &= ~F_NBR_CAP_TWCARD;
+ UNSET_FLAG(nbr->flags, F_NBR_CAP_TWCARD);
log_debug("%s: lsr-id %pI4 %s the Typed Wildcard FEC capability", __func__, &nbr->id,
(enable) ? "announced" : "withdrew");
break;
case TLV_TYPE_UNOTIF_CAP:
if (tlv_len != CAP_TLV_UNOTIF_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_UNOTIF) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_UNOTIF;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF);
memcpy(&reserved, buf, sizeof(reserved));
enable = reserved & STATE_BIT;
if (enable)
- nbr->flags |= F_NBR_CAP_UNOTIF;
+ SET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF);
else
- nbr->flags &= ~F_NBR_CAP_UNOTIF;
+ UNSET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF);
log_debug("%s: lsr-id %pI4 %s the Unrecognized Notification capability", __func__,
- &nbr->id, (enable) ? "announced" :
- "withdrew");
+ &nbr->id, (enable) ? "announced" : "withdrew");
break;
case TLV_TYPE_DYNAMIC_CAP:
/*
@@ -346,7 +335,7 @@ recv_capability(struct nbr *nbr, char *buf, uint16_t len)
*/
/* FALLTHROUGH */
default:
- if (!(ntohs(tlv.type) & UNKNOWN_FLAG))
+ if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG))
send_notification_rtlvs(nbr, S_UNSSUPORTDCAP,
msg.id, msg.type, tlv_type, tlv_len, buf);
/* ignore unknown tlv */
diff --git a/ldpd/ldp_debug.c b/ldpd/ldp_debug.c
index d2aeaba8b3..957fb8e556 100644
--- a/ldpd/ldp_debug.c
+++ b/ldpd/ldp_debug.c
@@ -97,8 +97,7 @@ ldp_vty_debug(struct vty *vty, const char *negate, const char *type_str,
DEBUG_ON(zebra, LDP_DEBUG_ZEBRA);
}
- main_imsg_compose_both(IMSG_DEBUG_UPDATE, &ldp_debug,
- sizeof(ldp_debug));
+ main_imsg_compose_both(IMSG_DEBUG_UPDATE, &ldp_debug, sizeof(ldp_debug));
return (CMD_SUCCESS);
}
@@ -119,13 +118,11 @@ ldp_vty_show_debugging(struct vty *vty)
if (LDP_DEBUG(labels, LDP_DEBUG_LABELS))
vty_out (vty, " LDP labels debugging is on\n");
if (LDP_DEBUG(msg, LDP_DEBUG_MSG_RECV_ALL))
- vty_out (vty,
- " LDP detailed messages debugging is on (inbound)\n");
+ vty_out (vty, " LDP detailed messages debugging is on (inbound)\n");
else if (LDP_DEBUG(msg, LDP_DEBUG_MSG_RECV))
vty_out (vty," LDP messages debugging is on (inbound)\n");
if (LDP_DEBUG(msg, LDP_DEBUG_MSG_SEND_ALL))
- vty_out (vty,
- " LDP detailed messages debugging is on (outbound)\n");
+ vty_out (vty, " LDP detailed messages debugging is on (outbound)\n");
else if (LDP_DEBUG(msg, LDP_DEBUG_MSG_SEND))
vty_out (vty," LDP messages debugging is on (outbound)\n");
if (LDP_DEBUG(sync, LDP_DEBUG_SYNC))
diff --git a/ldpd/log.c b/ldpd/log.c
index a9898a64f0..7c4d782dcf 100644
--- a/ldpd/log.c
+++ b/ldpd/log.c
@@ -35,13 +35,11 @@ vlog(int pri, const char *fmt, va_list ap)
switch (ldpd_process) {
case PROC_LDE_ENGINE:
vsnprintfrr(buf, sizeof(buf), fmt, ap);
- lde_imsg_compose_parent_sync(IMSG_LOG, pri, buf,
- strlen(buf) + 1);
+ lde_imsg_compose_parent_sync(IMSG_LOG, pri, buf, strlen(buf) + 1);
break;
case PROC_LDP_ENGINE:
vsnprintfrr(buf, sizeof(buf), fmt, ap);
- ldpe_imsg_compose_parent_sync(IMSG_LOG, pri, buf,
- strlen(buf) + 1);
+ ldpe_imsg_compose_parent_sync(IMSG_LOG, pri, buf, strlen(buf) + 1);
break;
case PROC_MAIN:
vzlog(pri, fmt, ap);
@@ -121,15 +119,13 @@ void
fatal(const char *emsg)
{
if (emsg == NULL)
- logit(LOG_CRIT, "fatal in %s: %s", log_procname,
- strerror(errno));
+ logit(LOG_CRIT, "fatal in %s: %s", log_procname, strerror(errno));
else
if (errno)
logit(LOG_CRIT, "fatal in %s: %s: %s",
log_procname, emsg, strerror(errno));
else
- logit(LOG_CRIT, "fatal in %s: %s",
- log_procname, emsg);
+ logit(LOG_CRIT, "fatal in %s: %s", log_procname, emsg);
exit(1);
}
diff --git a/ldpd/logmsg.c b/ldpd/logmsg.c
index 4f1d950bb3..75f4293f0c 100644
--- a/ldpd/logmsg.c
+++ b/ldpd/logmsg.c
@@ -74,8 +74,7 @@ log_addr(int af, const union ldpd_addr *addr)
switch (af) {
case AF_INET:
round = (round + 1) % NUM_LOGS;
- if (inet_ntop(AF_INET, &addr->v4, buf[round],
- sizeof(buf[round])) == NULL)
+ if (inet_ntop(AF_INET, &addr->v4, buf[round], sizeof(buf[round])) == NULL)
return ("???");
return (buf[round]);
case AF_INET6:
@@ -166,8 +165,7 @@ log_hello_src(const struct hello_source *src)
switch (src->type) {
case HELLO_LINK:
- snprintf(buf, sizeof(buf), "iface %s",
- src->link.ia->iface->name);
+ snprintf(buf, sizeof(buf), "iface %s", src->link.ia->iface->name);
break;
case HELLO_TARGETED:
snprintf(buf, sizeof(buf), "source %s",
diff --git a/ldpd/notification.c b/ldpd/notification.c
index af5bb267d7..1709098d09 100644
--- a/ldpd/notification.c
+++ b/ldpd/notification.c
@@ -25,28 +25,28 @@ send_notification_full(struct tcp_conn *tcp, struct notify_msg *nm)
/* calculate size */
size = LDP_HDR_SIZE + LDP_MSG_SIZE + STATUS_SIZE;
- if (nm->flags & F_NOTIF_PW_STATUS)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS))
size += PW_STATUS_TLV_SIZE;
- if (nm->flags & F_NOTIF_FEC)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_FEC))
size += len_fec_tlv(&nm->fec);
- if (nm->flags & F_NOTIF_RETURNED_TLVS)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_RETURNED_TLVS))
size += TLV_HDR_SIZE * 2 + nm->rtlvs.length;
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size);
- err |= gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size));
+ SET_FLAG(err, gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type));
/* optional tlvs */
- if (nm->flags & F_NOTIF_PW_STATUS)
- err |= gen_pw_status_tlv(buf, nm->pw_status);
- if (nm->flags & F_NOTIF_FEC)
- err |= gen_fec_tlv(buf, &nm->fec);
- if (nm->flags & F_NOTIF_RETURNED_TLVS)
- err |= gen_returned_tlvs(buf, nm->rtlvs.type, nm->rtlvs.length,
- nm->rtlvs.data);
+ if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS))
+ SET_FLAG(err, gen_pw_status_tlv(buf, nm->pw_status));
+ if (CHECK_FLAG(nm->flags, F_NOTIF_FEC))
+ SET_FLAG(err, gen_fec_tlv(buf, &nm->fec));
+ if (CHECK_FLAG(nm->flags, F_NOTIF_RETURNED_TLVS))
+ SET_FLAG(err, gen_returned_tlvs(buf, nm->rtlvs.type, nm->rtlvs.length,
+ nm->rtlvs.data));
if (err) {
ibuf_free(buf);
return;
@@ -121,7 +121,7 @@ send_notification_rtlvs(struct nbr *nbr, uint32_t status_code, uint32_t msg_id,
nm.rtlvs.type = tlv_type;
nm.rtlvs.length = tlv_len;
nm.rtlvs.data = tlv_data;
- nm.flags |= F_NOTIF_RETURNED_TLVS;
+ SET_FLAG(nm.flags, F_NOTIF_RETURNED_TLVS);
}
send_notification_full(nbr->tcp, &nm);
@@ -189,13 +189,12 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
break;
case TLV_TYPE_PW_STATUS:
if (tlv_len != 4) {
- session_shutdown(nbr, S_BAD_TLV_LEN,
- msg.id, msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
nm.pw_status = ntohl(*(uint32_t *)buf);
- nm.flags |= F_NOTIF_PW_STATUS;
+ SET_FLAG(nm.flags, F_NOTIF_PW_STATUS);
break;
case TLV_TYPE_FEC:
if ((tlen = tlv_decode_fec_elm(nbr, &msg, buf,
@@ -203,12 +202,11 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
return (-1);
/* allow only one fec element */
if (tlen != tlv_len) {
- session_shutdown(nbr, S_BAD_TLV_VAL,
- msg.id, msg.type);
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
leconf->stats.bad_tlv_len++;
return (-1);
}
- nm.flags |= F_NOTIF_FEC;
+ SET_FLAG(nm.flags, F_NOTIF_FEC);
break;
default:
if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) {
@@ -226,9 +224,8 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
/* sanity checks */
switch (nm.status_code) {
case S_PW_STATUS:
- if (!(nm.flags & (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) {
- send_notification(nbr->tcp, S_MISS_MSG,
- msg.id, msg.type);
+ if (!CHECK_FLAG(nm.flags, (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) {
+ send_notification(nbr->tcp, S_MISS_MSG, msg.id, msg.type);
return (-1);
}
@@ -236,20 +233,17 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
case MAP_TYPE_PWID:
break;
default:
- send_notification(nbr->tcp, S_BAD_TLV_VAL,
- msg.id, msg.type);
+ send_notification(nbr->tcp, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
break;
case S_ENDOFLIB:
- if (!(nm.flags & F_NOTIF_FEC)) {
- send_notification(nbr->tcp, S_MISS_MSG,
- msg.id, msg.type);
+ if (!CHECK_FLAG(nm.flags, F_NOTIF_FEC)) {
+ send_notification(nbr->tcp, S_MISS_MSG, msg.id, msg.type);
return (-1);
}
if (nm.fec.type != MAP_TYPE_TYPED_WCARD) {
- send_notification(nbr->tcp, S_BAD_TLV_VAL,
- msg.id, msg.type);
+ send_notification(nbr->tcp, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
break;
@@ -259,7 +253,7 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
log_msg_notification(0, nbr, &nm);
- if (st.status_code & htonl(STATUS_FATAL)) {
+ if (CHECK_FLAG(st.status_code, htonl(STATUS_FATAL))) {
if (nbr->state == NBR_STA_OPENSENT)
nbr_start_idtimer(nbr);
@@ -269,11 +263,9 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
* initialization, it SHOULD transmit a Shutdown message and
* then close the transport connection".
*/
- if (nbr->state != NBR_STA_OPER &&
- nm.status_code == S_SHUTDOWN) {
+ if (nbr->state != NBR_STA_OPER && nm.status_code == S_SHUTDOWN) {
leconf->stats.session_attempts++;
- send_notification(nbr->tcp, S_SHUTDOWN,
- msg.id, msg.type);
+ send_notification(nbr->tcp, S_SHUTDOWN, msg.id, msg.type);
}
leconf->stats.shutdown_rcv_notify++;
@@ -287,8 +279,7 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
switch (nm.status_code) {
case S_PW_STATUS:
case S_ENDOFLIB:
- ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0,
- &nm, sizeof(nm));
+ ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0, &nm, sizeof(nm));
break;
case S_NO_HELLO:
leconf->stats.session_rejects_hello++;
@@ -361,8 +352,8 @@ gen_returned_tlvs(struct ibuf *buf, uint16_t type, uint16_t length,
tlv.length = htons(length);
err = ibuf_add(buf, &rtlvs, sizeof(rtlvs));
- err |= ibuf_add(buf, &tlv, sizeof(tlv));
- err |= ibuf_add(buf, tlv_data, length);
+ SET_FLAG(err, ibuf_add(buf, &tlv, sizeof(tlv)));
+ SET_FLAG(err, ibuf_add(buf, tlv_data, length));
return (err);
}
@@ -378,9 +369,9 @@ log_msg_notification(int out, struct nbr *nbr, struct notify_msg *nm)
debug_msg(out, "notification: lsr-id %pI4, status %s",
&nbr->id, status_code_name(nm->status_code));
- if (nm->flags & F_NOTIF_FEC)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_FEC))
debug_msg(out, "notification: fec %s", log_map(&nm->fec));
- if (nm->flags & F_NOTIF_PW_STATUS)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS))
debug_msg(out, "notification: pw-status %s",
(nm->pw_status == PW_FORWARDING) ? "forwarding" : "not forwarding");
}
diff --git a/ldpd/pfkey.c b/ldpd/pfkey.c
index 4bea2e1904..ae771cae19 100644
--- a/ldpd/pfkey.c
+++ b/ldpd/pfkey.c
@@ -256,8 +256,7 @@ pfkey_read(int sd, struct sadb_msg *h)
}
/* XXX: Only one message can be outstanding. */
- if (hdr.sadb_msg_seq == sadb_msg_seq &&
- hdr.sadb_msg_pid == pid) {
+ if (hdr.sadb_msg_seq == sadb_msg_seq && hdr.sadb_msg_pid == pid) {
if (h)
*h = hdr;
return (0);
@@ -412,8 +411,7 @@ pfkey_establish(struct nbr *nbr, struct nbr_params *nbrp)
{
switch (nbr->auth.method) {
case AUTH_MD5SIG:
- strlcpy(nbr->auth.md5key, nbrp->auth.md5key,
- sizeof(nbr->auth.md5key));
+ strlcpy(nbr->auth.md5key, nbrp->auth.md5key, sizeof(nbr->auth.md5key));
return pfkey_md5sig_establish(nbr, nbrp);
case AUTH_NONE:
return 0;
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
index 5c875204f7..fdeff3ec0a 100644
--- a/lib/mgmt_be_client.c
+++ b/lib/mgmt_be_client.c
@@ -20,14 +20,6 @@
#include "lib/mgmt_be_client_clippy.c"
-#define MGMTD_BE_CLIENT_DBG(fmt, ...) \
- DEBUGD(&mgmt_dbg_be_client, "BE-CLIENT: %s:" fmt, __func__, \
- ##__VA_ARGS__)
-#define MGMTD_BE_CLIENT_ERR(fmt, ...) \
- zlog_err("BE-CLIENT: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_DBG_BE_CLIENT_CHECK() \
- DEBUG_MODE_CHECK(&mgmt_dbg_be_client, DEBUG_MODE_ALL)
-
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT, "backend client");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT_NAME, "backend client name");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH, "backend transaction batch data");
@@ -217,14 +209,16 @@ static void mgmt_be_cleanup_all_batches(struct mgmt_be_txn_ctx *txn)
}
static struct mgmt_be_txn_ctx *
-mgmt_be_find_txn_by_id(struct mgmt_be_client *client_ctx, uint64_t txn_id)
+mgmt_be_find_txn_by_id(struct mgmt_be_client *client_ctx, uint64_t txn_id,
+ bool warn)
{
struct mgmt_be_txn_ctx *txn = NULL;
- FOREACH_BE_TXN_IN_LIST (client_ctx, txn) {
+ FOREACH_BE_TXN_IN_LIST (client_ctx, txn)
if (txn->txn_id == txn_id)
return txn;
- }
+ if (warn)
+ MGMTD_BE_CLIENT_ERR("Unknown txn-id: %" PRIu64, txn_id);
return NULL;
}
@@ -234,20 +228,21 @@ mgmt_be_txn_create(struct mgmt_be_client *client_ctx, uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn = NULL;
- txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
- if (!txn) {
- txn = XCALLOC(MTYPE_MGMTD_BE_TXN,
- sizeof(struct mgmt_be_txn_ctx));
- assert(txn);
+ txn = mgmt_be_find_txn_by_id(client_ctx, txn_id, false);
+ if (txn) {
+ MGMTD_BE_CLIENT_ERR("Can't create existing txn-id: %" PRIu64,
+ txn_id);
+ return NULL;
+ }
- txn->txn_id = txn_id;
- txn->client = client_ctx;
- mgmt_be_batches_init(&txn->cfg_batches);
- mgmt_be_batches_init(&txn->apply_cfgs);
- mgmt_be_txns_add_tail(&client_ctx->txn_head, txn);
+ txn = XCALLOC(MTYPE_MGMTD_BE_TXN, sizeof(struct mgmt_be_txn_ctx));
+ txn->txn_id = txn_id;
+ txn->client = client_ctx;
+ mgmt_be_batches_init(&txn->cfg_batches);
+ mgmt_be_batches_init(&txn->apply_cfgs);
+ mgmt_be_txns_add_tail(&client_ctx->txn_head, txn);
- MGMTD_BE_CLIENT_DBG("Added new txn-id: %" PRIu64, txn_id);
- }
+ MGMTD_BE_CLIENT_DBG("Created new txn-id: %" PRIu64, txn_id);
return txn;
}
@@ -297,7 +292,7 @@ static void mgmt_be_cleanup_all_txns(struct mgmt_be_client *client_ctx)
}
static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx,
- uint64_t txn_id, bool create, bool success)
+ uint64_t txn_id, bool create)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeTxnReply txn_reply;
@@ -305,7 +300,7 @@ static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx,
mgmtd__be_txn_reply__init(&txn_reply);
txn_reply.create = create;
txn_reply.txn_id = txn_id;
- txn_reply.success = success;
+ txn_reply.success = true;
mgmtd__be_message__init(&be_msg);
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY;
@@ -321,44 +316,29 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client *client_ctx,
{
struct mgmt_be_txn_ctx *txn;
- txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
if (create) {
- if (txn) {
- /*
- * Transaction with same txn-id already exists.
- * Should not happen under any circumstances.
- */
- MGMTD_BE_CLIENT_ERR(
- "txn-id: %" PRIu64 " already exists", txn_id);
- mgmt_be_send_txn_reply(client_ctx, txn_id, create,
- false);
- }
+ MGMTD_BE_CLIENT_DBG("Creating new txn-id %" PRIu64, txn_id);
- MGMTD_BE_CLIENT_DBG("Created new txn-id %" PRIu64, txn_id);
txn = mgmt_be_txn_create(client_ctx, txn_id);
+ if (!txn)
+ goto failed;
if (client_ctx->cbs.txn_notify)
- (void)(*client_ctx->cbs.txn_notify)(
- client_ctx, client_ctx->user_data,
- &txn->client_data, false);
+ (*client_ctx->cbs.txn_notify)(client_ctx,
+ client_ctx->user_data,
+ &txn->client_data, false);
} else {
- if (!txn) {
- /*
- * Transaction with same txn-id does not exists.
- * Return sucess anyways.
- */
- MGMTD_BE_CLIENT_DBG("txn-id: %" PRIu64
- " for delete does NOT exists",
- txn_id);
- } else {
- MGMTD_BE_CLIENT_DBG("Delete txn-id: %" PRIu64, txn_id);
+ MGMTD_BE_CLIENT_DBG("Deleting txn-id: %" PRIu64, txn_id);
+ txn = mgmt_be_find_txn_by_id(client_ctx, txn_id, false);
+ if (txn)
mgmt_be_txn_delete(client_ctx, &txn);
- }
}
- mgmt_be_send_txn_reply(client_ctx, txn_id, create, true);
+ return mgmt_be_send_txn_reply(client_ctx, txn_id, create);
- return 0;
+failed:
+ msg_conn_disconnect(&client_ctx->client.conn, true);
+ return -1;
}
static int mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client *client_ctx,
@@ -610,25 +590,23 @@ static int mgmt_be_process_cfgdata_req(struct mgmt_be_client *client_ctx,
{
struct mgmt_be_txn_ctx *txn;
- txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
- if (!txn) {
- MGMTD_BE_CLIENT_ERR("Invalid txn-id: %" PRIu64
- " from MGMTD server",
- txn_id);
- mgmt_be_send_cfgdata_create_reply(
- client_ctx, txn_id, batch_id, false,
- "Transaction context not created yet");
- } else {
- mgmt_be_update_setcfg_in_batch(client_ctx, txn, batch_id,
- cfg_req, num_req);
- }
+ txn = mgmt_be_find_txn_by_id(client_ctx, txn_id, true);
+ if (!txn)
+ goto failed;
+
+ mgmt_be_update_setcfg_in_batch(client_ctx, txn, batch_id, cfg_req,
+ num_req);
if (txn && end_of_data) {
- MGMTD_BE_CLIENT_DBG("Triggering CFG_PREPARE_REQ processing");
- mgmt_be_txn_cfg_prepare(txn);
+ MGMTD_BE_CLIENT_DBG("End of data; CFG_PREPARE_REQ processing");
+ if (mgmt_be_txn_cfg_prepare(txn))
+ goto failed;
}
return 0;
+failed:
+ msg_conn_disconnect(&client_ctx->client.conn, true);
+ return -1;
}
static int mgmt_be_send_apply_reply(struct mgmt_be_client *client_ctx,
@@ -731,23 +709,28 @@ static int mgmt_be_process_cfg_apply(struct mgmt_be_client *client_ctx,
{
struct mgmt_be_txn_ctx *txn;
- txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
- if (!txn) {
- mgmt_be_send_apply_reply(client_ctx, txn_id, NULL, 0, false,
- "Transaction not created yet!");
- return -1;
- }
+ txn = mgmt_be_find_txn_by_id(client_ctx, txn_id, true);
+ if (!txn)
+ goto failed;
MGMTD_BE_CLIENT_DBG("Trigger CFG_APPLY_REQ processing");
- mgmt_be_txn_proc_cfgapply(txn);
+ if (mgmt_be_txn_proc_cfgapply(txn))
+ goto failed;
return 0;
+failed:
+ msg_conn_disconnect(&client_ctx->client.conn, true);
+ return -1;
}
+
static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
Mgmtd__BeMessage *be_msg)
{
/*
+ * On error we may have closed the connection so don't do anything with
+ * the client_ctx on return.
+ *
* protobuf-c adds a max size enum with an internal, and changing by
* version, name; cast to an int to avoid unhandled enum warnings
*/
@@ -888,6 +871,11 @@ static int _notify_conenct_disconnect(struct msg_client *msg_client,
if (client->cbs.client_connect_notify)
(void)(*client->cbs.client_connect_notify)(
client, client->user_data, connected);
+
+ /* Cleanup any in-progress TXN on disconnect */
+ if (!connected)
+ mgmt_be_cleanup_all_txns(client);
+
return 0;
}
diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h
index 4d8a1f51a1..4ad5ca5957 100644
--- a/lib/mgmt_be_client.h
+++ b/lib/mgmt_be_client.h
@@ -131,10 +131,20 @@ mgmt_be_client_name2id(const char *name)
return MGMTD_BE_CLIENT_ID_MAX;
}
+extern struct debug mgmt_dbg_be_client;
+
/***************************************************************
* API prototypes
***************************************************************/
+#define MGMTD_BE_CLIENT_DBG(fmt, ...) \
+ DEBUGD(&mgmt_dbg_be_client, "BE-CLIENT: %s: " fmt, __func__, \
+ ##__VA_ARGS__)
+#define MGMTD_BE_CLIENT_ERR(fmt, ...) \
+ zlog_err("BE-CLIENT: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_DBG_BE_CLIENT_CHECK() \
+ DEBUG_MODE_CHECK(&mgmt_dbg_be_client, DEBUG_MODE_ALL)
+
/**
* Create backend client and connect to MGMTD.
*
diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c
index 35a6d7d909..be7263f21b 100644
--- a/lib/mgmt_fe_client.c
+++ b/lib/mgmt_fe_client.c
@@ -50,6 +50,22 @@ struct mgmt_fe_client {
struct debug mgmt_dbg_fe_client = {0, "Management frontend client operations"};
+static inline const char *dsid2name(Mgmtd__DatastoreId id)
+{
+ switch ((int)id) {
+ case MGMTD_DS_NONE:
+ return "none";
+ case MGMTD_DS_RUNNING:
+ return "running";
+ case MGMTD_DS_CANDIDATE:
+ return "candidate";
+ case MGMTD_DS_OPERATIONAL:
+ return "operational";
+ default:
+ return "unknown-datastore-id";
+ }
+}
+
static struct mgmt_fe_client_session *
mgmt_fe_find_session_by_client_id(struct mgmt_fe_client *client,
uint64_t client_id)
@@ -124,18 +140,15 @@ static int mgmt_fe_send_session_req(struct mgmt_fe_client *client,
{
Mgmtd__FeMessage fe_msg;
Mgmtd__FeSessionReq sess_req;
- bool scok;
mgmtd__fe_session_req__init(&sess_req);
sess_req.create = create;
if (create) {
sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID;
sess_req.client_conn_id = session->client_id;
- scok = true;
} else {
sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_SESSION_ID;
sess_req.session_id = session->session_id;
- scok = false;
}
mgmtd__fe_message__init(&fe_msg);
@@ -146,7 +159,7 @@ static int mgmt_fe_send_session_req(struct mgmt_fe_client *client,
"Sending SESSION_REQ %s message for client-id %" PRIu64,
create ? "create" : "destroy", session->client_id);
- return mgmt_fe_client_send_msg(client, &fe_msg, scok);
+ return mgmt_fe_client_send_msg(client, &fe_msg, true);
}
int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client, uint64_t session_id,
@@ -168,8 +181,9 @@ int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client, uint64_t session_id,
fe_msg.lockds_req = &lockds_req;
MGMTD_FE_CLIENT_DBG(
- "Sending %sLOCK_REQ message for Ds:%d session-id %" PRIu64,
- lock ? "" : "UN", ds_id, session_id);
+ "Sending LOCKDS_REQ (%sLOCK) message for DS:%s session-id %" PRIu64,
+ lock ? "" : "UN", dsid2name(ds_id), session_id);
+
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
@@ -197,9 +211,9 @@ int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
fe_msg.setcfg_req = &setcfg_req;
MGMTD_FE_CLIENT_DBG(
- "Sending SET_CONFIG_REQ message for Ds:%d session-id %" PRIu64
+ "Sending SET_CONFIG_REQ message for DS:%s session-id %" PRIu64
" (#xpaths:%d)",
- ds_id, session_id, num_data_reqs);
+ dsid2name(ds_id), session_id, num_data_reqs);
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
@@ -227,8 +241,8 @@ int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
fe_msg.commcfg_req = &commitcfg_req;
MGMTD_FE_CLIENT_DBG(
- "Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session-id %" PRIu64,
- src_ds_id, dest_ds_id, session_id);
+ "Sending COMMIT_CONFIG_REQ message for Src-DS:%s, Dst-DS:%s session-id %" PRIu64,
+ dsid2name(src_ds_id), dsid2name(dest_ds_id), session_id);
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
@@ -254,9 +268,9 @@ int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
fe_msg.getcfg_req = &getcfg_req;
MGMTD_FE_CLIENT_DBG(
- "Sending GET_CONFIG_REQ message for Ds:%d session-id %" PRIu64
+ "Sending GET_CONFIG_REQ message for DS:%s session-id %" PRIu64
" (#xpaths:%d)",
- ds_id, session_id, num_data_reqs);
+ dsid2name(ds_id), session_id, num_data_reqs);
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
@@ -282,9 +296,9 @@ int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client, uint64_t session_id,
fe_msg.getdata_req = &getdata_req;
MGMTD_FE_CLIENT_DBG(
- "Sending GET_CONFIG_REQ message for Ds:%d session-id %" PRIu64
+ "Sending GET_CONFIG_REQ message for DS:%s session-id %" PRIu64
" (#xpaths:%d)",
- ds_id, session_id, num_data_reqs);
+ dsid2name(ds_id), session_id, num_data_reqs);
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h
index edf861746c..b0ac44bb3e 100644
--- a/lib/mgmt_fe_client.h
+++ b/lib/mgmt_fe_client.h
@@ -119,19 +119,18 @@ struct mgmt_fe_client_cbs {
extern struct debug mgmt_dbg_fe_client;
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
#define MGMTD_FE_CLIENT_DBG(fmt, ...) \
- DEBUGD(&mgmt_dbg_fe_client, "FE-CLIENT: %s:" fmt, __func__, \
+ DEBUGD(&mgmt_dbg_fe_client, "FE-CLIENT: %s: " fmt, __func__, \
##__VA_ARGS__)
#define MGMTD_FE_CLIENT_ERR(fmt, ...) \
zlog_err("FE-CLIENT: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_DBG_FE_CLIENT_CHECK() \
DEBUG_MODE_CHECK(&mgmt_dbg_fe_client, DEBUG_MODE_ALL)
-
-/***************************************************************
- * API prototypes
- ***************************************************************/
-
/*
* Initialize library and try connecting with MGMTD FrontEnd interface.
*
diff --git a/lib/mgmt_msg.c b/lib/mgmt_msg.c
index 0d9802a2b3..ba69c20aba 100644
--- a/lib/mgmt_msg.c
+++ b/lib/mgmt_msg.c
@@ -59,11 +59,12 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
*/
while (avail > sizeof(struct mgmt_msg_hdr)) {
n = stream_read_try(ms->ins, fd, avail);
- MGMT_MSG_DBG(dbgtag, "got %zd bytes", n);
/* -2 is normal nothing read, and to retry */
- if (n == -2)
+ if (n == -2) {
+ MGMT_MSG_DBG(dbgtag, "nothing more to read");
break;
+ }
if (n <= 0) {
if (n == 0)
MGMT_MSG_ERR(ms, "got EOF/disconnect");
@@ -73,6 +74,7 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
safe_strerror(errno));
return MSR_DISCONNECT;
}
+ MGMT_MSG_DBG(dbgtag, "read %zd bytes", n);
ms->nrxb += n;
avail -= n;
}
diff --git a/lib/northbound.c b/lib/northbound.c
index 775f6ff92f..ef2344ee11 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -792,18 +792,19 @@ static void nb_update_candidate_changes(struct nb_config *candidate,
LYD_TREE_DFS_BEGIN (root, dnode) {
op = nb_lyd_diff_get_op(dnode);
switch (op) {
- case 'c':
+ case 'c': /* create */
nb_config_diff_created(dnode, seq, cfg_chgs);
LYD_TREE_DFS_continue = 1;
break;
- case 'd':
+ case 'd': /* delete */
nb_config_diff_deleted(dnode, seq, cfg_chgs);
LYD_TREE_DFS_continue = 1;
break;
- case 'r':
+ case 'r': /* replace */
nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq,
dnode);
break;
+ case 'n': /* none */
default:
break;
}
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index e9c89d2029..9d6ec66689 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -202,7 +202,7 @@ int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
return CMD_SUCCESS;
implicit_commit = vty_needs_implicit_commit(vty);
- ret = vty_mgmt_send_config_data(vty);
+ ret = vty_mgmt_send_config_data(vty, implicit_commit);
if (ret >= 0 && !implicit_commit)
vty->mgmt_num_pending_setcfg++;
return ret;
@@ -229,9 +229,16 @@ int nb_cli_apply_changes_clear_pending(struct vty *vty,
if (vty_mgmt_should_process_cli_apply_changes(vty)) {
VTY_CHECK_XPATH;
-
+ /*
+ * The legacy user wanted to clear pending (i.e., perform a
+ * commit immediately) due to some non-yang compatible
+ * functionality. This new mgmtd code however, continues to send
+ * changes putting off the commit until XFRR_end is received
+ * (i.e., end-of-config-file). This should be fine b/c all
+ * conversions to mgmtd require full proper implementations.
+ */
implicit_commit = vty_needs_implicit_commit(vty);
- ret = vty_mgmt_send_config_data(vty);
+ ret = vty_mgmt_send_config_data(vty, implicit_commit);
if (ret >= 0 && !implicit_commit)
vty->mgmt_num_pending_setcfg++;
return ret;
diff --git a/lib/vty.c b/lib/vty.c
index b701f3bc0f..fd00e11c5f 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -134,18 +134,22 @@ void vty_mgmt_resume_response(struct vty *vty, bool success)
uint8_t header[4] = {0, 0, 0, 0};
int ret = CMD_SUCCESS;
- if (!vty->mgmt_req_pending) {
+ if (!vty->mgmt_req_pending_cmd) {
zlog_err(
- "vty response called without setting mgmt_req_pending");
+ "vty resume response called without mgmt_req_pending_cmd");
return;
}
if (!success)
ret = CMD_WARNING_CONFIG_FAILED;
- vty->mgmt_req_pending = false;
+ MGMTD_FE_CLIENT_DBG(
+ "resuming CLI cmd after %s on vty session-id: %" PRIu64
+ " with '%s'",
+ vty->mgmt_req_pending_cmd, vty->mgmt_session_id,
+ success ? "succeeded" : "failed");
- MGMTD_FE_CLIENT_DBG("resuming: %s:", success ? "succeeded" : "failed");
+ vty->mgmt_req_pending_cmd = NULL;
if (vty->type != VTY_FILE) {
header[3] = ret;
@@ -2217,6 +2221,8 @@ bool mgmt_vty_read_configs(void)
line_num = 0;
(void)config_from_file(vty, confp, &line_num);
count++;
+
+ fclose(confp);
}
snprintf(path, sizeof(path), "%s/mgmtd.conf", frr_sysconfdir);
@@ -2240,6 +2246,8 @@ bool mgmt_vty_read_configs(void)
line_num = 0;
(void)config_from_file(vty, confp, &line_num);
count++;
+
+ fclose(confp);
}
vty->pending_allowed = false;
@@ -2270,6 +2278,19 @@ static void vtysh_read(struct event *thread)
sock = EVENT_FD(thread);
vty = EVENT_ARG(thread);
+ /*
+ * This code looks like it can read multiple commands from the `buf`
+ * value returned by read(); however, it cannot in some cases.
+ *
+ * There are multiple paths out of the "copying to vty->buf" loop
+ * that lose any content not yet copied from the stack `buf`:
+ * `passfd` handling, `CMD_SUSPEND`, and finally the front-end for
+ * mgmtd case (generally this would be mgmtd itself). So these code
+ * paths are counting on vtysh not sending us more than 1 command
+ * line before waiting on the reply to that command.
+ */
+ assert(vty->type == VTY_SHELL_SERV);
+
if ((nbytes = read(sock, buf, VTY_READ_BUFSIZ)) <= 0) {
if (nbytes < 0) {
if (ERRNO_IO_RETRY(errno)) {
@@ -2344,8 +2365,13 @@ static void vtysh_read(struct event *thread)
/* with new infra we need to stop response till
* we get response through callback.
*/
- if (vty->mgmt_req_pending)
+ if (vty->mgmt_req_pending_cmd) {
+ MGMTD_FE_CLIENT_DBG(
+ "postpone CLI cmd response pending mgmtd %s on vty session-id %" PRIu64,
+ vty->mgmt_req_pending_cmd,
+ vty->mgmt_session_id);
return;
+ }
/* warning: watchfrr hardcodes this result write
*/
@@ -2419,7 +2445,16 @@ void vty_close(struct vty *vty)
vty->status = VTY_CLOSE;
+ /*
+ * If we reach here with pending config to commit we will be losing it
+ * so warn the user.
+ */
+ if (vty->mgmt_num_pending_setcfg)
+ MGMTD_FE_CLIENT_ERR(
+ "vty closed, uncommitted config will be lost.");
+
if (mgmt_fe_client && vty->mgmt_session_id) {
+ MGMTD_FE_CLIENT_DBG("closing vty session");
mgmt_fe_destroy_client_session(mgmt_fe_client,
vty->mgmt_client_id);
vty->mgmt_session_id = 0;
@@ -3440,7 +3475,9 @@ static void vty_mgmt_session_notify(struct mgmt_fe_client *client,
vty->mgmt_session_id = session_id;
} else {
vty->mgmt_session_id = 0;
- vty_close(vty);
+ /* We may come here by way of vty_close() and short-circuits */
+ if (vty->status != VTY_CLOSE)
+ vty_close(vty);
}
}
@@ -3609,13 +3646,13 @@ int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
return -1;
}
- vty->mgmt_req_pending = true;
+ vty->mgmt_req_pending_cmd = "MESSAGE_LOCKDS_REQ";
}
return 0;
}
-int vty_mgmt_send_config_data(struct vty *vty)
+int vty_mgmt_send_config_data(struct vty *vty, bool implicit_commit)
{
Mgmtd__YangDataValue value[VTY_MAXCFGCHANGES];
Mgmtd__YangData cfg_data[VTY_MAXCFGCHANGES];
@@ -3623,7 +3660,6 @@ int vty_mgmt_send_config_data(struct vty *vty)
Mgmtd__YangCfgDataReq *cfgreq[VTY_MAXCFGCHANGES] = {0};
size_t indx;
int cnt;
- bool implicit_commit = false;
if (vty->type == VTY_FILE) {
/*
@@ -3697,7 +3733,6 @@ int vty_mgmt_send_config_data(struct vty *vty)
}
vty->mgmt_req_id++;
- implicit_commit = vty_needs_implicit_commit(vty);
if (cnt && mgmt_fe_send_setcfg_req(
mgmt_fe_client, vty->mgmt_session_id,
vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq,
@@ -3709,7 +3744,7 @@ int vty_mgmt_send_config_data(struct vty *vty)
return -1;
}
- vty->mgmt_req_pending = true;
+ vty->mgmt_req_pending_cmd = "MESSAGE_SETCFG_REQ";
}
return 0;
@@ -3729,7 +3764,7 @@ int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
return -1;
}
- vty->mgmt_req_pending = true;
+ vty->mgmt_req_pending_cmd = "MESSAGE_COMMCFG_REQ";
vty->mgmt_num_pending_setcfg = 0;
}
@@ -3766,7 +3801,7 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
return -1;
}
- vty->mgmt_req_pending = true;
+ vty->mgmt_req_pending_cmd = "MESSAGE_GETCFG_REQ";
return 0;
}
@@ -3800,7 +3835,7 @@ int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
return -1;
}
- vty->mgmt_req_pending = true;
+ vty->mgmt_req_pending_cmd = "MESSAGE_GETDATA_REQ";
return 0;
}
diff --git a/lib/vty.h b/lib/vty.h
index 28f27d0d47..3b651d20a2 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -147,7 +147,6 @@ struct vty {
/* Dynamic transaction information. */
bool pending_allowed;
bool pending_commit;
- bool no_implicit_commit;
char *pending_cmds_buf;
size_t pending_cmds_buflen;
size_t pending_cmds_bufpos;
@@ -229,7 +228,7 @@ struct vty {
/* set when we have sent mgmtd a *REQ command in response to some vty
* CLI command and we are waiting on the reply so we can respond to the
* vty user. */
- bool mgmt_req_pending;
+ const char *mgmt_req_pending_cmd;
bool mgmt_locked_candidate_ds;
};
@@ -408,7 +407,7 @@ extern bool vty_mgmt_fe_enabled(void);
extern bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty);
extern bool mgmt_vty_read_configs(void);
-extern int vty_mgmt_send_config_data(struct vty *vty);
+extern int vty_mgmt_send_config_data(struct vty *vty, bool implicit_commit);
extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
bool abort);
extern int vty_mgmt_send_get_config(struct vty *vty,
@@ -422,11 +421,7 @@ extern void vty_mgmt_resume_response(struct vty *vty, bool success);
static inline bool vty_needs_implicit_commit(struct vty *vty)
{
- return (frr_get_cli_mode() == FRR_CLI_CLASSIC
- ? ((vty->pending_allowed || vty->no_implicit_commit)
- ? false
- : true)
- : false);
+ return frr_get_cli_mode() == FRR_CLI_CLASSIC && !vty->pending_allowed;
}
#ifdef __cplusplus
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
index 2d01f8ecad..49a307e9c2 100644
--- a/mgmtd/mgmt_be_adapter.c
+++ b/mgmtd/mgmt_be_adapter.c
@@ -20,7 +20,7 @@
#include "mgmtd/mgmt_be_adapter.h"
#define MGMTD_BE_ADAPTER_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_be, "BE-ADAPTER: %s:" fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_debug_be, "BE-ADAPTER: %s: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_BE_ADAPTER_ERR(fmt, ...) \
zlog_err("BE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
@@ -564,8 +564,8 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
return 0;
}
-static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, bool create)
+int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeTxnReq txn_req;
@@ -584,11 +584,10 @@ static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
return mgmt_be_adapter_send_msg(adapter, &be_msg);
}
-static int
-mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, uint64_t batch_id,
- Mgmtd__YangCfgDataReq **cfgdata_reqs,
- size_t num_reqs, bool end_of_data)
+int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataCreateReq cfgdata_req;
@@ -612,8 +611,8 @@ mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter,
return mgmt_be_adapter_send_msg(adapter, &be_msg);
}
-static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
+int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataApplyReq apply_req;
@@ -834,35 +833,6 @@ int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
return 0;
}
-int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
-{
- return mgmt_be_send_txn_req(adapter, txn_id, true);
-}
-
-int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
-{
- return mgmt_be_send_txn_req(adapter, txn_id, false);
-}
-
-int mgmt_be_send_cfg_data_create_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, uint64_t batch_id,
- struct mgmt_be_cfgreq *cfg_req,
- bool end_of_data)
-{
- return mgmt_be_send_cfgdata_create_req(
- adapter, txn_id, batch_id, cfg_req->cfgdata_reqs,
- cfg_req->num_reqs, end_of_data);
-}
-
-extern int
-mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
-{
- return mgmt_be_send_cfgapply_req(adapter, txn_id);
-}
-
void mgmt_be_get_subscr_info_for_xpath(
const char *xpath, struct mgmt_be_client_subscr_info *subscr_info)
{
diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h
index 8f4eef5fb3..e1676e63af 100644
--- a/mgmtd/mgmt_be_adapter.h
+++ b/mgmtd/mgmt_be_adapter.h
@@ -115,13 +115,9 @@ mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
struct mgmt_ds_ctx *ds_ctx,
struct nb_config_cbs **cfg_chgs);
-/* Create a transaction. */
-extern int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id);
-
-/* Destroy a transaction. */
-extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id);
+/* Create/destroy a transaction. */
+extern int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create);
/*
* Send config data create request to backend client.
@@ -135,8 +131,11 @@ extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
* batch_id
* Request batch ID.
*
- * cfg_req
- * Config data request.
+ * cfgdata_reqs
+ * An array of pointer to Mgmtd__YangCfgDataReq.
+ *
+ * num_reqs
+ * Length of the cfgdata_reqs array.
*
* end_of_data
* TRUE if the data from last batch, FALSE otherwise.
@@ -144,37 +143,15 @@ extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
* Returns:
* 0 on success, -1 on failure.
*/
-extern int mgmt_be_send_cfg_data_create_req(
- struct mgmt_be_client_adapter *adapter, uint64_t txn_id,
- uint64_t batch_id, struct mgmt_be_cfgreq *cfg_req, bool end_of_data);
-
-/*
- * Send config validate request to backend client.
- *
- * adaptr
- * Backend adapter information.
- *
- * txn_id
- * Unique transaction identifier.
- *
- * batch_ids
- * List of request batch IDs.
- *
- * num_batch_ids
- * Number of batch ids.
- *
- * Returns:
- * 0 on success, -1 on failure.
- */
-extern int
-mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, uint64_t batch_ids[],
- size_t num_batch_ids);
+extern int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data);
/*
* Send config apply request to backend client.
*
- * adaptr
+ * adapter
* Backend adapter information.
*
* txn_id
@@ -183,9 +160,8 @@ mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter,
* Returns:
* 0 on success, -1 on failure.
*/
-extern int
-mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id);
+extern int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
/*
* Dump backend adapter status to vty.
diff --git a/mgmtd/mgmt_ds.c b/mgmtd/mgmt_ds.c
index 3fd47862b2..5a4b00d309 100644
--- a/mgmtd/mgmt_ds.c
+++ b/mgmtd/mgmt_ds.c
@@ -16,7 +16,7 @@
#include "libyang/libyang.h"
#define MGMTD_DS_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_ds, "%s:" fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_debug_ds, "DS: %s: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_DS_ERR(fmt, ...) \
zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
@@ -80,7 +80,9 @@ static int mgmt_ds_replace_dst_with_src_ds(struct mgmt_ds_ctx *src,
if (!src || !dst)
return -1;
- MGMTD_DS_DBG("Replacing %d with %d", dst->ds_id, src->ds_id);
+
+ MGMTD_DS_DBG("Replacing %s with %s", mgmt_ds_id2name(dst->ds_id),
+ mgmt_ds_id2name(src->ds_id));
src_dnode = src->config_ds ? src->root.cfg_root->dnode
: dst->root.dnode_root;
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
index 7509d24a6a..e9cbd444e8 100644
--- a/mgmtd/mgmt_fe_adapter.c
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -17,11 +17,12 @@
#include "hash.h"
#include "jhash.h"
#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
#include "mgmtd/mgmt_memory.h"
#include "mgmtd/mgmt_fe_adapter.h"
#define MGMTD_FE_ADAPTER_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_fe, "FE-ADAPTER: %s:" fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_debug_fe, "FE-ADAPTER: %s: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
zlog_err("FE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
@@ -74,18 +75,19 @@ mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
if (!session->ds_write_locked[ds_id]) {
if (mgmt_ds_write_lock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to lock the DS %u for session-id: %" PRIu64
+ "Failed to lock the DS:%s for session-id: %" PRIu64
" from %s!",
- ds_id, session->session_id,
+ mgmt_ds_id2name(ds_id), session->session_id,
session->adapter->name);
return -1;
}
session->ds_write_locked[ds_id] = true;
MGMTD_FE_ADAPTER_DBG(
- "Write-Locked the DS %u for session-id: %" PRIu64
+ "Write-Locked the DS:%s for session-id: %" PRIu64
" from %s",
- ds_id, session->session_id, session->adapter->name);
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
}
return 0;
@@ -99,18 +101,19 @@ mgmt_fe_session_read_lock_ds(Mgmtd__DatastoreId ds_id,
if (!session->ds_read_locked[ds_id]) {
if (mgmt_ds_read_lock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to lock the DS %u for session-is: %" PRIu64
+ "Failed to lock the DS:%s for session-id: %" PRIu64
" from %s",
- ds_id, session->session_id,
+ mgmt_ds_id2name(ds_id), session->session_id,
session->adapter->name);
return -1;
}
session->ds_read_locked[ds_id] = true;
MGMTD_FE_ADAPTER_DBG(
- "Read-Locked the DS %u for session-id: %" PRIu64
+ "Read-Locked the DS:%s for session-id: %" PRIu64
" from %s",
- ds_id, session->session_id, session->adapter->name);
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
}
return 0;
@@ -126,33 +129,35 @@ static int mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
session->ds_locked_implict[ds_id] = false;
if (mgmt_ds_unlock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to unlock the DS %u taken earlier by session-id: %" PRIu64
+ "Failed to unlock the DS:%s taken earlier by session-id: %" PRIu64
" from %s",
- ds_id, session->session_id,
+ mgmt_ds_id2name(ds_id), session->session_id,
session->adapter->name);
return -1;
}
MGMTD_FE_ADAPTER_DBG(
- "Unlocked DS %u write-locked earlier by session-id: %" PRIu64
+ "Unlocked DS:%s write-locked earlier by session-id: %" PRIu64
" from %s",
- ds_id, session->session_id, session->adapter->name);
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
} else if (unlock_read && session->ds_read_locked[ds_id]) {
session->ds_read_locked[ds_id] = false;
session->ds_locked_implict[ds_id] = false;
if (mgmt_ds_unlock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to unlock the DS %u taken earlier by session-id: %" PRIu64
+ "Failed to unlock the DS:%s taken earlier by session-id: %" PRIu64
" from %s",
- ds_id, session->session_id,
+ mgmt_ds_id2name(ds_id), session->session_id,
session->adapter->name);
return -1;
}
MGMTD_FE_ADAPTER_DBG(
- "Unlocked DS %u read-locked earlier by session-id: %" PRIu64
+ "Unlocked DS:%s read-locked earlier by session-id: %" PRIu64
" from %s",
- ds_id, session->session_id, session->adapter->name);
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
}
return 0;
@@ -1148,7 +1153,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
}
/*
- * Next check first if the COMMCFG_REQ is for Candidate DS
+ * Next check first if the COMMCFG_REQ is for running DS
* or not. Report failure if its not. MGMTD currently only
* supports editing the Candidate DS.
*/
@@ -1278,10 +1283,10 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->lockds_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got %sLOCKDS_REQ for DS:%d for session-id %" PRIu64
+ "Got LOCKDS_REQ (%sLOCK) for DS:%s for session-id %" PRIu64
" from '%s'",
fe_msg->lockds_req->lock ? "" : "UN",
- fe_msg->lockds_req->ds_id,
+ mgmt_ds_id2name(fe_msg->lockds_req->ds_id),
fe_msg->lockds_req->session_id, adapter->name);
mgmt_fe_session_handle_lockds_req_msg(
session, fe_msg->lockds_req);
@@ -1291,11 +1296,11 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
fe_msg->setcfg_req->session_id);
session->adapter->setcfg_stats.set_cfg_count++;
MGMTD_FE_ADAPTER_DBG(
- "Got SETCFG_REQ (%d Xpaths, Implicit:%c) on DS:%d for session-id %" PRIu64
+ "Got SETCFG_REQ (%d Xpaths, Implicit:%c) on DS:%s for session-id %" PRIu64
" from '%s'",
(int)fe_msg->setcfg_req->n_data,
fe_msg->setcfg_req->implicit_commit ? 'T' : 'F',
- fe_msg->setcfg_req->ds_id,
+ mgmt_ds_id2name(fe_msg->setcfg_req->ds_id),
fe_msg->setcfg_req->session_id, adapter->name);
mgmt_fe_session_handle_setcfg_req_msg(
@@ -1305,10 +1310,10 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->commcfg_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got COMMCFG_REQ for src-DS:%d dst-DS:%d (Abort:%c) on session-id %" PRIu64
+ "Got COMMCFG_REQ for src-DS:%s dst-DS:%s (Abort:%c) on session-id %" PRIu64
" from '%s'",
- fe_msg->commcfg_req->src_ds_id,
- fe_msg->commcfg_req->dst_ds_id,
+ mgmt_ds_id2name(fe_msg->commcfg_req->src_ds_id),
+ mgmt_ds_id2name(fe_msg->commcfg_req->dst_ds_id),
fe_msg->commcfg_req->abort ? 'T' : 'F',
fe_msg->commcfg_req->session_id, adapter->name);
mgmt_fe_session_handle_commit_config_req_msg(
@@ -1318,9 +1323,9 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->getcfg_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got GETCFG_REQ for DS:%d (xpaths: %d) on session-id %" PRIu64
+ "Got GETCFG_REQ for DS:%s (xpaths: %d) on session-id %" PRIu64
" from '%s'",
- fe_msg->getcfg_req->ds_id,
+ mgmt_ds_id2name(fe_msg->getcfg_req->ds_id),
(int)fe_msg->getcfg_req->n_data,
fe_msg->getcfg_req->session_id, adapter->name);
mgmt_fe_session_handle_getcfg_req_msg(
@@ -1330,9 +1335,9 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->getdata_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got GETDATA_REQ for DS:%d (xpaths: %d) on session-id %" PRIu64
+ "Got GETDATA_REQ for DS:%s (xpaths: %d) on session-id %" PRIu64
" from '%s'",
- fe_msg->getdata_req->ds_id,
+ mgmt_ds_id2name(fe_msg->getdata_req->ds_id),
(int)fe_msg->getdata_req->n_data,
fe_msg->getdata_req->session_id, adapter->name);
mgmt_fe_session_handle_getdata_req_msg(
diff --git a/mgmtd/mgmt_history.c b/mgmtd/mgmt_history.c
index ab84b1efcf..54eb45fdf4 100644
--- a/mgmtd/mgmt_history.c
+++ b/mgmtd/mgmt_history.c
@@ -248,7 +248,7 @@ static int mgmt_history_rollback_to_cmt(struct vty *vty,
* is completed. On rollback completion mgmt_history_rollback_complete()
* shall be called to resume the rollback command return to VTYSH.
*/
- vty->mgmt_req_pending = true;
+ vty->mgmt_req_pending_cmd = "ROLLBACK";
rollback_vty = vty;
return 0;
}
diff --git a/mgmtd/mgmt_history.h b/mgmtd/mgmt_history.h
index d3f7958952..5d9b662694 100644
--- a/mgmtd/mgmt_history.h
+++ b/mgmtd/mgmt_history.h
@@ -74,9 +74,11 @@ mgmt_time_to_string(struct timespec *tv, bool long_fmt, char *buffer, size_t sz)
if (long_fmt) {
n = strftime(buffer, sz, MGMT_LONG_TIME_FMT, &tm);
+ assert(n < sz);
snprintf(&buffer[n], sz - n, ",%09lu", tv->tv_nsec);
} else {
n = strftime(buffer, sz, MGMT_SHORT_TIME_FMT, &tm);
+ assert(n < sz);
snprintf(&buffer[n], sz - n, "%09lu", tv->tv_nsec);
}
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
index 3d818cb4c2..e64cbe1425 100644
--- a/mgmtd/mgmt_txn.c
+++ b/mgmtd/mgmt_txn.c
@@ -15,7 +15,7 @@
#include "mgmtd/mgmt_txn.h"
#define MGMTD_TXN_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_txn, "%s:" fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_debug_txn, "TXN: %s: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_TXN_ERR(fmt, ...) \
zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
@@ -494,6 +494,8 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
struct mgmt_txn_reqs_head *pending_list = NULL;
enum mgmt_be_client_id id;
struct mgmt_be_client_adapter *adapter;
+ struct mgmt_commit_cfg_req *ccreq;
+ bool cleanup;
switch ((*txn_req)->req_event) {
case MGMTD_TXN_PROC_SETCFG:
@@ -526,32 +528,38 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
MGMTD_TXN_DBG("Deleting COMMITCFG req-id: %" PRIu64
" txn-id: %" PRIu64,
(*txn_req)->req_id, (*txn_req)->txn->txn_id);
+
+ ccreq = &(*txn_req)->req.commit_cfg;
+ cleanup = (ccreq->curr_phase >= MGMTD_COMMIT_PHASE_TXN_CREATE &&
+ ccreq->curr_phase < MGMTD_COMMIT_PHASE_TXN_DELETE);
+
FOREACH_MGMTD_BE_CLIENT_ID (id) {
/*
* Send TXN_DELETE to cleanup state for this
* transaction on backend
*/
- if ((*txn_req)->req.commit_cfg.curr_phase >=
- MGMTD_COMMIT_PHASE_TXN_CREATE &&
- (*txn_req)->req.commit_cfg.curr_phase <
- MGMTD_COMMIT_PHASE_TXN_DELETE &&
- (*txn_req)
- ->req.commit_cfg.subscr_info
- .xpath_subscr[id]) {
- adapter = mgmt_be_get_adapter_by_id(id);
- if (adapter)
- mgmt_txn_send_be_txn_delete(
- (*txn_req)->txn, adapter);
- }
- mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
- id);
- if ((*txn_req)->req.commit_cfg.batches) {
- hash_clean((*txn_req)->req.commit_cfg.batches,
+ /*
+ * Get rid of the batches first so we don't end up doing
+ * anything more with them
+ */
+ mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn, id);
+ if (ccreq->batches) {
+ hash_clean(ccreq->batches,
mgmt_txn_cfgbatch_hash_free);
- hash_free((*txn_req)->req.commit_cfg.batches);
- (*txn_req)->req.commit_cfg.batches = NULL;
+ hash_free(ccreq->batches);
+ ccreq->batches = NULL;
}
+
+ /*
+ * If we were in the middle of the state machine then
+ * send a txn delete message
+ */
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter && cleanup &&
+ ccreq->subscr_info.xpath_subscr[id])
+ mgmt_txn_send_be_txn_delete((*txn_req)->txn,
+ adapter);
}
break;
case MGMTD_TXN_PROC_GETCFG:
@@ -635,7 +643,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
txn->session_id);
FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
- error = false;
assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
ds_ctx = txn_req->req.set_cfg->ds_ctx;
if (!ds_ctx) {
@@ -644,7 +651,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
txn_req->req.set_cfg->ds_id, txn_req->req_id,
MGMTD_INTERNAL_ERROR, "No such datastore!",
txn_req->req.set_cfg->implicit_commit);
- error = true;
goto mgmt_txn_process_set_cfg_done;
}
@@ -656,7 +662,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
MGMTD_INTERNAL_ERROR,
"Unable to retrieve DS Config Tree!",
txn_req->req.set_cfg->implicit_commit);
- error = true;
goto mgmt_txn_process_set_cfg_done;
}
@@ -713,7 +718,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
"Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
" session-id: %" PRIu64,
txn->txn_id, txn->session_id);
- error = true;
}
mgmt_txn_process_set_cfg_done:
@@ -1122,7 +1126,6 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
}
free(xpath);
- xpath = NULL;
}
cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
@@ -1338,8 +1341,7 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
FOREACH_MGMTD_BE_CLIENT_ID (id) {
if (cmtcfg_req->subscr_info.xpath_subscr[id]) {
adapter = mgmt_be_get_adapter_by_id(id);
- if (mgmt_be_create_txn(adapter, txn->txn_id)
- != 0) {
+ if (mgmt_be_send_txn_req(adapter, txn->txn_id, true)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Could not send TXN_CREATE to backend adapter");
@@ -1372,9 +1374,8 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
return 0;
}
-static int
-mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
- struct mgmt_be_client_adapter *adapter)
+static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter)
{
struct mgmt_commit_cfg_req *cmtcfg_req;
struct mgmt_txn_be_cfg_batch *cfg_btch;
@@ -1396,10 +1397,10 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
cfg_req.num_reqs = cfg_btch->num_cfg_data;
indx++;
- if (mgmt_be_send_cfg_data_create_req(
- adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
- indx == num_batches ? true : false)
- != 0) {
+ if (mgmt_be_send_cfgdata_req(
+ adapter, txn->txn_id, cfg_btch->batch_id,
+ cfg_req.cfgdata_reqs, cfg_req.num_reqs,
+ indx == num_batches ? true : false)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Internal Error! Could not send config data to backend!");
@@ -1419,7 +1420,7 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
}
/*
- * This could ne the last Backend Client to send CFGDATA_CREATE_REQ to.
+ * This could be the last Backend Client to send CFGDATA_CREATE_REQ to.
* Try moving the commit to next phase.
*/
mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
@@ -1431,24 +1432,16 @@ static int
mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
struct mgmt_be_client_adapter *adapter)
{
- struct mgmt_commit_cfg_req *cmtcfg_req;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_commit_cfg_req *cmtcfg_req =
+ &txn->commit_cfg_req->req.commit_cfg;
- assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+ assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+ assert(!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]));
- cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
- if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id]) {
- adapter = mgmt_be_get_adapter_by_id(adapter->id);
- (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
-
- FOREACH_TXN_CFG_BATCH_IN_LIST (
- &txn->commit_cfg_req->req.commit_cfg
- .curr_batches[adapter->id],
- cfg_btch)
- cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
- }
+ if (!cmtcfg_req->subscr_info.xpath_subscr[adapter->id])
+ return 0;
- return 0;
+ return mgmt_be_send_txn_req(adapter, txn->txn_id, false);
}
static void mgmt_txn_cfg_commit_timedout(struct event *thread)
@@ -1512,8 +1505,7 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
return -1;
btch_list = &cmtcfg_req->curr_batches[id];
- if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
- != 0) {
+ if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Could not send CFG_APPLY_REQ to backend adapter");
@@ -2261,11 +2253,6 @@ uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
}
-bool mgmt_txn_id_is_valid(uint64_t txn_id)
-{
- return mgmt_txn_id2ctx(txn_id) ? true : false;
-}
-
void mgmt_destroy_txn(uint64_t *txn_id)
{
struct mgmt_txn_ctx *txn;
@@ -2278,17 +2265,6 @@ void mgmt_destroy_txn(uint64_t *txn_id)
*txn_id = MGMTD_TXN_ID_NONE;
}
-enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
-{
- struct mgmt_txn_ctx *txn;
-
- txn = mgmt_txn_id2ctx(txn_id);
- if (!txn)
- return MGMTD_TXN_TYPE_NONE;
-
- return txn->type;
-}
-
int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
Mgmtd__DatastoreId ds_id,
struct mgmt_ds_ctx *ds_ctx,
@@ -2542,7 +2518,7 @@ int mgmt_txn_notify_be_cfgdata_reply(
{
struct mgmt_txn_ctx *txn;
struct mgmt_txn_be_cfg_batch *cfg_btch;
- struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
txn = mgmt_txn_id2ctx(txn_id);
if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
@@ -2642,26 +2618,6 @@ int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
return 0;
}
-int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
- enum mgmt_result result,
- const char *error_if_any)
-{
- struct mgmt_txn_ctx *txn;
-
- txn = mgmt_txn_id2ctx(txn_id);
- if (!txn)
- return -1;
-
- if (!txn->commit_cfg_req) {
- MGMTD_TXN_ERR("NO commit in-progress txn-id: %" PRIu64
- " session-id: %" PRIu64,
- txn->txn_id, txn->session_id);
- return -1;
- }
-
- return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
-}
-
int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
Mgmtd__DatastoreId ds_id,
struct mgmt_ds_ctx *ds_ctx,
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
index be781ab954..1a9f6d8502 100644
--- a/mgmtd/mgmt_txn.h
+++ b/mgmtd/mgmt_txn.h
@@ -101,16 +101,6 @@ extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type);
extern void mgmt_destroy_txn(uint64_t *txn_id);
/*
- * Check if transaction is valid given an ID.
- */
-extern bool mgmt_txn_id_is_valid(uint64_t txn_id);
-
-/*
- * Returns the type of transaction given an ID.
- */
-extern enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id);
-
-/*
* Send set-config request to be processed later in transaction.
*
* txn_id
@@ -186,10 +176,6 @@ extern int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
bool validate_only, bool abort,
bool implicit);
-extern int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
- enum mgmt_result result,
- const char *error_if_any);
-
/*
* Send get-config request to be processed later in transaction.
*
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
index 93c5145d71..6a6f32353d 100644
--- a/mgmtd/mgmt_vty.c
+++ b/mgmtd/mgmt_vty.c
@@ -157,9 +157,7 @@ DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
vty->cfg_changes[0].operation = NB_OP_CREATE;
vty->num_cfg_changes = 1;
- vty->no_implicit_commit = true;
- vty_mgmt_send_config_data(vty);
- vty->no_implicit_commit = false;
+ vty_mgmt_send_config_data(vty, false);
return CMD_SUCCESS;
}
@@ -176,9 +174,7 @@ DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
vty->cfg_changes[0].operation = NB_OP_DESTROY;
vty->num_cfg_changes = 1;
- vty->no_implicit_commit = true;
- vty_mgmt_send_config_data(vty);
- vty->no_implicit_commit = false;
+ vty_mgmt_send_config_data(vty, false);
return CMD_SUCCESS;
}
diff --git a/ospfd/ospf_interface.c b/ospfd/ospf_interface.c
index 2c66cb3cfc..9e6acdbf0d 100644
--- a/ospfd/ospf_interface.c
+++ b/ospfd/ospf_interface.c
@@ -1346,18 +1346,28 @@ static int ospf_ifp_create(struct interface *ifp)
if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE))
zlog_debug(
- "Zebra: interface add %s vrf %s[%u] index %d flags %llx metric %d mtu %d speed %u",
+ "Zebra: interface add %s vrf %s[%u] index %d flags %llx metric %d mtu %d speed %u status 0x%x",
ifp->name, ifp->vrf->name, ifp->vrf->vrf_id,
ifp->ifindex, (unsigned long long)ifp->flags,
- ifp->metric, ifp->mtu, ifp->speed);
+ ifp->metric, ifp->mtu, ifp->speed, ifp->status);
assert(ifp->info);
oii = ifp->info;
oii->curr_mtu = ifp->mtu;
- if (IF_DEF_PARAMS(ifp)
- && !OSPF_IF_PARAM_CONFIGURED(IF_DEF_PARAMS(ifp), type)) {
+ /* Change ospf type param based on following
+ * condition:
+ * ospf type params is not set (first creation),
+ * OR ospf param type is changed based on
+ * link event, currently only handle for
+ * loopback interface type, for other ospf interface,
+ * type can be set from user config which needs to be
+ * preserved.
+ */
+ if (IF_DEF_PARAMS(ifp) &&
+ (!OSPF_IF_PARAM_CONFIGURED(IF_DEF_PARAMS(ifp), type) ||
+ if_is_loopback(ifp))) {
SET_IF_PARAM(IF_DEF_PARAMS(ifp), type);
IF_DEF_PARAMS(ifp)->type = ospf_default_iftype(ifp);
}
diff --git a/ospfd/ospf_route.c b/ospfd/ospf_route.c
index 75868056ad..cdb1eb0095 100644
--- a/ospfd/ospf_route.c
+++ b/ospfd/ospf_route.c
@@ -684,6 +684,8 @@ void ospf_intra_add_stub(struct route_table *rt, struct router_lsa_link *link,
__func__);
}
}
+ if (rn->info)
+ ospf_route_free(rn->info);
rn->info = or ;
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 0ee42e0e70..ff17b147e4 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -3621,6 +3621,8 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
struct route_node *rn;
uint32_t bandwidth = ifp->bandwidth ? ifp->bandwidth : ifp->speed;
struct ospf_if_params *params;
+ json_object *json_ois = NULL;
+ json_object *json_oi = NULL;
/* Is interface up? */
if (use_json) {
@@ -3671,17 +3673,32 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
}
}
+ if (use_json) {
+ json_ois = json_object_new_object();
+ json_object_object_add(json_interface_sub, "interfaceIp",
+ json_ois);
+ }
+
for (rn = route_top(IF_OIFS(ifp)); rn; rn = route_next(rn)) {
struct ospf_interface *oi = rn->info;
if (oi == NULL)
continue;
+#if CONFDATE > 20240601
+ CPP_NOTICE(
+ "Use all fields following ospfEnabled from interfaceIp hierarchy")
+#endif
+
+ json_oi = json_object_new_object();
+
if (CHECK_FLAG(oi->connected->flags, ZEBRA_IFA_UNNUMBERED)) {
- if (use_json)
+ if (use_json) {
json_object_boolean_true_add(json_interface_sub,
"ifUnnumbered");
- else
+ json_object_boolean_true_add(json_oi,
+ "ifUnnumbered");
+ } else
vty_out(vty, " This interface is UNNUMBERED,");
} else {
struct in_addr dest;
@@ -3695,6 +3712,13 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
json_object_int_add(json_interface_sub,
"ipAddressPrefixlen",
oi->address->prefixlen);
+
+ json_object_string_addf(
+ json_oi, "ipAddress", "%pI4",
+ &oi->address->u.prefix4);
+ json_object_int_add(json_oi,
+ "ipAddressPrefixlen",
+ oi->address->prefixlen);
} else
vty_out(vty, " Internet Address %pFX,",
oi->address);
@@ -3717,17 +3741,29 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
}
if (use_json) {
- json_object_string_add(
- json_interface_sub,
- "ospfIfType", dstr);
- if (oi->type == OSPF_IFTYPE_VIRTUALLINK)
+ json_object_string_add(json_interface_sub,
+ "ospfIfType", dstr);
+
+ json_object_string_add(json_oi, "ospfIfType",
+ dstr);
+
+ if (oi->type == OSPF_IFTYPE_VIRTUALLINK) {
json_object_string_addf(
json_interface_sub, "vlinkPeer",
"%pI4", &dest);
- else
+
+ json_object_string_addf(json_oi,
+ "vlinkPeer",
+ "%pI4", &dest);
+ } else {
json_object_string_addf(
json_interface_sub,
"localIfUsed", "%pI4", &dest);
+
+ json_object_string_addf(json_oi,
+ "localIfUsed",
+ "%pI4", &dest);
+ }
} else
vty_out(vty, " %s %pI4,", dstr,
&dest);
@@ -3735,10 +3771,18 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
if (use_json) {
json_object_string_add(json_interface_sub, "area",
ospf_area_desc_string(oi->area));
- if (OSPF_IF_PARAM(oi, mtu_ignore))
+
+ json_object_string_add(json_oi, "area",
+ ospf_area_desc_string(oi->area));
+
+ if (OSPF_IF_PARAM(oi, mtu_ignore)) {
+ json_object_boolean_true_add(
+ json_oi, "mtuMismatchDetect");
json_object_boolean_true_add(
json_interface_sub,
"mtuMismatchDetect");
+ }
+
json_object_string_addf(json_interface_sub, "routerId",
"%pI4", &ospf->router_id);
json_object_string_add(json_interface_sub,
@@ -3746,14 +3790,26 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
ospf_network_type_str[oi->type]);
json_object_int_add(json_interface_sub, "cost",
oi->output_cost);
- json_object_int_add(
- json_interface_sub, "transmitDelaySecs",
- OSPF_IF_PARAM(oi, transmit_delay));
+ json_object_int_add(json_interface_sub,
+ "transmitDelaySecs",
+ OSPF_IF_PARAM(oi, transmit_delay));
json_object_string_add(json_interface_sub, "state",
lookup_msg(ospf_ism_state_msg,
oi->state, NULL));
json_object_int_add(json_interface_sub, "priority",
PRIORITY(oi));
+
+ json_object_string_addf(json_oi, "routerId", "%pI4",
+ &ospf->router_id);
+ json_object_string_add(json_oi, "networkType",
+ ospf_network_type_str[oi->type]);
+ json_object_int_add(json_oi, "cost", oi->output_cost);
+ json_object_int_add(json_oi, "transmitDelaySecs",
+ OSPF_IF_PARAM(oi, transmit_delay));
+ json_object_string_add(json_oi, "state",
+ lookup_msg(ospf_ism_state_msg,
+ oi->state, NULL));
+ json_object_int_add(json_oi, "priority", PRIORITY(oi));
} else {
vty_out(vty, " Area %s\n",
ospf_area_desc_string(oi->area));
@@ -3791,6 +3847,13 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
json_interface_sub, "drAddress",
"%pI4",
&nbr->address.u.prefix4);
+
+ json_object_string_addf(
+ json_oi, "drId", "%pI4",
+ &nbr->router_id);
+ json_object_string_addf(
+ json_oi, "drAddress", "%pI4",
+ &nbr->address.u.prefix4);
} else {
vty_out(vty,
" Designated Router (ID) %pI4",
@@ -3816,6 +3879,13 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
json_interface_sub,
"bdrAddress", "%pI4",
&nbr->address.u.prefix4);
+
+ json_object_string_addf(
+ json_oi, "bdrId", "%pI4",
+ &nbr->router_id);
+ json_object_string_addf(
+ json_oi, "bdrAddress", "%pI4",
+ &nbr->address.u.prefix4);
} else {
vty_out(vty,
" Backup Designated Router (ID) %pI4,",
@@ -3831,28 +3901,43 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
if (oi->params
&& ntohl(oi->params->network_lsa_seqnum)
!= OSPF_INITIAL_SEQUENCE_NUMBER) {
- if (use_json)
+ if (use_json) {
json_object_int_add(
json_interface_sub,
"networkLsaSequence",
ntohl(oi->params->network_lsa_seqnum));
- else
+
+ json_object_int_add(
+ json_oi, "networkLsaSequence",
+ ntohl(oi->params->network_lsa_seqnum));
+ } else {
vty_out(vty,
" Saved Network-LSA sequence number 0x%x\n",
ntohl(oi->params->network_lsa_seqnum));
+ }
}
if (use_json) {
if (OI_MEMBER_CHECK(oi, MEMBER_ALLROUTERS)
|| OI_MEMBER_CHECK(oi, MEMBER_DROUTERS)) {
- if (OI_MEMBER_CHECK(oi, MEMBER_ALLROUTERS))
+ if (OI_MEMBER_CHECK(oi, MEMBER_ALLROUTERS)) {
json_object_boolean_true_add(
json_interface_sub,
"mcastMemberOspfAllRouters");
- if (OI_MEMBER_CHECK(oi, MEMBER_DROUTERS))
+
+ json_object_boolean_true_add(
+ json_oi,
+ "mcastMemberOspfAllRouters");
+ }
+ if (OI_MEMBER_CHECK(oi, MEMBER_DROUTERS)) {
json_object_boolean_true_add(
json_interface_sub,
"mcastMemberOspfDesignatedRouters");
+
+ json_object_boolean_true_add(
+ json_oi,
+ "mcastMemberOspfDesignatedRouters");
+ }
}
} else {
vty_out(vty, " Multicast group memberships:");
@@ -3868,23 +3953,38 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
}
if (use_json) {
- if (OSPF_IF_PARAM(oi, fast_hello) == 0)
+ if (OSPF_IF_PARAM(oi, fast_hello) == 0) {
json_object_int_add(
json_interface_sub, "timerMsecs",
OSPF_IF_PARAM(oi, v_hello) * 1000);
- else
+
+ json_object_int_add(json_oi, "timerMsecs",
+ OSPF_IF_PARAM(oi, v_hello) *
+ 1000);
+ } else {
json_object_int_add(
json_interface_sub, "timerMsecs",
1000 / OSPF_IF_PARAM(oi, fast_hello));
- json_object_int_add(json_interface_sub,
- "timerDeadSecs",
+
+ json_object_int_add(
+ json_oi, "timerMsecs",
+ 1000 / OSPF_IF_PARAM(oi, fast_hello));
+ }
+ json_object_int_add(json_interface_sub, "timerDeadSecs",
OSPF_IF_PARAM(oi, v_wait));
- json_object_int_add(json_interface_sub,
- "timerWaitSecs",
+ json_object_int_add(json_interface_sub, "timerWaitSecs",
OSPF_IF_PARAM(oi, v_wait));
json_object_int_add(
json_interface_sub, "timerRetransmitSecs",
OSPF_IF_PARAM(oi, retransmit_interval));
+
+ json_object_int_add(json_oi, "timerDeadSecs",
+ OSPF_IF_PARAM(oi, v_wait));
+ json_object_int_add(json_oi, "timerWaitSecs",
+ OSPF_IF_PARAM(oi, v_wait));
+ json_object_int_add(
+ json_oi, "timerRetransmitSecs",
+ OSPF_IF_PARAM(oi, retransmit_interval));
} else {
vty_out(vty, " Timer intervals configured,");
vty_out(vty, " Hello ");
@@ -3913,17 +4013,23 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
json_object_int_add(json_interface_sub,
"timerHelloInMsecs",
time_store);
+ json_object_int_add(json_oi,
+ "timerHelloInMsecs",
+ time_store);
} else
vty_out(vty, " Hello due in %s\n",
ospf_timer_dump(oi->t_hello, timebuf,
sizeof(timebuf)));
} else /* passive-interface is set */
{
- if (use_json)
+ if (use_json) {
json_object_boolean_true_add(
json_interface_sub,
"timerPassiveIface");
- else
+
+ json_object_boolean_true_add(
+ json_oi, "timerPassiveIface");
+ } else
vty_out(vty,
" No Hellos (Passive interface)\n");
}
@@ -3934,13 +4040,17 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
json_object_int_add(json_interface_sub,
"nbrAdjacentCount",
ospf_nbr_count(oi, NSM_Full));
+
+ json_object_int_add(json_oi, "nbrCount",
+ ospf_nbr_count(oi, 0));
+ json_object_int_add(json_oi, "nbrAdjacentCount",
+ ospf_nbr_count(oi, NSM_Full));
} else
vty_out(vty,
" Neighbor Count is %d, Adjacent neighbor count is %d\n",
ospf_nbr_count(oi, 0),
ospf_nbr_count(oi, NSM_Full));
-
params = IF_DEF_PARAMS(ifp);
if (params &&
OSPF_IF_PARAM_CONFIGURED(params, v_gr_hello_delay)) {
@@ -3948,6 +4058,9 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
json_object_int_add(json_interface_sub,
"grHelloDelaySecs",
params->v_gr_hello_delay);
+
+ json_object_int_add(json_oi, "grHelloDelaySecs",
+ params->v_gr_hello_delay);
} else
vty_out(vty,
" Graceful Restart hello delay: %us\n",
@@ -3958,16 +4071,29 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf,
/* OSPF Authentication information */
ospf_interface_auth_show(vty, oi, json_interface_sub, use_json);
+
+ ospf_interface_auth_show(vty, oi, json_oi, use_json);
if (oi->type == OSPF_IFTYPE_POINTOMULTIPOINT) {
- if (use_json)
+ if (use_json) {
json_object_boolean_add(json_interface_sub,
"p2mpDelayReflood",
oi->p2mp_delay_reflood);
- else
+
+ json_object_boolean_add(json_oi,
+ "p2mpDelayReflood",
+ oi->p2mp_delay_reflood);
+ } else {
vty_out(vty,
" %sDelay reflooding LSAs received on P2MP interface\n",
oi->p2mp_delay_reflood ? "" : "Don't ");
+ }
}
+
+ /* Add the ospf_interface object to the main json blob, keyed by
+ * the interface's source IP address
+ if (use_json)
+ json_object_object_addf(json_ois, json_oi, "%pI4",
+ &oi->address->u.prefix4);
}
}
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index 097c9f2964..53a02e14a5 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -516,7 +516,7 @@ pbr_encode_pbr_map_sequence_vrf(struct stream *s,
stream_putl(s, pbr_vrf->vrf->data.l.table_id);
}
-static void pbr_encode_pbr_map_sequence(struct stream *s,
+static bool pbr_encode_pbr_map_sequence(struct stream *s,
struct pbr_map_sequence *pbrms,
struct interface *ifp)
{
@@ -549,7 +549,14 @@ static void pbr_encode_pbr_map_sequence(struct stream *s,
stream_putl(s, pbr_nht_get_table(pbrms->nhgrp_name));
else if (pbrms->nhg)
stream_putl(s, pbr_nht_get_table(pbrms->internal_nhg_name));
+ else {
+ /* Not valid for install without table */
+ return false;
+ }
+
stream_put(s, ifp->name, INTERFACE_NAMSIZ);
+
+ return true;
}
bool pbr_send_pbr_map(struct pbr_map_sequence *pbrms,
@@ -593,11 +600,13 @@ bool pbr_send_pbr_map(struct pbr_map_sequence *pbrms,
install ? "Installing" : "Deleting", pbrm->name, pbrms->seqno,
install, pmi->ifp->name, pmi->delete);
- pbr_encode_pbr_map_sequence(s, pbrms, pmi->ifp);
-
- stream_putw_at(s, 0, stream_get_endp(s));
-
- zclient_send_message(zclient);
+ if (pbr_encode_pbr_map_sequence(s, pbrms, pmi->ifp)) {
+ stream_putw_at(s, 0, stream_get_endp(s));
+ zclient_send_message(zclient);
+ } else {
+ DEBUGD(&pbr_dbg_zebra, "%s: %s seq %u encode failed, skipped",
+ __func__, pbrm->name, pbrms->seqno);
+ }
return true;
}
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index 94e8b874f7..262ce86c29 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -743,7 +743,7 @@ DEFPY (interface_ipv6_mld_query_max_response_time,
IPV6_STR
IFACE_MLD_STR
IFACE_MLD_QUERY_MAX_RESPONSE_TIME_STR
- "Query response value in milliseconds\n")
+ "Query response value in deci-seconds\n")
{
return gm_process_query_max_response_time_cmd(vty, qmrt_str);
}
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index b1beb45630..f26fd818b5 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -1762,3 +1762,61 @@ void pim_iface_init(void)
if_zapi_callbacks(pim_ifp_create, pim_ifp_up, pim_ifp_down,
pim_ifp_destroy);
}
+
+static void pim_if_membership_clear(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ if (pim_ifp->pim_enable && pim_ifp->gm_enable)
+ return;
+
+ pim_ifchannel_membership_clear(ifp);
+}
+
+void pim_pim_interface_delete(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ return;
+
+ pim_ifp->pim_enable = false;
+
+ pim_if_membership_clear(ifp);
+
+ /*
+ * pim_sock_delete() removes all neighbors from
+ * pim_ifp->pim_neighbor_list.
+ */
+ pim_sock_delete(ifp, "pim unconfigured on interface");
+ pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+
+ if (!pim_ifp->gm_enable) {
+ pim_if_addr_del_all(ifp);
+ pim_if_delete(ifp);
+ }
+}
+
+void pim_gm_interface_delete(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ return;
+
+ pim_ifp->gm_enable = false;
+
+ pim_if_membership_clear(ifp);
+
+#if PIM_IPV == 4
+ igmp_sock_delete_all(ifp);
+#else
+ gm_ifp_teardown(ifp);
+#endif
+
+ if (!pim_ifp->pim_enable)
+ pim_if_delete(ifp);
+}
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 973840a753..0312f719d3 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -243,5 +243,7 @@ bool pim_if_is_vrf_device(struct interface *ifp);
int pim_if_ifchannel_count(struct pim_interface *pim_ifp);
void pim_iface_init(void);
+void pim_pim_interface_delete(struct interface *ifp);
+void pim_gm_interface_delete(struct interface *ifp);
#endif /* PIM_IFACE_H */
diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c
index 15078dd1ec..18a9fb7c6c 100644
--- a/pimd/pim_igmpv3.c
+++ b/pimd/pim_igmpv3.c
@@ -319,14 +319,6 @@ void igmp_source_free(struct gm_source *source)
XFREE(MTYPE_PIM_IGMP_GROUP_SOURCE, source);
}
-static void source_channel_oil_detach(struct gm_source *source)
-{
- if (source->source_channel_oil) {
- pim_channel_oil_del(source->source_channel_oil, __func__);
- source->source_channel_oil = NULL;
- }
-}
-
/*
igmp_source_delete: stop forwarding, and delete the source
igmp_source_forward_stop: stop forwarding, but keep the source
@@ -355,6 +347,7 @@ void igmp_source_delete(struct gm_source *source)
source_timer_off(group, source);
igmp_source_forward_stop(source);
+ source->source_channel_oil = NULL;
/* sanity check that forwarding has been disabled */
if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
@@ -371,8 +364,6 @@ void igmp_source_delete(struct gm_source *source)
/* warning only */
}
- source_channel_oil_detach(source);
-
/*
notice that listnode_delete() can't be moved
into igmp_source_free() because the later is
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index d7e4154558..30d84710e6 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -45,20 +45,6 @@ MACRO_REQUIRE_SEMICOLON()
#define yang_dnode_get_pimaddr yang_dnode_get_ipv4
#endif /* PIM_IPV != 6 */
-static void pim_if_membership_clear(struct interface *ifp)
-{
- struct pim_interface *pim_ifp;
-
- pim_ifp = ifp->info;
- assert(pim_ifp);
-
- if (pim_ifp->pim_enable && pim_ifp->gm_enable) {
- return;
- }
-
- pim_ifchannel_membership_clear(ifp);
-}
-
/*
* When PIM is disabled on interface, IGMPv3 local membership
* information is not injected into PIM interface state.
@@ -161,32 +147,6 @@ static int pim_cmd_interface_add(struct interface *ifp)
return 1;
}
-static int pim_cmd_interface_delete(struct interface *ifp)
-{
- struct pim_interface *pim_ifp = ifp->info;
-
- if (!pim_ifp)
- return 1;
-
- pim_ifp->pim_enable = false;
-
- pim_if_membership_clear(ifp);
-
- /*
- * pim_sock_delete() removes all neighbors from
- * pim_ifp->pim_neighbor_list.
- */
- pim_sock_delete(ifp, "pim unconfigured on interface");
- pim_upstream_nh_if_update(pim_ifp->pim, ifp);
-
- if (!pim_ifp->gm_enable) {
- pim_if_addr_del_all(ifp);
- pim_if_delete(ifp);
- }
-
- return 1;
-}
-
static int interface_pim_use_src_cmd_worker(struct interface *ifp,
pim_addr source_addr, char *errmsg, size_t errmsg_len)
{
@@ -278,7 +238,7 @@ static int pim_rp_cmd_worker(struct pim_instance *pim, pim_addr rp_addr,
if (result == PIM_RP_NO_PATH) {
snprintfrr(errmsg, errmsg_len,
"No Path to RP address specified: %pPA", &rp_addr);
- return NB_ERR_INCONSISTENCY;
+ return NB_OK;
}
if (result == PIM_GROUP_OVERLAP) {
@@ -1573,12 +1533,7 @@ int lib_interface_pim_address_family_destroy(struct nb_cb_destroy_args *args)
if (!pim_ifp)
return NB_OK;
- if (!pim_cmd_interface_delete(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "Unable to delete interface information %s",
- ifp->name);
- return NB_ERR_INCONSISTENCY;
- }
+ pim_pim_interface_delete(ifp);
}
return NB_OK;
@@ -1626,11 +1581,7 @@ int lib_interface_pim_address_family_pim_enable_modify(struct nb_cb_modify_args
if (!pim_ifp)
return NB_ERR_INCONSISTENCY;
- if (!pim_cmd_interface_delete(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "Unable to delete interface information");
- return NB_ERR_INCONSISTENCY;
- }
+ pim_pim_interface_delete(ifp);
}
break;
}
@@ -2565,7 +2516,6 @@ int lib_interface_gmp_address_family_create(struct nb_cb_create_args *args)
int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
{
struct interface *ifp;
- struct pim_interface *pim_ifp;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -2574,19 +2524,7 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
- pim_ifp = ifp->info;
-
- if (!pim_ifp)
- return NB_OK;
-
- pim_ifp->gm_enable = false;
-
- pim_if_membership_clear(ifp);
-
- pim_if_addr_del_all_igmp(ifp);
-
- if (!pim_ifp->pim_enable)
- pim_if_delete(ifp);
+ pim_gm_interface_delete(ifp);
}
return NB_OK;
@@ -2600,7 +2538,6 @@ int lib_interface_gmp_address_family_enable_modify(
{
struct interface *ifp;
bool gm_enable;
- struct pim_interface *pim_ifp;
int mcast_if_count;
const char *ifp_name;
const struct lyd_node *if_dnode;
@@ -2630,25 +2567,8 @@ int lib_interface_gmp_address_family_enable_modify(
if (gm_enable)
return pim_cmd_gm_start(ifp);
- else {
- pim_ifp = ifp->info;
-
- if (!pim_ifp)
- return NB_ERR_INCONSISTENCY;
-
- pim_ifp->gm_enable = false;
-
- pim_if_membership_clear(ifp);
-
-#if PIM_IPV == 4
- pim_if_addr_del_all_igmp(ifp);
-#else
- gm_ifp_teardown(ifp);
-#endif
-
- if (!pim_ifp->pim_enable)
- pim_if_delete(ifp);
- }
+ else
+ pim_gm_interface_delete(ifp);
}
return NB_OK;
}
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index b5d9df6f2a..2e554de7af 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -85,7 +85,7 @@ void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
zlog_debug("%s: No pinfo!", __func__);
return;
}
- if (pim_msg_send(pinfo->pim_sock_fd, src, originator, buffer,
+ if (pim_msg_send(pinfo->pim->reg_sock, src, originator, buffer,
b1length + PIM_MSG_REGISTER_STOP_LEN, ifp)) {
if (PIM_DEBUG_PIM_TRACE) {
zlog_debug(
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
index 6ffea868d8..4081786c1e 100644
--- a/pimd/pim_tib.c
+++ b/pimd/pim_tib.c
@@ -163,4 +163,6 @@ void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
per-interface (S,G) state.
*/
pim_ifchannel_local_membership_del(oif, &sg);
+
+ pim_channel_oil_del(*oilp, __func__);
}
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 8fa7b7cf96..82ba9a55a0 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -181,7 +181,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
- "%s(%s): Delete %s[%s] ref count: %d , flags: %d c_oil ref count %d (Pre decrement)",
+ "%s(%s): Delete %s[%s] ref count: %d, flags: %d c_oil ref count %d (Pre decrement)",
__func__, name, up->sg_str, pim->vrf->name,
up->ref_count, up->flags,
up->channel_oil->oil_ref_count);
@@ -2056,7 +2056,7 @@ static void pim_upstream_sg_running(void *arg)
// No packet can have arrived here if this is the case
if (!up->channel_oil->installed) {
if (PIM_DEBUG_TRACE)
- zlog_debug("%s: %s%s is not installed in mroute",
+ zlog_debug("%s: %s[%s] is not installed in mroute",
__func__, up->sg_str, pim->vrf->name);
return;
}
diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in
index 4dec84b8fb..656df20cce 100644
--- a/redhat/frr.spec.in
+++ b/redhat/frr.spec.in
@@ -799,9 +799,33 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons
%changelog
-* Tue Feb 07 2023 Martin Winter <mwinter@opensourcerouting.org> - %{version}
-
-* Tue Feb 07 2023 Donatas Abraitis <donatas@opensourcerouting.org> - 8.5
+* Tue Jun 06 2023 Martin Winter <mwinter@opensourcerouting.org> - %{version}
+
+* Tue Jun 06 2023 Jafar Al-Gharaibeh <jafar@atcorp.com> - 9.0
+
+* Fri Mar 10 2023 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.5
+- Major Highlights:
+- Add support for per-VRF SRv6 SID
+- Add BGP labeled-unicast Add-Path functionality
+- Implementation of SNMP BGP4v2-MIB (IPv6 support) for better network management and monitoring
+- Add BGP new command neighbor path-attribute discard
+- Add BGP new command neighbor path-attribute treat-as-withdraw
+- Implement L3 route-target auto/wildcard configuration
+- Implement BGP ACCEPT_OWN Community Attribute (rfc7611)
+- Implement The Accumulated IGP Metric Attribute for BGP (rfc7311)
+- Implement graceful-shutdown command per neighbor
+- Add BGP new command to configure TCP keepalives for a peer bgp tcp-keepalive
+- Traffic control (TC) ZAPI implementation
+- SRv6 uSID (microSID) implementation
+- Start deprecating start-shell, ssh, and telnet commands due to security reasons
+- Add VRRPv3 an ability to disable IPv4 pseudo-header checksum
+- BFD integration for static routes
+- Allow protocols to configure BFD sessions with automatic source selection
+- Allow zero-length opaque LSAs for OSPF (rfc5250)
+- Add ISIS new command set-overload-bit on-startup
+- PIMv6 BSM support
+- For a full list of new features and bug fixes, please refer to:
+- https://frrouting.org/release/
* Tue Nov 01 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.4
- New BGP command (neighbor PEER soo) to configure SoO to prevent routing loops and suboptimal routing on dual-homed sites.
diff --git a/staticd/static_main.c b/staticd/static_main.c
index f6b7847602..9101a95b17 100644
--- a/staticd/static_main.c
+++ b/staticd/static_main.c
@@ -56,11 +56,11 @@ struct event_loop *master;
struct mgmt_be_client *mgmt_be_client;
static struct frr_daemon_info staticd_di;
+
/* SIGHUP handler. */
static void sighup(void)
{
- zlog_info("SIGHUP received");
- vty_read_config(NULL, staticd_di.config_file, config_default);
+ zlog_info("SIGHUP received and ignored");
}
/* SIGINT / SIGTERM handler. */
diff --git a/tests/topotests/babel_topo1/r1/babeld.conf b/tests/topotests/babel_topo1/r1/babeld.conf
index 372d2edff1..4058362cc3 100644
--- a/tests/topotests/babel_topo1/r1/babeld.conf
+++ b/tests/topotests/babel_topo1/r1/babeld.conf
@@ -1,4 +1,3 @@
-log file eigrpd.log
interface r1-eth0
babel hello-interval 1000
diff --git a/tests/topotests/babel_topo1/r2/babeld.conf b/tests/topotests/babel_topo1/r2/babeld.conf
index 8a36dda5f8..bae4e59e0b 100644
--- a/tests/topotests/babel_topo1/r2/babeld.conf
+++ b/tests/topotests/babel_topo1/r2/babeld.conf
@@ -1,4 +1,3 @@
-log file eigrpd.log
!
interface r2-eth0
babel hello-interval 1000
diff --git a/tests/topotests/babel_topo1/r3/babeld.conf b/tests/topotests/babel_topo1/r3/babeld.conf
index 1e9dc261f5..bfda3622dd 100644
--- a/tests/topotests/babel_topo1/r3/babeld.conf
+++ b/tests/topotests/babel_topo1/r3/babeld.conf
@@ -1,4 +1,3 @@
-log file eigrpd.log
!
interface r3-eth0
babel hello-interval 1000
diff --git a/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json b/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json
new file mode 100644
index 0000000000..4156c6d0f7
--- /dev/null
+++ b/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json
@@ -0,0 +1,152 @@
+{
+ "address_types": ["ipv4", "ipv6"],
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start":{"ipv4":"192.168.0.0", "v4mask":24, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {}}},
+ "r3": {"dest_link": {"r1": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }}},
+ "r3": {"dest_link": {"r1": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }}}
+ }
+ }
+ }
+ }
+ },
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ },
+ "static_routes":[
+ {
+ "network":"192.168.20.1/32",
+ "next_hop":"Null0"
+ },
+ {
+ "network":"192:168:20::1/128",
+ "next_hop":"Null0"
+ }]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2": {}}},
+ "r4": {"dest_link": {"r2": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2": {}}},
+ "r4": {"dest_link": {"r2": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r3": {}}},
+ "r4": {"dest_link": {"r3": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r3": {}}},
+ "r4": {"dest_link": {"r3": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r4": {}}},
+ "r3": {"dest_link": {"r4": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r4": {}}},
+ "r3": {"dest_link": {"r4": {}}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py b/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py
new file mode 100644
index 0000000000..fb72f4331d
--- /dev/null
+++ b/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py
@@ -0,0 +1,1118 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2023 by VMware, Inc. ("VMware")
+#
+#
+################################################################################
+# Following tests are performed to validate BGP always compare MED functionality
+################################################################################
+"""
+1. Verify the BGP always compare MED functionality in between eBGP Peers
+2. Verify the BGP always compare MED functionality in between eBGP Peers by changing different AD values
+3. Verify the BGP always compare MED functionality in between eBGP Peers by changing MED values in middle routers
+4. Verify that BGP Always compare MED functionality by restarting BGP, Zebra and FRR services and clear BGP and
+ shutdown BGP neighbor
+5. Verify BGP always compare MED functionality by performing shut/noshut on the interfaces in between BGP neighbors
+"""
+
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ create_static_routes,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ step,
+ check_address_types,
+ check_router_status,
+ create_static_routes,
+ create_prefix_lists,
+ create_route_maps,
+ kill_router_daemons,
+ shutdown_bringup_interface,
+ stop_router,
+ start_router,
+ delete_route_maps,
+)
+
+from lib.topolog import logger
+from lib.bgp import verify_bgp_convergence, verify_bgp_rib, create_router_bgp, clear_bgp
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Reading the data from JSON File for topology creation
+topo = None
+
+# Global variables
+ADDR_TYPES = check_address_types()
+NETWORK1_1 = {"ipv4": "192.168.20.1/32", "ipv6": "192:168:20::1/128"}
+NETWORK1_2 = {"ipv4": "192.168.30.1/32", "ipv6": "192:168:30::1/128"}
+NETWORK1_3 = {"ipv4": "192.168.40.1/32", "ipv6": "192:168:40::1/128"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_always_compare_med_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+##########################################################################################################
+#
+# Local API
+#
+##########################################################################################################
+
+
+def initial_configuration(tgen, tc_name):
+ """
+ API to do initial set of configuration
+ """
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+
+ step("Configure static routes in R4")
+ for addr_type in ADDR_TYPES:
+ input_static_r4 = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_1[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+
+ logger.info("Configure static routes")
+ result = create_static_routes(tgen, input_static_r4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure redistribute static in R4")
+ input_static_redist_r4 = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_static_redist_r4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Create prefix list
+ input_dict_23 = {
+ "r2": {
+ "prefix_lists": {
+ addr_type: {
+ "pf_ls_r2_{}".format(addr_type): [
+ {"network": NETWORK1_1[addr_type], "action": "permit"}
+ ]
+ }
+ }
+ },
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_r3_{}".format(addr_type): [
+ {"network": NETWORK1_1[addr_type], "action": "permit"}
+ ]
+ }
+ }
+ },
+ }
+ result = create_prefix_lists(tgen, input_dict_23)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ input_dict_23 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_MED_R2": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r2_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 300},
+ }
+ ]
+ }
+ },
+ "r3": {
+ "route_maps": {
+ "RMAP_MED_R3": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r3_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 200},
+ }
+ ]
+ }
+ },
+ }
+ result = create_route_maps(tgen, input_dict_23)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ input_dict_r2_r3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {
+ "route_maps": [
+ {
+ "name": "RMAP_MED_R2",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "RMAP_MED_R3",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict_r2_r3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+
+##########################################################################################################
+#
+# Testcases
+#
+##########################################################################################################
+
+
+def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_p0(request):
+ """
+ Verify the BGP always compare MED functionality in between eBGP Peers
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'multi-path as-path relax' command at R1.")
+ configure_bgp = {
+ "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'multi-path as-path relax' command, "
+ "its also chooses lowest MED to reach destination."
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {
+ "r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": False}}
+ }
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that 'bgp always-compare-med' command is removed")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove 'multi-path as-path relax' command at R1")
+ configure_bgp = {
+ "r1": {
+ "bgp": {
+ "local_as": "100",
+ "bestpath": {"aspath": "multipath-relax", "delete": True},
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify route selection after removing 'multi-path as-path relax' command")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_by_changing_AD_values_p0(
+ request,
+):
+    """
+    Verify the BGP always compare MED functionality in between eBGP Peers by changing different AD values.
+    """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure AD value=100 at R2 and AD value=200 at R3 towards R1")
+ input_dict_1 = {
+ "r2": {
+ "bgp": {
+ "local_as": 200,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 100, "ibgp": 100, "local": 100}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 100, "ibgp": 100, "local": 100}
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 300,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ }
+ },
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that inspite of AD values, always lowest MED value is getting "
+ "selected at destination router R1"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_by_changing_MED_values_p1(
+ request,
+):
+ """
+ Verify the BGP always compare MED functionality in between eBGP Peers by changing MED values in middle routers
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'multi-path as-path relax' command at R1.")
+ configure_bgp = {
+ "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'multi-path as-path relax' command, "
+ "its also chooses lowest MED to reach destination."
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change the MED value 150 in R2 router.")
+ input_dict = {"r2": {"route_maps": ["RMAP_MED_R2"]}}
+ result = delete_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_MED_R2": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r2_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 150},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that after changing MED, its chooses lowest MED value path")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change the MED value 100 in R3 router.")
+ input_dict = {"r3": {"route_maps": ["RMAP_MED_R3"]}}
+ result = delete_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMAP_MED_R3": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r3_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 100},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that after changing MED, its chooses lowest MED value path")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_by_restarting_daemons_clear_bgp_shut_neighbors_p1(
+ request,
+):
+ """
+ Verify that BGP Always compare MED functionality by restarting BGP, Zebra and FRR services and clear BGP and shutdown BGP neighbor
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'multi-path as-path relax' command at R1.")
+ configure_bgp = {
+ "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'multi-path as-path relax' command, "
+ "its also chooses lowest MED to reach destination."
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Restart the BGPd/Zebra/FRR service on R1")
+ for daemon in ["bgpd", "zebra", "frr"]:
+ if daemon == "frr":
+ stop_router(tgen, "r1")
+ start_router(tgen, "r1")
+ else:
+ kill_router_daemons(tgen, "r1", daemon)
+
+ step(
+ "Verify after restarting dameons and frr services, its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Clear bgp on R1")
+ clear_bgp(tgen, None, "r1")
+
+    step("Verify after clearing BGP, it chooses the lowest MED value path")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Perform BGP neighborship shut/no shut")
+ for action, keyword in zip([True, False], ["shut", "noshut"]):
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {"r1": {"shutdown": action}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+        step("Verify after {} BGP, it chooses the lowest MED value path".format(keyword))
+ if action:
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ else:
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_by_shut_noshut_interfaces_bw_bgp_neighbors_p1(
+ request,
+):
+ """
+ Verify BGP always compare MED functionality by performing shut/noshut on the interfaces in between BGP neighbors
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'multi-path as-path relax' command at R1.")
+ configure_bgp = {
+ "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'multi-path as-path relax' command, "
+        "it also chooses the lowest MED to reach the destination."
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+        "Verify that after applying 'bgp always-compare-med', it chooses the lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for action, keyword in zip([False, True], ["Shut", "No Shut"]):
+ step(
+ "{} the interface on the link between R3 & R4 and R2 & R4 routers".format(
+ keyword
+ )
+ )
+ intf2_4 = topo["routers"]["r2"]["links"]["r4"]["interface"]
+ intf3_4 = topo["routers"]["r3"]["links"]["r4"]["interface"]
+ for dut, intf in zip(["r2", "r3"], [intf2_4, intf3_4]):
+ shutdown_bringup_interface(tgen, dut, intf, action)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ if action:
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ else:
+ result = verify_bgp_rib(
+ tgen, addr_type, "r1", input_static_r1, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present in BGP table\n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, addr_type, "r1", input_static_r1, next_hop=nh, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present in FIB \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate_timer/__init__.py b/tests/topotests/bgp_default_originate_timer/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/__init__.py
diff --git a/tests/topotests/bgp_default_originate_timer/r1/bgpd.conf b/tests/topotests/bgp_default_originate_timer/r1/bgpd.conf
new file mode 100644
index 0000000000..f2a1c9005a
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r1/bgpd.conf
@@ -0,0 +1,18 @@
+router bgp 65001
+ no bgp ebgp-requires-policy
+ bgp default-originate timer 3600
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ address-family ipv4
+ neighbor 192.168.1.2 default-originate route-map default
+ exit-address-family
+!
+bgp community-list standard r3 seq 5 permit 65003:1
+!
+route-map default permit 10
+ match community r3
+exit
diff --git a/tests/topotests/bgp_default_originate_timer/r1/zebra.conf b/tests/topotests/bgp_default_originate_timer/r1/zebra.conf
new file mode 100644
index 0000000000..3692361fb3
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r1/zebra.conf
@@ -0,0 +1,7 @@
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+interface r1-eth1
+ ip address 192.168.2.1/24
+!
diff --git a/tests/topotests/bgp_default_originate_timer/r2/bgpd.conf b/tests/topotests/bgp_default_originate_timer/r2/bgpd.conf
new file mode 100644
index 0000000000..7ca65a94a1
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r2/bgpd.conf
@@ -0,0 +1,6 @@
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_default_originate_timer/r2/zebra.conf b/tests/topotests/bgp_default_originate_timer/r2/zebra.conf
new file mode 100644
index 0000000000..0c95656663
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r2/zebra.conf
@@ -0,0 +1,4 @@
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_default_originate_timer/r3/bgpd.conf b/tests/topotests/bgp_default_originate_timer/r3/bgpd.conf
new file mode 100644
index 0000000000..0a37913d73
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r3/bgpd.conf
@@ -0,0 +1,12 @@
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+ address-family ipv4 unicast
+ redistribute connected route-map r1
+ exit-address-family
+!
+route-map r1 permit 10
+ set community 65003:1
+exit
diff --git a/tests/topotests/bgp_default_originate_timer/r3/zebra.conf b/tests/topotests/bgp_default_originate_timer/r3/zebra.conf
new file mode 100644
index 0000000000..20801f937e
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r3/zebra.conf
@@ -0,0 +1,7 @@
+!
+interface lo
+ ip address 10.10.10.10/32
+!
+interface r3-eth0
+ ip address 192.168.2.2/24
+!
diff --git a/tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py b/tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py
new file mode 100644
index 0000000000..b2ba936fb1
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Check if the `bgp default-originate timer` command takes effect:
+1. Set bgp default-originate timer 3600
+2. No default route is advertised because the timer is running for 3600 seconds
+3. We reduce it to 10 seconds
+4. Default route is advertised
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_default_originate_timer():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+ r3 = tgen.gears["r3"]
+
+ def _bgp_default_received_from_r1():
+ output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast 0.0.0.0/0 json"))
+ expected = {
+ "paths": [
+ {
+ "nexthops": [
+ {
+ "hostname": "r1",
+ "ip": "192.168.1.1",
+ }
+ ],
+ }
+ ],
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_default_received_from_r1)
+ _, result = topotest.run_and_expect(test_func, not None, count=30, wait=1)
+ assert result is not None, "Seen default route received from r1, but should not"
+
+ step("Set BGP default-originate timer to 10 seconds")
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router bgp
+ bgp default-originate timer 10
+ """
+ )
+
+ step("Trigger BGP UPDATE from r3")
+ r3.vtysh_cmd(
+ """
+ configure terminal
+ route-map r1 permit 10
+ set metric 1
+ """
+ )
+
+ test_func = functools.partial(_bgp_default_received_from_r1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Did not see default route received from r1, but should"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/bgpd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/bgpd.conf
new file mode 100644
index 0000000000..cdf4cb4feb
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/bgpd.conf
@@ -0,0 +1 @@
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/ospfd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/ospfd.conf
new file mode 100644
index 0000000000..2db7edb806
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/ospfd.conf
@@ -0,0 +1,13 @@
+!
+router ospf
+ network 10.20.0.0/16 area 0
+ network 10.20.20.20/32 area 0
+!
+int P1-eth0
+ ip ospf hello-interval 2
+ ip ospf dead-interval 10
+!
+int P1-eth1
+ ip ospf hello-interval 2
+ ip ospf dead-interval 10
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/zebra.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/zebra.conf
new file mode 100644
index 0000000000..95b5da8402
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/P1/zebra.conf
@@ -0,0 +1,7 @@
+!
+interface lo
+ ip address 10.20.20.20/32
+interface P1-eth0
+ ip address 10.20.1.2/24
+interface P1-eth1
+ ip address 10.20.2.2/24
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgp.l2vpn.evpn.vni.json b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgp.l2vpn.evpn.vni.json
new file mode 100644
index 0000000000..9f93635c21
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgp.l2vpn.evpn.vni.json
@@ -0,0 +1,19 @@
+{
+ "vni":101,
+ "type":"L2",
+ "inKernel":"True",
+ "rd":"10.10.10.10:101",
+ "originatorIp":"10.10.10.10",
+ "mcastGroup":"0.0.0.0",
+ "siteOfOrigin":"65000:0",
+ "advertiseGatewayMacip":"Disabled",
+ "advertiseSviMacIp":"Active",
+ "sviInterface":"br101",
+ "importRts":[
+ "65000:101"
+ ],
+ "exportRts":[
+ "65000:101"
+ ]
+}
+
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgpd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgpd.conf
new file mode 100644
index 0000000000..f839443025
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/bgpd.conf
@@ -0,0 +1,18 @@
+router bgp 65000
+ timers bgp 3 9
+ bgp router-id 10.10.10.10
+ no bgp default ipv4-unicast
+ neighbor 10.30.30.30 remote-as 65000
+ neighbor 10.30.30.30 update-source lo
+ neighbor 10.30.30.30 timers 3 10
+ !
+ address-family l2vpn evpn
+ neighbor 10.30.30.30 activate
+ advertise-all-vni
+ advertise-svi-ip
+ vni 101
+ rd 10.10.10.10:101
+ route-target import 65000:101
+ route-target export 65000:101
+ exit-vni
+ advertise-svi-ip
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/evpn.vni.json b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/evpn.vni.json
new file mode 100644
index 0000000000..4bea8b384f
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/evpn.vni.json
@@ -0,0 +1,17 @@
+{
+ "vni":101,
+ "type":"L2",
+ "tenantVrf":"VRF-A",
+ "vxlanInterface":"vxlan101",
+ "vtepIp":"10.10.10.10",
+ "mcastGroup":"0.0.0.0",
+ "advertiseGatewayMacip":"No",
+ "numRemoteVteps":1,
+ "remoteVteps":[
+ {
+ "ip":"10.30.30.30",
+ "flood":"HER"
+ }
+ ]
+}
+
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/ospfd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/ospfd.conf
new file mode 100644
index 0000000000..f1c2b42dc1
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/ospfd.conf
@@ -0,0 +1,9 @@
+!
+router ospf
+ network 10.20.0.0/16 area 0
+ network 10.10.10.10/32 area 0
+!
+int PE1-eth1
+ ip ospf hello-interval 2
+ ip ospf dead-interval 10
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/zebra.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/zebra.conf
new file mode 100644
index 0000000000..e2699475c9
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE1/zebra.conf
@@ -0,0 +1,8 @@
+!
+log file zebra.log
+!
+interface lo
+ ip address 10.10.10.10/32
+interface PE1-eth1
+ ip address 10.20.1.1/24
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgp.l2vpn.evpn.vni.json b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgp.l2vpn.evpn.vni.json
new file mode 100644
index 0000000000..63ac730144
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgp.l2vpn.evpn.vni.json
@@ -0,0 +1,19 @@
+{
+ "vni":101,
+ "type":"L2",
+ "inKernel":"True",
+ "rd":"10.30.30.30:101",
+ "originatorIp":"10.30.30.30",
+ "mcastGroup":"0.0.0.0",
+ "siteOfOrigin":"65000:0",
+ "advertiseGatewayMacip":"Disabled",
+ "advertiseSviMacIp":"Active",
+ "sviInterface":"br101",
+ "importRts":[
+ "65000:101"
+ ],
+ "exportRts":[
+ "65000:101"
+ ]
+}
+
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgpd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgpd.conf
new file mode 100644
index 0000000000..9a0830d8a3
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/bgpd.conf
@@ -0,0 +1,18 @@
+router bgp 65000
+ timers bgp 3 9
+ bgp router-id 10.30.30.30
+ no bgp default ipv4-unicast
+ neighbor 10.10.10.10 remote-as 65000
+ neighbor 10.10.10.10 update-source lo
+ neighbor 10.10.10.10 timers 3 10
+ !
+ address-family l2vpn evpn
+ neighbor 10.10.10.10 activate
+ advertise-all-vni
+ advertise-svi-ip
+ vni 101
+ rd 10.30.30.30:101
+ route-target import 65000:101
+ route-target export 65000:101
+ exit-vni
+ advertise-svi-ip
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/evpn.vni.json b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/evpn.vni.json
new file mode 100644
index 0000000000..5566fff954
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/evpn.vni.json
@@ -0,0 +1,16 @@
+{
+ "vni":101,
+ "type":"L2",
+ "tenantVrf":"VRF-A",
+ "vxlanInterface":"vxlan101",
+ "vtepIp":"10.30.30.30",
+ "mcastGroup":"0.0.0.0",
+ "advertiseGatewayMacip":"No",
+ "numRemoteVteps":1,
+ "remoteVteps":[
+ {
+ "ip":"10.10.10.10",
+ "flood":"HER"
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/ospfd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/ospfd.conf
new file mode 100644
index 0000000000..065c993303
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/ospfd.conf
@@ -0,0 +1,9 @@
+!
+router ospf
+ network 10.20.0.0/16 area 0
+ network 10.30.30.30/32 area 0
+!
+int PE2-eth0
+ ip ospf hello-interval 2
+ ip ospf dead-interval 10
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/zebra.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/zebra.conf
new file mode 100644
index 0000000000..9738916ab0
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/PE2/zebra.conf
@@ -0,0 +1,6 @@
+!
+interface lo
+ ip address 10.30.30.30/32
+interface PE2-eth0
+ ip address 10.20.2.3/24
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/bgpd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/bgpd.conf
new file mode 100644
index 0000000000..cdf4cb4feb
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/bgpd.conf
@@ -0,0 +1 @@
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/ospfd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/ospfd.conf
new file mode 100644
index 0000000000..cdf4cb4feb
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/ospfd.conf
@@ -0,0 +1 @@
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/zebra.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/zebra.conf
new file mode 100644
index 0000000000..91fae9eeba
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host1/zebra.conf
@@ -0,0 +1,3 @@
+!
+int host1-eth0
+ ip address 10.10.1.55/24
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/bgpd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/bgpd.conf
new file mode 100644
index 0000000000..cdf4cb4feb
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/bgpd.conf
@@ -0,0 +1 @@
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/ospfd.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/ospfd.conf
new file mode 100644
index 0000000000..cdf4cb4feb
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/ospfd.conf
@@ -0,0 +1 @@
+!
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/zebra.conf b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/zebra.conf
new file mode 100644
index 0000000000..df9adeb3b5
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/host2/zebra.conf
@@ -0,0 +1,3 @@
+!
+interface host2-eth0
+ ip address 10.10.1.56/24
diff --git a/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/test_bgp_evpn_vxlan_macvrf_soo.py b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/test_bgp_evpn_vxlan_macvrf_soo.py
new file mode 100755
index 0000000000..558f7379e9
--- /dev/null
+++ b/tests/topotests/bgp_evpn_vxlan_macvrf_soo_topo1/test_bgp_evpn_vxlan_macvrf_soo.py
@@ -0,0 +1,839 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# test_bgp_evpn_vxlan_macvrf_soo.py
+#
+# May 10 2023, Trey Aspelund <taspelund@nvidia.com>
+#
+# Copyright (C) 2023 NVIDIA Corporation
+#
+# Test MAC-VRF Site-of-Origin feature.
+# Ensure:
+# - routes received with SoO are installed w/o "mac-vrf soo" config
+# - invalid "mac-vrf soo" config is rejected
+# - valid "mac-vrf soo" config is applied to local VNIs
+# - valid "mac-vrf soo" is set for locally originated type-2/3 routes
+# - routes received with SoO are unimported/uninstalled from L2VNI/zebra
+# - routes received with SoO are unimported/uninstalled from L3VNI/RIB
+# - routes received with SoO are still present in global EVPN loc-rib
+#
+
+import os
+import sys
+import json
+from functools import partial
+from time import sleep
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create routers
+ tgen.add_router("P1")
+ tgen.add_router("PE1")
+ tgen.add_router("PE2")
+ tgen.add_router("host1")
+ tgen.add_router("host2")
+
+ # Host1-PE1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["host1"])
+ switch.add_link(tgen.gears["PE1"])
+
+ # PE1-P1
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["PE1"])
+ switch.add_link(tgen.gears["P1"])
+
+ # P1-PE2
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["P1"])
+ switch.add_link(tgen.gears["PE2"])
+
+ # PE2-host2
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["PE2"])
+ switch.add_link(tgen.gears["host2"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ pe1 = tgen.gears["PE1"]
+ pe2 = tgen.gears["PE2"]
+ p1 = tgen.gears["P1"]
+ host1 = tgen.gears["host1"]
+ host2 = tgen.gears["host2"]
+
+ # Setup PEs with:
+ # - vrf: VRF-A
+ # - l3vni 404: vxlan404 / br404
+ # - l2vni 101: vxlan101 / br101
+
+ ## Setup VRF
+ # pe1
+ pe1.run("ip link add VRF-A type vrf table 4000")
+ pe1.run("ip link set VRF-A up")
+ # pe2
+ pe2.run("ip link add VRF-A type vrf table 4000")
+ pe2.run("ip link set VRF-A up")
+
+ ## Setup L3VNI bridge/vxlan
+ # pe1
+ pe1.run("ip link add name br404 type bridge stp_state 0")
+ pe1.run("ip link set dev br404 addr aa:bb:cc:00:11:ff")
+ pe1.run("ip link set dev br404 master VRF-A addrgenmode none")
+ pe1.run("ip link set dev br404 up")
+ pe1.run(
+ "ip link add vxlan404 type vxlan id 404 dstport 4789 local 10.10.10.10 nolearning"
+ )
+ pe1.run("ip link set dev vxlan404 master br404 addrgenmode none")
+ pe1.run("ip link set dev vxlan404 type bridge_slave neigh_suppress on learning off")
+ pe1.run("ip link set dev vxlan404 up")
+ # pe2
+ pe2.run("ip link add name br404 type bridge stp_state 0")
+ pe2.run("ip link set dev br404 addr aa:bb:cc:00:22:ff")
+ pe2.run("ip link set dev br404 master VRF-A addrgenmode none")
+ pe2.run("ip link set dev br404 up")
+ pe2.run(
+ "ip link add vxlan404 type vxlan id 404 dstport 4789 local 10.30.30.30 nolearning"
+ )
+ pe2.run("ip link set dev vxlan404 master br404 addrgenmode none")
+ pe2.run("ip link set dev vxlan404 type bridge_slave neigh_suppress on learning off")
+ pe2.run("ip link set dev vxlan404 up")
+
+ ## Setup L2VNI bridge/vxlan + L2 PE/CE link
+ # pe1
+ pe1.run("ip link add name br101 type bridge stp_state 0")
+ pe1.run("ip addr add 10.10.1.1/24 dev br101")
+ pe1.run("ip link set dev br101 addr aa:bb:cc:00:11:aa")
+ pe1.run("ip link set dev br101 master VRF-A")
+ pe1.run("ip link set dev br101 up")
+ pe1.run(
+ "ip link add vxlan101 type vxlan id 101 dstport 4789 local 10.10.10.10 nolearning"
+ )
+ pe1.run("ip link set dev vxlan101 master br101")
+ pe1.run("ip link set dev vxlan101 type bridge_slave neigh_suppress on learning off")
+ pe1.run("ip link set dev vxlan101 up")
+ pe1.run("ip link set dev PE1-eth0 master br101")
+ pe1.run("ip link set dev PE1-eth0 up")
+ # pe2
+ pe2.run("ip link add name br101 type bridge stp_state 0")
+ pe2.run("ip addr add 10.10.1.3/24 dev br101")
+ pe2.run("ip link set dev br101 addr aa:bb:cc:00:22:ff")
+ pe2.run("ip link set dev br101 master VRF-A")
+ pe2.run("ip link set dev br101 up")
+ pe2.run(
+ "ip link add vxlan101 type vxlan id 101 dstport 4789 local 10.30.30.30 nolearning"
+ )
+ pe2.run("ip link set dev vxlan101 master br101")
+ pe2.run("ip link set dev vxlan101 type bridge_slave neigh_suppress on learning off")
+ pe2.run("ip link set dev vxlan101 up")
+ pe2.run("ip link set dev PE2-eth1 master br101")
+ pe2.run("ip link set dev PE2-eth1 up")
+
+ ## Enable IPv4 Routing
+ p1.run("sysctl -w net.ipv4.ip_forward=1")
+ pe1.run("sysctl -w net.ipv4.ip_forward=1")
+ pe2.run("sysctl -w net.ipv4.ip_forward=1")
+
+ ## tell hosts to send GARP upon IPv4 addr assignment
+ host1.run("sysctl -w net.ipv4.conf.host1-eth0.arp_announce=1")
+ host2.run("sysctl -w net.ipv4.conf.host2-eth0.arp_announce=1")
+
+ ## Load FRR config on all nodes and start topo
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def show_vni_json_elide_ifindex(pe, vni, expected):
+ output_json = pe.vtysh_cmd("show evpn vni {} json".format(vni), isjson=True)
+ if "ifindex" in output_json:
+ output_json.pop("ifindex")
+
+ return topotest.json_cmp(output_json, expected)
+
+
+def check_vni_macs_present(tgen, router, vni, maclist):
+ result = router.vtysh_cmd("show evpn mac vni {} json".format(vni), isjson=True)
+ for rname, ifname in maclist:
+ m = tgen.net.macs[(rname, ifname)]
+ if m not in result["macs"]:
+ return "MAC ({}) for interface {} on {} missing on {} from {}".format(
+ m, ifname, rname, router.name, json.dumps(result, indent=4)
+ )
+ return None
+
+
+def test_pe1_converge_evpn():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ pe1 = tgen.gears["PE1"]
+ json_file = "{}/{}/evpn.vni.json".format(CWD, pe1.name)
+ expected = json.loads(open(json_file).read())
+
+ test_func = partial(show_vni_json_elide_ifindex, pe1, 101, expected)
+ _, result = topotest.run_and_expect(test_func, None, count=45, wait=1)
+    assert result is None, '"{}" JSON output mismatches'.format(pe1.name)
+
+ # Let's ensure that the hosts have actually tried talking to
+ # each other. Otherwise under certain startup conditions
+ # they may not actually do any l2 arp'ing and as such
+ # the bridges won't know about the hosts on their networks
+ host1 = tgen.gears["host1"]
+ host1.run("ping -c 1 10.10.1.56")
+ host2 = tgen.gears["host2"]
+ host2.run("ping -c 1 10.10.1.55")
+
+ test_func = partial(
+ check_vni_macs_present,
+ tgen,
+ pe1,
+ 101,
+ (("host1", "host1-eth0"), ("host2", "host2-eth0")),
+ )
+
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ if result:
+ logger.warning("%s", result)
+ assert None, '"{}" missing expected MACs'.format(pe1.name)
+
+
+def test_pe2_converge_evpn():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ pe2 = tgen.gears["PE2"]
+ json_file = "{}/{}/evpn.vni.json".format(CWD, pe2.name)
+ expected = json.loads(open(json_file).read())
+
+ test_func = partial(show_vni_json_elide_ifindex, pe2, 101, expected)
+ _, result = topotest.run_and_expect(test_func, None, count=45, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(pe2.name)
+ assert result is None, assertmsg
+
+ test_func = partial(
+ check_vni_macs_present,
+ tgen,
+ pe2,
+ 101,
+ (("host1", "host1-eth0"), ("host2", "host2-eth0")),
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ if result:
+ logger.warning("%s", result)
+ assert None, '"{}" missing expected MACs'.format(pe2.name)
+
+
+def mac_learn_test(host, local):
+ "check the host MAC gets learned by the VNI"
+
+ host_output = host.vtysh_cmd("show interface {}-eth0".format(host.name))
+ int_lines = host_output.splitlines()
+ for line in int_lines:
+ line_items = line.split(": ")
+ if "HWaddr" in line_items[0]:
+ mac = line_items[1]
+ break
+
+ mac_output = local.vtysh_cmd("show evpn mac vni 101 mac {} json".format(mac))
+ mac_output_json = json.loads(mac_output)
+ assertmsg = "Local MAC output does not match interface mac {}".format(mac)
+ assert mac_output_json[mac]["type"] == "local", assertmsg
+
+
+def mac_test_local_remote(local, remote):
+ "test MAC transfer between local and remote"
+
+ local_output = local.vtysh_cmd("show evpn mac vni all json")
+ remote_output = remote.vtysh_cmd("show evpn mac vni all json")
+ local_output_vni = local.vtysh_cmd("show evpn vni detail json")
+ local_output_json = json.loads(local_output)
+ remote_output_json = json.loads(remote_output)
+ local_output_vni_json = json.loads(local_output_vni)
+
+ for vni in local_output_json:
+ mac_list = local_output_json[vni]["macs"]
+ for mac in mac_list:
+ if mac_list[mac]["type"] == "local" and mac_list[mac]["intf"] != "br101":
+ assertmsg = "JSON output mismatches local: {} remote: {}".format(
+ local_output_vni_json[0]["vtepIp"],
+ remote_output_json[vni]["macs"][mac]["remoteVtep"],
+ )
+ assert (
+ remote_output_json[vni]["macs"][mac]["remoteVtep"]
+ == local_output_vni_json[0]["vtepIp"]
+ ), assertmsg
+
+
+def test_learning_pe1():
+ "test MAC learning on PE1"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ host1 = tgen.gears["host1"]
+ pe1 = tgen.gears["PE1"]
+ mac_learn_test(host1, pe1)
+
+
+def test_learning_pe2():
+ "test MAC learning on PE2"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ host2 = tgen.gears["host2"]
+ pe2 = tgen.gears["PE2"]
+ mac_learn_test(host2, pe2)
+
+
+def test_local_remote_mac_pe1():
+ "Test MAC transfer PE1 local and PE2 remote"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ pe1 = tgen.gears["PE1"]
+ pe2 = tgen.gears["PE2"]
+ mac_test_local_remote(pe1, pe2)
+
+
+def test_local_remote_mac_pe2():
+ "Test MAC transfer PE2 local and PE1 remote"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ pe1 = tgen.gears["PE1"]
+ pe2 = tgen.gears["PE2"]
+ mac_test_local_remote(pe2, pe1)
+
+
+def ip_learn_test(tgen, host, local, remote, ip_addr):
+ "check the host IP gets learned by the VNI"
+ host_output = host.vtysh_cmd("show interface {}-eth0".format(host.name))
+ int_lines = host_output.splitlines()
+ for line in int_lines:
+ line_items = line.split(": ")
+ if "HWaddr" in line_items[0]:
+ mac = line_items[1]
+ break
+ print(host_output)
+
+ # check we have a local association between the MAC and IP
+ local_output = local.vtysh_cmd("show evpn mac vni 101 mac {} json".format(mac))
+ print(local_output)
+ local_output_json = json.loads(local_output)
+ mac_type = local_output_json[mac]["type"]
+ assertmsg = "Failed to learn local IP address on host {}".format(host.name)
+ assert local_output_json[mac]["neighbors"] != "none", assertmsg
+ learned_ip = local_output_json[mac]["neighbors"]["active"][0]
+
+ assertmsg = "local learned mac wrong type: {} ".format(mac_type)
+ assert mac_type == "local", assertmsg
+
+ assertmsg = (
+ "learned address mismatch with configured address host: {} learned: {}".format(
+ ip_addr, learned_ip
+ )
+ )
+ assert ip_addr == learned_ip, assertmsg
+
+ # now lets check the remote
+ count = 0
+ converged = False
+ while count < 30:
+ remote_output = remote.vtysh_cmd(
+ "show evpn mac vni 101 mac {} json".format(mac)
+ )
+ print(remote_output)
+ remote_output_json = json.loads(remote_output)
+ type = remote_output_json[mac]["type"]
+ if not remote_output_json[mac]["neighbors"] == "none":
+ # due to a kernel quirk, learned IPs can be inactive
+ if (
+ remote_output_json[mac]["neighbors"]["active"]
+ or remote_output_json[mac]["neighbors"]["inactive"]
+ ):
+ converged = True
+ break
+ count += 1
+ sleep(1)
+
+ print("tries: {}".format(count))
+ assertmsg = "{} remote learned mac no address: {} ".format(host.name, mac)
+ # on failure, dump the remote's zebra log to aid debugging
+ if not converged == True:
+ log_output = remote.run("cat zebra.log")
+ print(log_output)
+
+ assert converged == True, assertmsg
+ if remote_output_json[mac]["neighbors"]["active"]:
+ learned_ip = remote_output_json[mac]["neighbors"]["active"][0]
+ else:
+ learned_ip = remote_output_json[mac]["neighbors"]["inactive"][0]
+ assertmsg = "remote learned mac wrong type: {} ".format(type)
+ assert type == "remote", assertmsg
+
+ assertmsg = "remote learned address mismatch with configured address host: {} learned: {}".format(
+ ip_addr, learned_ip
+ )
+ assert ip_addr == learned_ip, assertmsg
+
+
+def test_ip_pe1_learn():
+ "run the IP learn test for PE1"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ host1 = tgen.gears["host1"]
+ pe1 = tgen.gears["PE1"]
+ pe2 = tgen.gears["PE2"]
+ # pe2.vtysh_cmd("debug zebra vxlan")
+ # pe2.vtysh_cmd("debug zebra kernel")
+ # let's populate the ARP cache
+ host1.run("ping -c1 10.10.1.1")
+ ip_learn_test(tgen, host1, pe1, pe2, "10.10.1.55")
+ # tgen.mininet_cli()
+
+
+def test_ip_pe2_learn():
+ "run the IP learn test for PE2"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ host2 = tgen.gears["host2"]
+ pe1 = tgen.gears["PE1"]
+ pe2 = tgen.gears["PE2"]
+ # pe1.vtysh_cmd("debug zebra vxlan")
+ # pe1.vtysh_cmd("debug zebra kernel")
+ # let's populate the ARP cache
+ host2.run("ping -c1 10.10.1.3")
+ ip_learn_test(tgen, host2, pe2, pe1, "10.10.1.56")
+ # tgen.mininet_cli()
+
+
+def is_installed(json_paths, soo):
+ """
+ check if any path has been selected as best.
+ optionally check for matching SoO on bestpath.
+ """
+ best = False
+ soo_present = False
+ for path in json_paths:
+ path = path[0]
+ # sometimes "bestpath" is a bool, other times it's a dict
+ # either way, the key isn't present when the bool is false...
+ # so we may as well just check for the key's existence
+ best = "bestpath" in path
+ path_keys = path.keys()
+ if best:
+ if soo:
+ soo_present = soo in path["extendedCommunity"]["string"]
+ break
+ return (best and soo_present) if soo else best
+
+
+def change_soo(pe, soo, vni):
+ soo_cmd_str = "mac-vrf soo "
+ if soo:
+ soo_cmd_str += soo
+ else:
+ soo_cmd_str = "no " + soo_cmd_str
+ pe.vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65000
+ address-family l2vpn evpn
+ {}
+ """.format(
+ soo_cmd_str
+ )
+ )
+ bgp_l2vni = get_bgp_l2vni_fields(pe, vni)
+ l2vni_soo = bgp_l2vni[2]
+ return l2vni_soo == soo
+
+
+def get_evpn_rt_json_str(vni, rd, oip=None, mac=None, ip=None):
+ "convert evpn route fields into a route string + global/l2vni cli syntax"
+ # type-3
+ if oip:
+ rt_str = "[3]:[0]:[32]:[{}]".format(oip)
+ global_rt_cmd = "show bgp l2vpn evpn route rd {} type 3 json".format(rd)
+ l2vni_rt_cmd = "show bgp vni {} type 3 vtep {} json".format(vni, oip)
+ # type-2
+ else:
+ rt_str = "[2]:[0]:[48]:[{}]".format(mac)
+ global_rt_cmd = "show bgp l2vpn evpn route rd {} type 2".format(rd)
+ l2vni_rt_cmd = "show bgp vni {} type 2 mac {}".format(vni, mac)
+ if ip:
+ ip_len = 128 if ":" in ip else 32
+ rt_str += ":[{}]:[{}]".format(ip_len, ip)
+ l2vni_rt_cmd = "show bgp vni {} type 2 ip {}".format(vni, ip)
+ global_rt_cmd += " json"
+ l2vni_rt_cmd += " json"
+ return [rt_str, global_rt_cmd, l2vni_rt_cmd]
+
+
+def get_evpn_rt_json(pe, vni, rd, oip=None, mac=None, ip=None):
+ "get json global/l2vni json blobs for the corresponding evpn route"
+ rt = get_evpn_rt_json_str(vni, rd, oip, mac, ip)
+ rt_str = rt.pop(0)
+ global_rt_cmd = rt.pop(0)
+ l2vni_rt_cmd = rt.pop(0)
+ logger.info(
+ "collecting global/l2vni evpn routes for pfx {} on {}".format(rt_str, pe.name)
+ )
+ global_rt_json = pe.vtysh_cmd(global_rt_cmd, isjson=True)
+ logger.info("global evpn route for pfx {} on {}".format(rt_str, pe.name))
+ logger.info(global_rt_json)
+ l2vni_rt_json = pe.vtysh_cmd(l2vni_rt_cmd, isjson=True)
+ logger.info("l2vni evpn route for pfx {} on {}".format(rt_str, pe.name))
+ logger.info(l2vni_rt_json)
+ return [rt_str, global_rt_json, l2vni_rt_json]
+
+
+def get_bgp_l2vni_fields(pe, vni):
+ bgp_vni_output = pe.vtysh_cmd(
+ "show bgp l2vpn evpn vni {} json".format(vni), isjson=True
+ )
+ rd = bgp_vni_output["rd"]
+ oip = bgp_vni_output["originatorIp"]
+ soo = bgp_vni_output["siteOfOrigin"]
+ return [rd, oip, soo]
+
+
+def rt_test(pe, vni, rd, oip, mac, ip, soo):
+ """
+ Check installation status of a given route.
+ @pe = router where bgp routes are collected from
+ @vni = l2vni
+ @rd = rd of the route
+ @oip = originator-ip, set only for type-3 route
+ @mac = nlri mac, set only for type-2
+ @ip = nlri ip, optionally set for type-2
+ @soo = MAC-VRF SoO string, set if SoO needs to be
+ on the rt to be considered installed.
+ """
+ rt = get_evpn_rt_json(pe, vni, rd, oip, mac, ip)
+ rt_str = rt.pop(0)
+ rt_global_json = rt.pop(0)
+ rt_l2vni_json = rt.pop(0)
+
+ if (
+ not rt_global_json
+ or rd not in rt_global_json
+ or rt_str not in rt_global_json[rd]
+ ):
+ global_installed = False
+ else:
+ global_json_paths = rt_global_json[rd][rt_str]["paths"]
+ global_installed = is_installed(global_json_paths, soo)
+ if not rt_l2vni_json:
+ l2vni_installed = False
+ else:
+ if not oip:
+ # JSON for RT2s in the l2vni table is not keyed by route string
+ l2vni_json_paths = rt_l2vni_json["paths"]
+ l2vni_installed = is_installed(l2vni_json_paths, soo)
+ elif rt_str in rt_l2vni_json and "paths" in rt_l2vni_json[rt_str]:
+ l2vni_json_paths = rt_l2vni_json[rt_str]["paths"]
+ l2vni_installed = is_installed(l2vni_json_paths, soo)
+ else:
+ l2vni_installed = False
+ return [global_installed, l2vni_installed]
+
+
+def test_macvrf_soo():
+ "Test MAC-VRF Site-of-Origin on pe1"
+ l2vni = 101
+ l3vni = 404
+ soo = "65000:0"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ host1 = tgen.gears["host1"]
+ host2 = tgen.gears["host2"]
+ pe1 = tgen.gears["PE1"]
+ pe2 = tgen.gears["PE2"]
+
+ # Collect pe2 RD/Originator-IP
+ pe2_bgp_vni = get_bgp_l2vni_fields(pe2, l2vni)
+ pe2_rd = pe2_bgp_vni[0]
+ pe2_oip = pe2_bgp_vni[1]
+ # Collect local addrs
+ h2_mac = host2.run("ip -br link show host2-eth0").split()[2]
+ h2_ip = host2.run("ip -4 -br addr show host2-eth0").split()[2].split("/")[0]
+ pe2_mac = pe2.run("ip -br link show br101").split()[2]
+ pe2_ip = pe2.run("ip -4 -br addr show br101").split()[2].split("/")[0]
+ # Route fields
+ pe2_svi_parms = [l2vni, pe2_rd, None, pe2_mac, pe2_ip]
+ pe2_imet_parms = [l2vni, pe2_rd, pe2_oip, None, None]
+ host2_mac_parms = [l2vni, pe2_rd, None, h2_mac, None]
+ host2_neigh_parms = [l2vni, pe2_rd, None, h2_mac, h2_ip]
+ # Route strings
+ pe2_svi_rt_str, _, _ = get_evpn_rt_json_str(*pe2_svi_parms)
+ pe2_imet_rt_str, _, _ = get_evpn_rt_json_str(*pe2_imet_parms)
+ host2_mac_rt_str, _, _ = get_evpn_rt_json_str(*host2_mac_parms)
+ host2_neigh_rt_str, _, _ = get_evpn_rt_json_str(*host2_neigh_parms)
+
+ ## trigger mac/arp learn
+ host1.run("ping -c1 10.10.1.1")
+ host2.run("ping -c1 10.10.1.3")
+
+ step("Test pe2/host2 routes are installed on pe1 (global/l2vni)")
+
+ # expected state:
+ # - global table: present w/o soo
+ # - l2vni table: present w/o soo
+ assertmsg = "{} missing on {} in {}{} evpn table(s)"
+ global_parms = [pe2.name, "global", ""]
+ l2vni_parms = [pe2.name, "l2vni", l2vni]
+ # pe2's type-2 for l2vni 101 svi mac/ip
+ test_f = partial(rt_test, pe2, *pe2_svi_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_svi_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_svi_rt_str, *l2vni_parms)
+ # pe2's type-3 for l2vni 101
+ test_f = partial(rt_test, pe2, *pe2_imet_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_imet_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_imet_rt_str, *l2vni_parms)
+ # mac-only type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_mac_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_mac_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_mac_rt_str, *l2vni_parms)
+ # mac+ip type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_neigh_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_neigh_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_neigh_rt_str, *l2vni_parms)
+
+ step("Add valid SoO config to pe2")
+ test_f = partial(change_soo, pe2, soo, l2vni)
+ _, res = topotest.run_and_expect(test_f, True, count=10, wait=1)
+ assertmsg = "soo '{}' not properly applied on {}".format(soo, pe2.name)
+ assert res == True, assertmsg
+
+ step("Test valid config applied to L2VNI on pe2")
+ ## expected state:
+ ## - global table: present w/ soo
+ ## - l2vni table: present w/ soo
+ assertmsg = "{} not originated with soo {} by {} in {}{} evpn table(s)"
+ global_parms = [soo, pe2.name, "global", ""]
+ l2vni_parms = [soo, pe2.name, "l2vni", l2vni]
+ # type-2 for l2vni 101 svi mac/ip
+ test_f = partial(rt_test, pe2, *pe2_svi_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_svi_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_svi_rt_str, *l2vni_parms)
+ # type-3 for l2vni 101
+ test_f = partial(rt_test, pe2, *pe2_imet_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_imet_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_imet_rt_str, *l2vni_parms)
+
+ step("Test invalid SoO config on pe2")
+ test_f = partial(change_soo, pe2, "1:1:1", l2vni)
+ _, res = topotest.run_and_expect(test_f, False, count=10, wait=1)
+ assertmsg = "soo '1:1:1' should not have been allowed on {}".format(pe2.name)
+ assert res == False, assertmsg
+
+ step("Test valid SoO applied to host2 routes (mac-only + mac/ip) on pe2")
+
+ ## expected state:
+ ## - global table: present w/ soo
+ ## - l2vni table: present w/ soo
+ assertmsg = "{} not originated with soo {} by {} in {}{} evpn table(s)"
+ global_parms = [soo, pe1.name, "global", ""]
+ l2vni_parms = [soo, pe1.name, "l2vni", l2vni]
+ # mac-only type-2 for host2
+ test_f = partial(rt_test, pe2, *host2_mac_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_mac_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_mac_rt_str, *l2vni_parms)
+ # mac+ip type-2 for host2
+ test_f = partial(rt_test, pe2, *host2_neigh_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_neigh_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_neigh_rt_str, *l2vni_parms)
+
+ step("Add valid SoO to pe1")
+ test_f = partial(change_soo, pe1, soo, l2vni)
+ _, res = topotest.run_and_expect(test_f, True, count=10, wait=1)
+ assertmsg = "soo '{}' not properly applied on {}".format(soo, pe1.name)
+ assert res == True, assertmsg
+
+ step("Test pe2's routes are filtered from l2vni on pe1.")
+ ## expected state:
+ ## - global table: present w/ soo
+ ## - l2vni table: not present
+ global_assertmsg = "{} with soo {} from {} missing from global evpn table"
+ l2vni_assertmsg = "{} with soo {} from {} not filtered from {}{} evpn table"
+ global_parms = [soo, pe1.name, "global", ""]
+ l2vni_parms = [soo, pe1.name, "l2vni", l2vni]
+ # pe2's svi route
+ test_f = partial(rt_test, pe1, *pe2_svi_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, False], count=30, wait=1)
+ assert res[0] == True, global_assertmsg.format(pe2_svi_rt_str, *global_parms)
+ assert res[1] == False, l2vni_assertmsg.format(pe2_svi_rt_str, *l2vni_parms)
+ # pe2's imet route
+ test_f = partial(rt_test, pe1, *pe2_imet_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, False], count=30, wait=1)
+ assert res[0] == True, global_assertmsg.format(pe2_imet_rt_str, *global_parms)
+ assert res[1] == False, l2vni_assertmsg.format(pe2_imet_rt_str, *l2vni_parms)
+ # mac-only type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_mac_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, False], count=30, wait=1)
+ assert res[0] == True, global_assertmsg.format(host2_mac_rt_str, *global_parms)
+ assert res[1] == False, l2vni_assertmsg.format(host2_mac_rt_str, *l2vni_parms)
+ # mac+ip type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_neigh_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, False], count=30, wait=1)
+ assert res[0] == True, global_assertmsg.format(host2_neigh_rt_str, *global_parms)
+ assert res[1] == False, l2vni_assertmsg.format(host2_neigh_rt_str, *l2vni_parms)
+
+ step("Remove SoO from pe1")
+ test_f = partial(change_soo, pe1, "", l2vni)
+ _, res = topotest.run_and_expect(test_f, True, count=10, wait=1)
+ assertmsg = "soo '{}' not properly removed from {}".format(soo, pe1.name)
+ assert res == True, assertmsg
+
+ step("Test pe2/host2 routes are installed on pe1 (global/l2vni)")
+ ## expected state:
+ ## - global table: present w/ soo
+ ## - l2vni table: present w/ soo
+ assertmsg = "{} with soo {} missing on {} in {}{} evpn table"
+ global_parms = [soo, pe1.name, "global", ""]
+ l2vni_parms = [soo, pe1.name, "l2vni", l2vni]
+ # pe2's type-2 for l2vni 101 svi mac/ip
+ test_f = partial(rt_test, pe1, *pe2_svi_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_svi_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_svi_rt_str, *l2vni_parms)
+ # pe2's type-3 for l2vni 101
+ test_f = partial(rt_test, pe1, *pe2_imet_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_imet_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_imet_rt_str, *l2vni_parms)
+ # mac-only type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_mac_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_mac_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_mac_rt_str, *l2vni_parms)
+ # mac+ip type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_neigh_parms, soo)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_neigh_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_neigh_rt_str, *l2vni_parms)
+
+ step("Remove SoO from pe2")
+ test_f = partial(change_soo, pe2, "", l2vni)
+ _, res = topotest.run_and_expect(test_f, True, count=10, wait=1)
+ assertmsg = "soo '{}' not properly removed from {}".format(soo, pe2.name)
+ assert res == True, assertmsg
+
+ step("Test pe2's 'self' routes are installed on pe1 (global/l2vni)")
+ ## expected state:
+ ## - global table: present w/o soo
+ ## - l2vni table: present w/o soo
+ assertmsg = "{} missing on {} in {}{} evpn table(s)"
+ global_parms = [pe1.name, "global", ""]
+ l2vni_parms = [pe1.name, "l2vni", l2vni]
+ # pe2's type-2 for l2vni 101 svi mac/ip
+ test_f = partial(rt_test, pe1, *pe2_svi_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_svi_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_svi_rt_str, *l2vni_parms)
+ # pe2's type-3 for l2vni 101
+ test_f = partial(rt_test, pe1, *pe2_imet_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(pe2_imet_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(pe2_imet_rt_str, *l2vni_parms)
+ # mac-only type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_mac_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_mac_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_mac_rt_str, *l2vni_parms)
+ # mac+ip type-2 for host2
+ test_f = partial(rt_test, pe1, *host2_neigh_parms, None)
+ _, res = topotest.run_and_expect(test_f, [True, True], count=30, wait=1)
+ assert res[0] == True, assertmsg.format(host2_neigh_rt_str, *global_parms)
+ assert res[1] == True, assertmsg.format(host2_neigh_rt_str, *l2vni_parms)
+
+ # tgen.mininet_cli()
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py b/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py
index fd675dc8ae..b1bec1034b 100644
--- a/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py
+++ b/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py
@@ -51,6 +51,7 @@ sys.path.append(os.path.join(CWD, "../"))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
+from lib.common_config import generate_support_bundle
# Required to instantiate the topology builder class.
@@ -139,7 +140,9 @@ def test_bgp_convergence():
)
_, res = topotest.run_and_expect(test_func, None, count=90, wait=0.5)
assertmsg = "BGP router network did not converge"
- assert res is None, assertmsg
+ if res is not None:
+ generate_support_bundle()
+ assert res is None, assertmsg
def test_bgp_flowspec():
@@ -183,7 +186,6 @@ def test_bgp_flowspec():
if __name__ == "__main__":
-
args = ["-s"] + sys.argv[1:]
ret = pytest.main(args)
diff --git a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py b/tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py
index 593a8d6417..593a8d6417 100644
--- a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py
+++ b/tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py
diff --git a/tests/topotests/bgp_local_as_dotplus_private_remove/test_bgp_local_as_dotplus_private_remove.py b/tests/topotests/bgp_local_as_dotplus_private_remove/test_bgp_local_as_dotplus_private_remove.py
index efecad3eb2..930fd791b0 100644
--- a/tests/topotests/bgp_local_as_dotplus_private_remove/test_bgp_local_as_dotplus_private_remove.py
+++ b/tests/topotests/bgp_local_as_dotplus_private_remove/test_bgp_local_as_dotplus_private_remove.py
@@ -31,13 +31,14 @@ used together with `remove-private-AS`.
import os
import sys
import json
-import time
import pytest
+import functools
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
+from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
pytestmark = [pytest.mark.bgpd]
@@ -84,29 +85,43 @@ def test_bgp_remove_private_as():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- def _bgp_converge(router):
- while True:
- output = json.loads(
- tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")
- )
- if output["192.168.255.1"]["bgpState"] == "Established":
- time.sleep(1)
- return True
-
- def _bgp_as_path(router):
- output = json.loads(
- tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")
- )
- if output["prefix"] == "172.16.255.254/32":
- return output["paths"][0]["aspath"]["segments"][0]["list"]
-
- if _bgp_converge("r2"):
- assert len(_bgp_as_path("r2")) == 1
- assert '0.65000' not in _bgp_as_path("r2")
-
- if _bgp_converge("r4"):
- assert len(_bgp_as_path("r4")) == 2
- assert '0.3000' in _bgp_as_path("r4")
+ r2 = tgen.gears["r2"]
+ r4 = tgen.gears["r4"]
+
+ def _bgp_converge():
+ output = json.loads(r2.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
+ expected = {
+ "192.168.255.1": {
+ "bgpState": "Established",
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Can't converge initially"
+
+ def _bgp_as_path(router, asn_path, asn_length):
+ output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json"))
+ expected = {
+ "paths": [
+ {
+ "aspath": {
+ "string": asn_path,
+ "length": asn_length,
+ }
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_as_path, r2, "0.500", 1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Private ASNs not stripped"
+
+ test_func = functools.partial(_bgp_as_path, r4, "0.500 0.3000", 2)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Private ASNs not stripped"
if __name__ == "__main__":
diff --git a/tests/topotests/bgp_peer_group/r3/bgpd.conf b/tests/topotests/bgp_peer_group/r3/bgpd.conf
index eb2fca15fb..5a1340fb0b 100644
--- a/tests/topotests/bgp_peer_group/r3/bgpd.conf
+++ b/tests/topotests/bgp_peer_group/r3/bgpd.conf
@@ -1,7 +1,11 @@
!
router bgp 65003
- neighbor PG peer-group
- neighbor PG remote-as external
- neighbor PG timers 3 10
- neighbor 192.168.255.1 peer-group PG
+ no bgp ebgp-requires-policy
+ neighbor PG peer-group
+ neighbor PG remote-as external
+ neighbor PG timers 3 10
+ neighbor 192.168.255.1 peer-group PG
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
!
diff --git a/tests/topotests/bgp_peer_group/test_bgp_peer-group.py b/tests/topotests/bgp_peer_group/test_bgp_peer-group.py
index e8c3feb76f..a91fade049 100644
--- a/tests/topotests/bgp_peer_group/test_bgp_peer-group.py
+++ b/tests/topotests/bgp_peer_group/test_bgp_peer-group.py
@@ -74,9 +74,26 @@ def test_bgp_peer_group():
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_peer_group_configured)
- success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed bgp convergence in r1"
- assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r1"])
+ def _bgp_peer_group_check_advertised_routes():
+ output = json.loads(
+ tgen.gears["r3"].vtysh_cmd("show ip bgp neighbor PG advertised-routes json")
+ )
+ expected = {
+ "advertisedRoutes": {
+ "192.168.255.0/24": {
+ "valid": True,
+ "best": True,
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_peer_group_check_advertised_routes)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed checking advertised routes from r3"
if __name__ == "__main__":
diff --git a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py
index ef9200b197..fd8a78b485 100644
--- a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py
+++ b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py
@@ -94,7 +94,6 @@ def test_bgp_route():
expected,
)
_, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
- assertmsg = '"r3" JSON output mismatches'
assert result is None, assertmsg
json_file = "{}/r3/v4_route3.json".format(CWD)
@@ -103,10 +102,11 @@ def test_bgp_route():
test_func = partial(
topotest.router_json_cmp,
r3,
- "show ip route 10.0.0.3 json",
+ "show ip route 60.0.0.0 json",
expected,
)
_, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert result is None, assertmsg
def test_bgp_better_admin_won():
diff --git a/tests/topotests/bgp_vpnv4_asbr/__init__.py b/tests/topotests/bgp_vpnv4_asbr/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/__init__.py
diff --git a/tests/topotests/bgp_vpnv4_asbr/h1/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/h1/zebra.conf
new file mode 100644
index 0000000000..22372242d3
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/h1/zebra.conf
@@ -0,0 +1,7 @@
+log stdout
+ip route 172.31.1.0/24 172.31.0.1
+ip route 172.31.2.0/24 172.31.0.1
+interface h1-eth0
+ ip address 172.31.0.10/24
+!
+
diff --git a/tests/topotests/bgp_vpnv4_asbr/h2/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/h2/zebra.conf
new file mode 100644
index 0000000000..d650bc831a
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/h2/zebra.conf
@@ -0,0 +1,6 @@
+log stdout
+ip route 172.31.0.0/24 172.31.1.1
+interface h2-eth0
+ ip address 172.31.1.10/24
+!
+
diff --git a/tests/topotests/bgp_vpnv4_asbr/h3/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/h3/zebra.conf
new file mode 100644
index 0000000000..5676485849
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/h3/zebra.conf
@@ -0,0 +1,6 @@
+log stdout
+ip route 172.31.0.0/24 172.31.2.1
+interface h3-eth0
+ ip address 172.31.2.10/24
+!
+
diff --git a/tests/topotests/bgp_vpnv4_asbr/r1/bgp_ipv4_routes.json b/tests/topotests/bgp_vpnv4_asbr/r1/bgp_ipv4_routes.json
new file mode 100644
index 0000000000..184ab312b6
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r1/bgp_ipv4_routes.json
@@ -0,0 +1,49 @@
+{
+ "vrfName": "vrf1",
+ "localAS": 65500,
+ "routes":
+ {
+ "172.31.0.10/32": [
+ {
+ "prefix": "172.31.0.10",
+ "prefixLen": 32,
+ "network": "172.31.0.10\/32",
+ "nhVrfName": "default",
+ "nexthops": [
+ {
+ "ip": "192.168.0.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ },
+ {
+ "prefix": "172.31.0.10",
+ "prefixLen": 32,
+ "network": "172.31.0.10\/32",
+ "nhVrfName": "default",
+ "nexthops": [
+ {
+ "ip": "192.168.0.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "172.31.0.1/32": [
+ {
+ "prefix": "172.31.0.1",
+ "prefixLen": 32,
+ "network": "172.31.0.1\/32",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf
new file mode 100644
index 0000000000..3bbcc20e9e
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf
@@ -0,0 +1,29 @@
+router bgp 65500
+ bgp router-id 192.0.2.1
+ no bgp ebgp-requires-policy
+ neighbor 192.0.2.100 remote-as 65500
+ neighbor 192.0.2.100 update-source lo
+ neighbor 192.168.0.100 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.0.100 activate
+ no neighbor 192.0.2.100 activate
+ network 192.0.2.1/32
+ exit-address-family
+ address-family ipv4 labeled-unicast
+ neighbor 192.168.0.100 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.0.2.100 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 101
+ rd vpn export 444:1
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/r1/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/r1/zebra.conf
new file mode 100644
index 0000000000..2f12b722b8
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r1/zebra.conf
@@ -0,0 +1,10 @@
+log stdout
+interface lo
+ ip address 192.0.2.1/32
+!
+interface r1-eth1 vrf vrf1
+ ip address 172.31.0.1/24
+!
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf
new file mode 100644
index 0000000000..4c84d52bd9
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf
@@ -0,0 +1,31 @@
+debug bgp nht
+debug bgp zebra
+debug bgp labelpool
+router bgp 65500
+ bgp router-id 192.0.2.2
+ no bgp ebgp-requires-policy
+ neighbor 192.0.2.100 remote-as 65500
+ neighbor 192.0.2.100 update-source lo
+ neighbor 192.168.0.100 remote-as 65500
+ neighbor 192.168.1.200 remote-as 65502
+ address-family ipv4 unicast
+ no neighbor 192.168.0.100 activate
+ no neighbor 192.168.1.200 activate
+ network 192.0.2.2/32
+ exit-address-family
+ address-family ipv4 labeled-unicast
+ neighbor 192.168.0.100 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.0.2.100 activate
+ neighbor 192.0.2.100 next-hop-self
+ neighbor 192.168.1.200 activate
+ exit-address-family
+!
+interface r2-eth1
+ mpls bgp forwarding
+ mpls bgp l3vpn-multi-domain-switching
+!
+interface r2-eth0
+ mpls bgp l3vpn-multi-domain-switching
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/r2/ipv4_vpn_summary.json b/tests/topotests/bgp_vpnv4_asbr/r2/ipv4_vpn_summary.json
new file mode 100644
index 0000000000..d33c5f5691
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r2/ipv4_vpn_summary.json
@@ -0,0 +1,24 @@
+{
+ "routerId":"192.0.2.2",
+ "as":65500,
+ "vrfId":0,
+ "vrfName":"default",
+ "peerCount":2,
+ "peers":{
+ "192.0.2.100":{
+ "remoteAs":65500,
+ "localAs":65500,
+ "version":4,
+ "state":"Established",
+ "peerState":"OK"
+ },
+ "192.168.1.200":{
+ "remoteAs":65502,
+ "localAs":65500,
+ "version":4,
+ "state":"Established",
+ "peerState":"OK"
+ }
+ },
+ "totalPeers":2
+}
diff --git a/tests/topotests/bgp_vpnv4_asbr/r2/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/r2/zebra.conf
new file mode 100644
index 0000000000..43508a4c6a
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r2/zebra.conf
@@ -0,0 +1,13 @@
+log stdout
+ip route 192.168.1.3/32 r2-eth1
+interface lo
+ ip address 192.0.2.2/32
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+ mpls enable
+!
+interface r2-eth1
+ ip address 192.168.1.2/24
+ mpls enable
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf
new file mode 100644
index 0000000000..c5d5727fba
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf
@@ -0,0 +1,25 @@
+router bgp 65501
+ bgp router-id 192.0.2.3
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.200 remote-as 65502
+ address-family ipv4 unicast
+ no neighbor 192.168.1.200 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.1.200 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.0.2.3
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:3
+ rt vpn both 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+interface r3-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/r3/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/r3/zebra.conf
new file mode 100644
index 0000000000..6376785f80
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/r3/zebra.conf
@@ -0,0 +1,14 @@
+log stdout
+ip route 192.168.1.3/32 r3-eth0
+interface r3-eth1 vrf vrf1
+ ip address 172.31.1.1/24
+!
+interface r3-eth2 vrf vrf1
+ ip address 172.31.2.1/24
+!
+interface r3-eth3 vrf vrf1
+ ip address 172.31.3.1/24
+!
+interface r3-eth0
+ ip address 192.168.1.3/24
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/rr100/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/rr100/bgpd.conf
new file mode 100644
index 0000000000..845d71bc7e
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/rr100/bgpd.conf
@@ -0,0 +1,29 @@
+router bgp 65500
+ bgp router-id 192.0.2.100
+ no bgp ebgp-requires-policy
+ neighbor 192.0.2.2 remote-as 65500
+ neighbor 192.0.2.2 update-source lo
+ neighbor 192.168.0.2 remote-as 65500
+ neighbor 192.0.2.1 remote-as 65500
+ neighbor 192.0.2.1 update-source lo
+ neighbor 192.168.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.0.1 activate
+ no neighbor 192.0.2.1 activate
+ no neighbor 192.168.0.2 activate
+ no neighbor 192.0.2.2 activate
+ network 192.0.2.100/32
+ exit-address-family
+ address-family ipv4 labeled-unicast
+ neighbor 192.168.0.1 activate
+ neighbor 192.168.0.2 activate
+ neighbor 192.168.0.1 route-reflector-client
+ neighbor 192.168.0.2 route-reflector-client
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.0.2.1 activate
+ neighbor 192.0.2.2 activate
+ neighbor 192.0.2.1 route-reflector-client
+ neighbor 192.0.2.2 route-reflector-client
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/rr100/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/rr100/zebra.conf
new file mode 100644
index 0000000000..2fa5285182
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/rr100/zebra.conf
@@ -0,0 +1,7 @@
+log stdout
+interface lo
+ ip address 192.0.2.100/32
+!
+interface rr100-eth0
+ ip address 192.168.0.100/24
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/rs200/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/rs200/bgpd.conf
new file mode 100644
index 0000000000..fa3cb54228
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/rs200/bgpd.conf
@@ -0,0 +1,19 @@
+debug bgp nht
+debug bgp zebra
+debug bgp labelpool
+router bgp 65502
+ bgp router-id 192.0.2.200
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.3 remote-as 65501
+ neighbor 192.168.1.2 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.1.2 activate
+ no neighbor 192.168.1.3 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.1.3 activate
+ neighbor 192.168.1.2 activate
+ neighbor 192.168.1.3 route-server-client
+ neighbor 192.168.1.2 route-server-client
+ exit-address-family
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_vpnv4_asbr/rs200/zebra.conf b/tests/topotests/bgp_vpnv4_asbr/rs200/zebra.conf
new file mode 100644
index 0000000000..98793ca003
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/rs200/zebra.conf
@@ -0,0 +1,4 @@
+log stdout
+interface rs200-eth0
+ ip address 192.168.1.200/24
+!
diff --git a/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py b/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py
new file mode 100644
index 0000000000..7b0dc1cff9
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py
@@ -0,0 +1,912 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_bgp_vpnv4_asbr.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2023 by 6WIND
+#
+
+"""
+ test_bgp_vpnv4_asbr.py: Test the FRR BGP daemon with rfc4364 option 10b
+ r1, r2, and rr100 are in an iBGP AS, while r2, r3 do an eBGP peering
+ h1 is a host behind r1 VRF1, and {h2,h3} are hosts behind r3 VRF1
+ The test demonstrates the connectivity across the network between h1 and h3.
+
+
+ +----------+ +----+--------+ +--------+ +--------+-----+
+ | |172.31.0.0|vrf | r1 |192.168.0.0/24| r2 |192.168.1.0/24|r3 | vrf |
+ | h1 +----------+ | 1+------+-------+ +------+-------+3 | +--- 172.31.3.0/24
+ | 10 | |VRF1|AS65500 | | | AS65500| | |AS65501 |VRF1 |
+ +----------+ +-------------+ | +--------+ | +--------+--+-++
+ 192.0.2.1 | 192.0.2.2 | 172| |
+ +----------+ +----+--------+ 31| |
+ |rr100 | |rs200/AS65502| 1| |
+ +----------+ +-------------+ 0| |
+ 192.0.2.100 +--------+ /24| |
+ | | +----------+----+ |
+ |h3 | | | |
+ |10 | | h2 | |
+ +---+----+ | 10 | |
+ | +----------+ |
+ |172.31.2.0/24 |
+ +--------------------------------+
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+import functools
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Allocate 8 devices
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("h1")
+ tgen.add_router("h2")
+ tgen.add_router("h3")
+ tgen.add_router("rr100")
+ tgen.add_router("rs200")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["rr100"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["h1"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["rs200"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["h2"])
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["h3"])
+
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r3"])
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf1 type vrf table 10",
+ "echo 100000 > /proc/sys/net/mpls/platform_labels",
+ "ip link set dev vrf1 up",
+ "ip link set dev {0}-eth1 master vrf1",
+ "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
+ ]
+
+ for rname in ("r1", "r3"):
+ for cmd in cmds_list:
+ input = cmd.format(rname)
+ logger.info("input: " + cmd)
+ output = tgen.net[rname].cmd(cmd.format(rname))
+ logger.info("output: " + output)
+
+ cmds_list = [
+ "ip link set dev {0}-eth2 master vrf1",
+ "ip link set dev {0}-eth3 master vrf1",
+ ]
+ for cmd in cmds_list:
+ input = cmd.format("r3")
+ logger.info("input: " + input)
+ output = tgen.net["r3"].cmd(input)
+ logger.info("output: " + output)
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ if rname in ("r1", "r2", "r3", "rr100", "rs200"):
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def bgp_vpnv4_prefix_check(router, rd, prefix, label, nexthop):
+ """
+ Dump and check 'show bgp ipv4 vpn <prefix> json' output. An assert is triggered in case test fails
+ * 'router': the router to check
+ * 'rd': The route distinguisher expected
+ * 'prefix': The prefix expected
+ * 'label': The label expected associated with the ('rd','prefix') tuple
+ * 'nexthop': The nexthop expected associated with the ('rd','prefix') tuple
+ """
+
+ def _check(router, prefix, rd, label, nexthop):
+ dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+ if not dump:
+ return "{0}, {1}, route distinguisher {2} not present".format(
+ router.name, prefix, rd
+ )
+ for dumped_rd, pathes in dump.items():
+ if dumped_rd != rd:
+ continue
+ for path in pathes["paths"]:
+ if "remoteLabel" not in path.keys():
+ return "{0}, {1}, rd {2}, remoteLabel not present".format(
+ router.name, prefix, rd
+ )
+ if str(path["remoteLabel"]) != label:
+ continue
+
+ if "nexthops" not in path.keys():
+ return "{0}, {1}, rd {2}, no nexthops present".format(
+ router.name, prefix, rd
+ )
+
+ for nh in path["nexthops"]:
+ if "ip" not in nh.keys():
+ return "{0}, {1}, rd {2}, no ipv4 nexthop available".format(
+ router.name, prefix, rd
+ )
+ if nh["ip"] != nexthop:
+ continue
+ return None
+ return "{0}, {1}, rd {2}, remoteLabel {3}, nexthop {4} not found".format(
+ router.name, prefix, rd, label, nexthop
+ )
+
+ func = functools.partial(_check, router, prefix, rd, label, nexthop)
+ success, result = topotest.run_and_expect(func, None, count=20, wait=0.5)
+ assert_msg = "{}, show bgp ipv4 vpn {}, rd {}, label {} nexthop {}".format(
+ router.name, prefix, rd, label, nexthop
+ )
+ assert result is None, assert_msg + " not found"
+ logger.info(assert_msg + " found")
+
+
+def mpls_table_check_entry(router, out_label, out_nexthop):
+ """
+ Dump and check 'show mpls table json' output. An assert is triggered in case test fails
+ * 'router': the router to check
+ * 'out_label': The outgoing label expected
+ * 'out_nexthop': The outgoing nexthop expected
+ """
+ logger.info("Checking MPLS labels on {}".format(router.name))
+ dump = router.vtysh_cmd("show mpls table json", isjson=True)
+ for in_label, label_info in dump.items():
+ for nh in label_info["nexthops"]:
+ if nh["type"] != "BGP" or "installed" not in nh.keys():
+ continue
+ if "nexthop" in nh.keys():
+ if nh["nexthop"] != out_nexthop:
+ continue
+ if "outLabelStack" in nh.keys():
+ if out_label not in nh["outLabelStack"]:
+ continue
+ logger.info(
+ "{}, show mpls table, entry in_label {} out_label {} out_nexthop {} found".format(
+ router.name, in_label, nh["outLabelStack"], nh["nexthop"]
+ )
+ )
+ return in_label
+ assert (
+ 0
+ ), "{}, show mpls table, entry matching in_label {} out_label {} out_nexthop {} not found".format(
+ router.name, in_label, out_label, out_nexthop
+ )
+ return None
+
+
+def check_ping(name, dest_addr, expect_connected):
+ """
+ Assert that ping to dest_addr is expected
+ * 'name': the router to set the ping from
+ * 'dest_addr': The destination ip address to ping
+ * 'expect_connected': True if ping is expected to pass
+ """
+
+ def _check(name, dest_addr, match):
+ tgen = get_topogen()
+ output = tgen.gears[name].run("ping {} -c 1 -w 1".format(dest_addr))
+ logger.info(output)
+ assert match in output, "ping fail"
+
+ match = ", {} packet loss".format("0%" if expect_connected else "100%")
+ logger.info("[+] check {} {} {}".format(name, dest_addr, match))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, dest_addr, match)
+ success, result = topotest.run_and_expect(func, None, count=20, wait=0.5)
+ assert result is None, "Failed"
+
+
+def check_show_bgp_vpn_prefix_found(
+ router, ipversion, prefix, rd, label=None, nexthop=None
+):
+ """
+ Check if a given vpn prefix is present in the BGP RIB
+ * 'router': the router to check BGP VPN RIB
+ * 'ipversion': The ip version to check: ipv4 or ipv6
+ * 'prefix': the IP prefix to check
+ * 'rd': the route distinguisher to check
+ * 'label: the label to check
+ """
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ if label:
+ if nexthop:
+ expected = {
+ rd: {
+ "prefix": prefix,
+ "paths": [{"remoteLabel": label, "nexthops": [{"ip": nexthop}]}],
+ }
+ }
+ else:
+ expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+ else:
+ if nexthop:
+ expected = {
+ rd: {"prefix": prefix, "paths": [{"nexthops": [{"ip": nexthop}]}]}
+ }
+ else:
+ expected = {rd: {"prefix": prefix}}
+ return topotest.json_cmp(output, expected)
+
+
+def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
+ """
+ Check if a given vpn prefix is not present in the BGP RIB
+ * 'router': the router to check BGP VPN RIB
+ * 'ipversion': The ip version to check: ipv4 or ipv6
+ * 'prefix': the IP prefix to check
+ * 'rd': the route distinguisher to check
+ * 'label: the label to check
+ """
+ output = json.loads(
+ router.vtysh_cmd("show bgp {} vpn {} json".format(ipversion, prefix))
+ )
+ if label:
+ expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
+ else:
+ expected = {rd: {"prefix": prefix}}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def check_show_mpls_table_entry_label_not_found(router, inlabel):
+ output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
+ expected = {"inLabel": inlabel, "installed": True}
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ return "not good"
+ return None
+
+
+def check_show_bgp_vpn_ok(router, vpnv4_entries):
+ """
+ Check on router that BGP l3vpn entries are present
+ Check there is an MPLS entry bound to that BGP L3VPN entry
+ Extract the Label value and check on the distributed router the BGP L3VPN entry
+ If check fail, an assert is triggered.
+ * 'router': the router to check BGP VPN RIB
+ * 'vpnv4_entries': dictionary that contains the list of prefixes, and the distributed router to look after
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ vpnv4_nexthops = {"r1": "192.0.2.2", "r3": "192.168.1.2"}
+ vpnv4_nht = {"192.0.2.1": "192.168.0.1", "192.168.1.3": "192.168.1.3"}
+ label_ip_entries = {}
+
+ def _return_remote_label_nh_rd(router, prefix):
+ dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+ assert_msg = (
+ "{}, prefix {} not available or label not found",
+ router.name,
+ prefix,
+ )
+ assert dump, assert_msg
+ for rd, pathes in dump.items():
+ for path in pathes["paths"]:
+ if "remoteLabel" not in path.keys():
+ assert 0, assert_msg
+ for nh in path["nexthops"]:
+ if "ip" in nh.keys():
+ return path["remoteLabel"], nh["ip"], rd
+ assert 0, assert_msg
+
+ def _check_nexthop_available(router, prefix):
+ dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+ if not dump:
+ return "{0}, {1}, route distinguisher not present".format(
+ router.name, prefix
+ )
+ for rd, pathes in dump.items():
+ for path in pathes["paths"]:
+ if "remoteLabel" not in path.keys():
+ return "{0}, {1}, remoteLabel not present".format(
+ router.name, prefix
+ )
+ if "nexthops" not in path.keys():
+ return "{0}, {1}, no nexthop available".format(router.name, prefix)
+ return None
+
+ for prefix, rname_to_test in vpnv4_entries.items():
+ func = functools.partial(_check_nexthop_available, router, prefix)
+ success, result = topotest.run_and_expect(func, None, count=20, wait=0.5)
+ assert result is None, "Failed to detect prefix {} on router {}".format(
+ prefix, router.name
+ )
+
+ for prefix, rname_to_test in vpnv4_entries.items():
+ l3vpn_label, l3vpn_nh, l3vpn_rd = _return_remote_label_nh_rd(router, prefix)
+ logger.info(
+ "{0}, {1}, label value is {2}, nh is {3}".format(
+ router.name, prefix, l3vpn_label, l3vpn_nh
+ )
+ )
+ in_label = mpls_table_check_entry(router, l3vpn_label, vpnv4_nht[l3vpn_nh])
+ label_ip_entries[prefix] = in_label
+
+ bgp_vpnv4_prefix_check(
+ tgen.gears[rname_to_test],
+ l3vpn_rd,
+ prefix,
+ in_label,
+ vpnv4_nexthops[rname_to_test],
+ )
+
+ return label_ip_entries
+
+
+def test_protocols_convergence():
+ """
+ Assert that all protocols have converged
+ Check that Labels are as expected in r1, r2,and r3
+ Check ping connectivity between h1 and h2
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # check that r2 peerings are ok
+ logger.info("Checking BGP ipv4 vpn summary for r2")
+ router = tgen.gears["r2"]
+ json_file = "{}/{}/ipv4_vpn_summary.json".format(CWD, router.name)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp ipv4 vpn summary json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+def test_mpls_setup_ok():
+ """
+ tests for the r1 to r3 direction: checks for prefix=('172.31.1.0/24','172.31.2.0/24','172.31.3.0/24')
+ r2. get label from 'prefix'
+ check that r2. show mpls table has an entry with outbound label set to the label from 172.31.1.0/24
+ r2. get label from mpls entry
+ check that r1: show bgp ipv4 vpn 172.31.1.0/24 has label from r2.mpls entry
+ tests for the r3 to r1 direction
+ r2. get label from 172.31.0.0/24
+ check that r2. show mpls table has an entry with outbound label set that includes the label from 172.31.0.0/24
+ r2. get label from mpls entry
+ check that r3: show bgp ipv4 vpn 172.31.0.0/24 has label from r2.mpls entry
+ check that h1. ping 172.31.1.10 (h2) is ok.
+ check that h1. ping 172.31.2.10 (h3) is ok.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r2"]
+
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ router.vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+ vpnv4_checks = {
+ "172.31.1.0/24": "r1",
+ "172.31.2.0/24": "r1",
+ "172.31.3.0/24": "r1",
+ "172.31.0.0/24": "r3",
+ }
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on all devices".format(
+ router.name
+ )
+ )
+ check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ logger.info("h1, check that ping from h1 to (h2,h3) is ok")
+ check_ping("h1", "172.31.1.10", True)
+ check_ping("h1", "172.31.2.10", True)
+
+
+def test_r3_prefixes_removed():
+ """
+ Remove BGP redistributed updates from r3.
+ Check that the BGP VPN updates from the updates are not present on r2.
+ Check that the 'show bgp ipv4 vpn' and 'show mpls table' are ok for 172.31.3.0/24
+ Remove the 172.31.3.0/24 update from BGP on r3.
+ Check that the BGP VPN updates from r3 are not present on r2.
+ Check that the 'show mpls table' entry previously seen disappeared
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r3"]
+ logger.info("{}, keeping only 172.31.3.0/24 network".format(router.name))
+ router.vtysh_cmd("configure terminal\ninterface r3-eth1 vrf vrf1\nshutdown\n")
+ router.vtysh_cmd("configure terminal\ninterface r3-eth2 vrf vrf1\nshutdown\n")
+
+ router = tgen.gears["r2"]
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' has only 172.31.3.0/24 network from r3".format(
+ router.name
+ )
+ )
+
+ for prefix in ("172.31.1.0/24", "172.31.2.0/24"):
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {} still present".format(router.name, prefix)
+
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ router.vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+ prefix = "172.31.3.0/24"
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on r2 and on r1".format(
+ router.name
+ )
+ )
+ vpnv4_checks = {
+ prefix: "r1",
+ }
+ label_ip_entries = check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ router = tgen.gears["r3"]
+ logger.info("{}, removing {} network".format(router.name, prefix))
+ router.vtysh_cmd("configure terminal\ninterface r3-eth3 vrf vrf1\nshutdown\n")
+
+ router = tgen.gears["r2"]
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' has not {} network from r3".format(
+ router.name, prefix
+ )
+ )
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {} still present".format(router.name, prefix)
+
+ logger.info(
+ "{}, check that 'show mpls table {}' is not present".format(
+ router.name, label_ip_entries[prefix]
+ )
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, label_ip_entries[prefix]
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry with in_label {} still present".format(
+ label_ip_entries[prefix]
+ )
+
+
+def test_r3_prefixes_added_back():
+ """
+ Add back the 172.31.3.0/24 network from r3
+ Check on r2 that MPLS switching entry appears when the 1st BGP update is received
+ Check the IP connectivity (h1,h2) and (h1,h3)
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r3"]
+ prefix = "172.31.3.0/24"
+ logger.info("{}, restoring the {} network from r3".format(router.name, prefix))
+ router.vtysh_cmd("configure terminal\ninterface r3-eth3 vrf vrf1\nno shutdown\n")
+
+ router = tgen.gears["r2"]
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' has {} network from r3".format(
+ router.name, prefix
+ )
+ )
+
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {} not present".format(router.name, prefix)
+
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on r2 and on r1".format(
+ router.name
+ )
+ )
+ vpnv4_checks = {
+ prefix: "r1",
+ }
+ check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ router = tgen.gears["r3"]
+ logger.info(
+ "{}, restoring the redistribute connected prefixes from r3".format(router.name)
+ )
+ router.vtysh_cmd("configure terminal\ninterface r3-eth1 vrf vrf1\nno shutdown\n")
+ router.vtysh_cmd("configure terminal\ninterface r3-eth2 vrf vrf1\nno shutdown\n")
+ router = tgen.gears["r2"]
+ for prefix in ("172.31.1.0/24", "172.31.2.0/24"):
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {} not present".format(router.name, prefix)
+
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ tgen.gears["r2"].vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+
+def test_unconfigure_nexthop_change_nexthop_self():
+ """
+ Get the list of labels advertised from r2 to r1
+ On r2, disable next-hop-self for 192.0.2.100 neighbor
+    Check that the list of labels is no longer present in 'show mpls table'
+ Check that r1 received the prefixes with the original (next-hop,label)
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r2"]
+ vpnv4_checks = {
+ "172.31.1.0/24": "r1",
+ "172.31.2.0/24": "r1",
+ "172.31.3.0/24": "r1",
+ }
+ logger.info(
+ "{}, Get the list of labels allocated for prefixes from r3".format(router.name)
+ )
+ label_ip_entries = check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ logger.info(
+ "{}, disable next-hop-self for 192.0.2.100 neighbor".format(router.name)
+ )
+ router = tgen.gears["r2"]
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\naddress-family ipv4 vpn\nno neighbor 192.0.2.100 next-hop-self\n"
+ )
+
+ for prefix, label in label_ip_entries.items():
+ logger.info(
+ "{}, check mpls entry for {} with in_label {} is not present'".format(
+ router.name, prefix, label
+ )
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, label
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry for {} with in_label {} still present".format(
+ prefix, label
+ )
+
+ router = tgen.gears["r1"]
+ for prefix, label in label_ip_entries.items():
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ label=label,
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, mpls vpn update {} label {} is present".format(
+ router.name, prefix, label
+ )
+ for prefix, label in label_ip_entries.items():
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ nexthop="192.168.1.3",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, mpls vpn update {} label {} is present".format(
+ router.name, prefix, label
+ )
+
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ tgen.gears["r2"].vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+
+def test_reconfigure_nexthop_change_nexthop_self():
+ """
+ Get the list of labels advertised from r2 to r1
+ On r2, enable next-hop-self for 192.0.2.100 neighbor
+    Check that the list of labels is present in 'show mpls table'
+ Check that r1 received the prefixes with the original (next-hop,label)
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r2"]
+ logger.info("{}, enable next-hop-self for 192.0.2.100 neighbor".format(router.name))
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\naddress-family ipv4 vpn\nneighbor 192.0.2.100 next-hop-self\n"
+ )
+ vpnv4_checks = {
+ "172.31.1.0/24": "r1",
+ "172.31.2.0/24": "r1",
+ "172.31.3.0/24": "r1",
+ }
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on r2 and on r1".format(
+ router.name
+ )
+ )
+ check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ logger.info("h1, check that ping from h1 to (h2,h3) is ok")
+ check_ping("h1", "172.31.1.10", True)
+ check_ping("h1", "172.31.2.10", True)
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ router.vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+
+def test_declare_vpn_network_with_different_label():
+ """
+ declare a vpnv4 network on r3.
+ check that a new VPNv4 entry is received on r2.
+ Check that the list of labels are present in 'show mpls table'
+ Check that r1 received the prefixes with the new (next-hop,label)
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r3"]
+ logger.info(
+ "{}, declare static 33.33.33.33/32 network rd 33:33 label 33".format(
+ router.name
+ )
+ )
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65501\nno bgp network import-check\n"
+ )
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65501\naddress-family ipv4 vpn\nnetwork 33.33.33.33/32 rd 444:3 label 33\n"
+ )
+
+ router = tgen.gears["r2"]
+ vpnv4_entries = {
+ "172.31.1.0/24": None,
+ "172.31.2.0/24": None,
+ "172.31.3.0/24": None,
+ "33.33.33.33/32": 33,
+ }
+
+ for prefix, label in vpnv4_entries.items():
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found,
+ router,
+ "ipv4",
+ prefix,
+ "444:3",
+ label=label,
+ nexthop="192.168.1.3",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {}, label {} not present".format(
+ router.name, prefix, label
+ )
+
+ vpnv4_checks = {
+ "172.31.1.0/24": "r1",
+ "172.31.2.0/24": "r1",
+ "172.31.3.0/24": "r1",
+ "33.33.33.33/32": "r1",
+ }
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on r2 and on r1".format(
+ router.name
+ )
+ )
+ check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+
+def test_filter_vpn_network_from_r1():
+ """
+ Get the list of labels in 'show mpls table'
+ filter network from r1
+ check that the vpnv4 entry on r2 is not present
+ Check that the associated mpls entry is not present
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r2"]
+
+ vpnv4_checks = {
+ "172.31.0.0/24": "r3",
+ }
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on r2 and on r3".format(
+ router.name
+ )
+ )
+ label_ip_entries = check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ for prefix, label in label_ip_entries.items():
+ logger.info("{}, filter prefix {} from r1".format(router.name, prefix))
+ router.vtysh_cmd(
+ "configure terminal\nroute-map rmap deny 1\nmatch ip next-hop address 192.0.2.1\n"
+ )
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\naddress-family ipv4 vpn\nneighbor 192.0.2.100 route-map rmap in\n"
+ )
+ logger.info(
+ "{}, check that prefix {} is not present".format(router.name, prefix)
+ )
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_not_found,
+ router,
+ "ipv4",
+ "172.31.0.0/24",
+ "444:1",
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {}, is still present".format(
+ router.name, prefix
+ )
+
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ router.vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+ logger.info(
+ "{}, check that show mpls table {} is not present".format(
+ router.name, label
+ )
+ )
+ test_func = functools.partial(
+ check_show_mpls_table_entry_label_not_found, router, int(label)
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "r1, mpls entry for {} with in_label {} still present".format(
+ prefix, label
+ )
+
+
+def test_unfilter_vpn_network_from_r1():
+ """
+ unfilter network from r1
+ check that the vpnv4 entry on r2 is present
+ Check that the list of labels are present in 'show mpls table'
+ Check that r3 received the prefixes with the new (next-hop,label)
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r2"]
+ prefix = "172.31.0.0/24"
+
+ logger.info("{}, filter prefix {} from r1".format(router.name, prefix))
+ router.vtysh_cmd(
+ "configure terminal\nrouter bgp 65500\naddress-family ipv4 vpn\nno neighbor 192.0.2.100 route-map rmap in\n"
+ )
+
+ logger.info("{}, check that prefix {} is present".format(router.name, prefix))
+ test_func = functools.partial(
+ check_show_bgp_vpn_prefix_found, router, "ipv4", prefix, "444:1"
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, vpnv4 update {}, is not present".format(router.name, prefix)
+
+ vpnv4_checks = {
+ "172.31.0.0/24": "r3",
+ }
+ logger.info(
+ "{}, check that 'show bgp ipv4 vpn' and 'show mpls table' are set accordingly on all devices".format(
+ router.name
+ )
+ )
+ check_show_bgp_vpn_ok(router, vpnv4_checks)
+
+ # diagnostic
+ logger.info("Dumping mplsvpn nexthop table")
+ router.vtysh_cmd("show bgp mplsvpn-nh-label-bind detail", isjson=False)
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf
index 3d8773b8bf..0709e43edf 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf
@@ -1,8 +1,12 @@
router bgp 65500
- bgp router-id 1.1.1.1
+ bgp router-id 192.0.2.1
neighbor 10.125.0.2 remote-as 65500
address-family ipv4 unicast
no neighbor 10.125.0.2 activate
+ label vpn export 100
+ rd vpn export 192.0.2.1:0
+ rt vpn import 192.0.2.2:400
+ import vpn
exit-address-family
address-family ipv4 vpn
neighbor 10.125.0.2 activate
@@ -10,15 +14,33 @@ router bgp 65500
exit-address-family
!
router bgp 65500 vrf vrf1
- bgp router-id 1.1.1.1
+ bgp router-id 192.0.2.1
address-family ipv4 unicast
redistribute connected
label vpn export 101
- rd vpn export 444:1
- rt vpn import 51:100 52:100
- rt vpn export 51:100
+ rd vpn export 192.0.2.1:1
+ rt vpn import 192.0.2.2:100
+ rt vpn export 192.0.2.1:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf3
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 103
+ rd vpn export 192.0.2.1:3
+ rt vpn export 192.0.2.1:300
export vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf4
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ label vpn export 104
+ rd vpn export 192.0.2.1:4
+ rt vpn import 192.0.2.1:300
import vpn
exit-address-family
!
-
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json
deleted file mode 100644
index 28e153e3de..0000000000
--- a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "vrfId":0,
- "vrfName":"default",
- "routerId":"1.1.1.1",
- "defaultLocPrf":100,
- "localAS":65500,
- "routes":{
- "routeDistinguishers":{
- "444:1":{
- "10.201.0.0/24":[
- {
- "valid":true,
- "bestpath":true,
- "selectionReason":"First path received",
- "pathFrom":"external",
- "prefix":"10.201.0.0",
- "prefixLen":24,
- "network":"10.201.0.0\/24",
- "metric":0,
- "weight":32768,
- "peerId":"(unspec)",
- "path":"",
- "origin":"incomplete",
- "announceNexthopSelf":true,
- "nhVrfName":"vrf1",
- "nexthops":[
- {
- "ip":"0.0.0.0",
- "hostname":"r1",
- "afi":"ipv4",
- "used":true
- }
- ]
- }
- ]
- },
- "444:2":{
- "10.200.0.0/24":[
- {
- "valid":true,
- "bestpath":true,
- "selectionReason":"First path received",
- "pathFrom":"internal",
- "prefix":"10.200.0.0",
- "prefixLen":24,
- "network":"10.200.0.0\/24",
- "metric":0,
- "locPrf":100,
- "weight":0,
- "peerId":"10.125.0.2",
- "path":"",
- "origin":"incomplete",
- "nexthops":[
- {
- "ip":"10.125.0.2",
- "hostname":"r2",
- "afi":"ipv4",
- "used":true
- }
- ]
- }
- ]
- },
- "444:3":{
- }
- }
- }
-}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_all.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_all.json
new file mode 100644
index 0000000000..648bf854ba
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_all.json
@@ -0,0 +1,175 @@
+{
+ "vrfId":0,
+ "vrfName":"default",
+ "routerId":"192.0.2.1",
+ "defaultLocPrf":100,
+ "localAS":65500,
+ "routes":{
+ "routeDistinguishers":{
+ "192.0.2.1:1":{
+ "10.101.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.101.0.0",
+ "prefixLen":24,
+ "network":"10.101.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf1",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.1:3":{
+ "10.103.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.103.0.0",
+ "prefixLen":24,
+ "network":"10.103.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf3",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:1":{
+ "10.201.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.201.0.0",
+ "prefixLen":24,
+ "network":"10.201.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:2":{
+ "10.202.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.202.0.0",
+ "prefixLen":24,
+ "network":"10.202.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:3":{
+ "10.203.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.203.0.0",
+ "prefixLen":24,
+ "network":"10.203.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:4":{
+ "10.204.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.204.0.0",
+ "prefixLen":24,
+ "network":"10.204.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init.json
index 45f4acce6f..f01607ac4e 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init.json
@@ -1,21 +1,21 @@
{
"vrfId":0,
"vrfName":"default",
- "routerId":"1.1.1.1",
+ "routerId":"192.0.2.1",
"defaultLocPrf":100,
"localAS":65500,
"routes":{
"routeDistinguishers":{
- "444:1":{
- "10.201.0.0/24":[
+ "192.0.2.1:1":{
+ "10.101.0.0/24":[
{
"valid":true,
"bestpath":true,
"selectionReason":"First path received",
"pathFrom":"external",
- "prefix":"10.201.0.0",
+ "prefix":"10.101.0.0",
"prefixLen":24,
- "network":"10.201.0.0\/24",
+ "network":"10.101.0.0\/24",
"metric":0,
"weight":32768,
"peerId":"(unspec)",
@@ -34,16 +34,44 @@
}
]
},
- "444:2":{
- "10.200.0.0/24":[
+ "192.0.2.1:3":{
+ "10.103.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.103.0.0",
+ "prefixLen":24,
+ "network":"10.103.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf3",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:1":{
+ "10.201.0.0/24":[
{
"valid":true,
"bestpath":true,
"selectionReason":"First path received",
"pathFrom":"internal",
- "prefix":"10.200.0.0",
+ "prefix":"10.201.0.0",
"prefixLen":24,
- "network":"10.200.0.0\/24",
+ "network":"10.201.0.0\/24",
"metric":0,
"locPrf":100,
"weight":0,
@@ -61,16 +89,16 @@
}
]
},
- "444:3":{
- "10.210.0.0/24":[
+ "192.0.2.2:4":{
+ "10.204.0.0/24":[
{
"valid":true,
"bestpath":true,
"selectionReason":"First path received",
"pathFrom":"internal",
- "prefix":"10.210.0.0",
+ "prefix":"10.204.0.0",
"prefixLen":24,
- "network":"10.210.0.0\/24",
+ "network":"10.204.0.0\/24",
"metric":0,
"locPrf":100,
"weight":0,
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf2.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf2.json
new file mode 100644
index 0000000000..6df6c69b8f
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf2.json
@@ -0,0 +1,148 @@
+{
+ "vrfId":0,
+ "vrfName":"default",
+ "routerId":"192.0.2.1",
+ "defaultLocPrf":100,
+ "localAS":65500,
+ "routes":{
+ "routeDistinguishers":{
+ "192.0.2.1:1":{
+ "10.101.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.101.0.0",
+ "prefixLen":24,
+ "network":"10.101.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf1",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.1:3":{
+ "10.103.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.103.0.0",
+ "prefixLen":24,
+ "network":"10.103.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf3",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:1":{
+ "10.201.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.201.0.0",
+ "prefixLen":24,
+ "network":"10.201.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:2":{
+ "10.202.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.202.0.0",
+ "prefixLen":24,
+ "network":"10.202.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:4":{
+ "10.204.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.204.0.0",
+ "prefixLen":24,
+ "network":"10.204.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf3.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf3.json
new file mode 100644
index 0000000000..7a17ff0f16
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_no_retain_init_plus_r2_vrf3.json
@@ -0,0 +1,148 @@
+{
+ "vrfId":0,
+ "vrfName":"default",
+ "routerId":"192.0.2.1",
+ "defaultLocPrf":100,
+ "localAS":65500,
+ "routes":{
+ "routeDistinguishers":{
+ "192.0.2.1:1":{
+ "10.101.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.101.0.0",
+ "prefixLen":24,
+ "network":"10.101.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf1",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.1:3":{
+ "10.103.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.103.0.0",
+ "prefixLen":24,
+ "network":"10.103.0.0\/24",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf3",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:1":{
+ "10.201.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.201.0.0",
+ "prefixLen":24,
+ "network":"10.201.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:3":{
+ "10.203.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.203.0.0",
+ "prefixLen":24,
+ "network":"10.203.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:4":{
+ "10.204.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.204.0.0",
+ "prefixLen":24,
+ "network":"10.204.0.0\/24",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf b/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf
index 6f5cb6ec68..233a6473b3 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf
@@ -10,5 +10,5 @@ router isis 1
is-type level-1
net 49.0002.0000.1994.00
segment-routing on
- segment-routing prefix 1.1.1.1/32 index 11
+ segment-routing prefix 192.0.2.1/32 index 11
!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf b/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf
index 5b8b1e8ffb..f99cfafe32 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf
@@ -1,13 +1,16 @@
log stdout
interface lo
- ip address 1.1.1.1/32
+ ip address 192.0.2.1/32
!
interface r1-gre0
ip address 192.168.0.1/24
!
-interface r1-eth1 vrf vrf1
- ip address 10.201.0.1/24
-!
interface r1-eth0
ip address 10.125.0.1/24
!
+interface r1-eth1 vrf vrf1
+ ip address 10.101.0.1/24
+!
+interface r1-eth3 vrf vrf3
+ ip address 10.103.0.1/24
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf
index 235fb31177..729daef2bc 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf
@@ -1,35 +1,54 @@
router bgp 65500
- bgp router-id 2.2.2.2
+ bgp router-id 192.0.2.2
neighbor 10.125.0.1 remote-as 65500
address-family ipv4 unicast
no neighbor 10.125.0.1 activate
exit-address-family
address-family ipv4 vpn
neighbor 10.125.0.1 activate
- no bgp retain route-target all
exit-address-family
!
router bgp 65500 vrf vrf1
- bgp router-id 2.2.2.2
+ bgp router-id 192.0.2.2
address-family ipv4 unicast
redistribute connected
- label vpn export 102
- rd vpn export 444:2
- rt vpn import 53:100 52:100 51:100
- rt vpn export 52:100
+ label vpn export 201
+ rd vpn export 192.0.2.2:1
+ rt vpn import 192.0.2.1:100 192.0.2.2:100 192.0.2.2:200
+ rt vpn export 192.0.2.2:100
export vpn
import vpn
exit-address-family
!
router bgp 65500 vrf vrf2
- bgp router-id 2.2.2.2
+ bgp router-id 192.0.2.2
address-family ipv4 unicast
redistribute connected
- label vpn export 102
- rd vpn export 444:3
- rt vpn both 53:100 52:100 51:100
- rt vpn both 53:100
+ label vpn export 202
+ rd vpn export 192.0.2.2:2
+ rt vpn import 192.0.2.1:100 192.0.2.2:100 192.0.2.2:200
+ rt vpn export 192.0.2.2:200
export vpn
import vpn
exit-address-family
!
+router bgp 65500 vrf vrf3
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 203
+ rd vpn export 192.0.2.2:3
+ rt vpn export 192.0.2.2:300
+ export vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf4
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 204
+ rd vpn export 192.0.2.2:4
+ rt vpn export 192.0.2.2:400
+ export vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_routes_all.json b/tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_routes_all.json
new file mode 100644
index 0000000000..d8b8e88d93
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_routes_all.json
@@ -0,0 +1,177 @@
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "routerId":"192.0.2.2",
+ "defaultLocPrf": 100,
+ "localAS": 65500,
+ "routes": {
+ "routeDistinguishers": {
+ "192.0.2.1:1": {
+ "10.101.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "internal",
+ "prefix": "10.101.0.0",
+ "prefixLen": 24,
+ "network": "10.101.0.0/24",
+ "metric": 0,
+ "locPrf": 100,
+ "weight": 0,
+ "peerId": "10.125.0.1",
+ "path": "",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "10.125.0.1",
+ "hostname": "r1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.1:3": {
+ "10.103.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "internal",
+ "prefix": "10.103.0.0",
+ "prefixLen": 24,
+ "network": "10.103.0.0/24",
+ "metric": 0,
+ "locPrf": 100,
+ "weight": 0,
+ "peerId": "10.125.0.1",
+ "path": "",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "10.125.0.1",
+ "hostname": "r1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:1": {
+ "10.201.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "network": "10.201.0.0/24",
+ "prefixLen": 24,
+ "prefix": "10.201.0.0",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf1",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "hostname": "r2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:2": {
+ "10.202.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "network": "10.202.0.0/24",
+ "prefixLen": 24,
+ "prefix": "10.202.0.0",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf2",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "hostname": "r2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:3": {
+ "10.203.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "10.203.0.0",
+ "prefixLen": 24,
+ "network": "10.203.0.0/24",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf3",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "hostname": "r2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "192.0.2.2:4": {
+ "10.204.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "10.204.0.0",
+ "prefixLen": 24,
+ "network": "10.204.0.0/24",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf4",
+ "nexthops": [
+ {
+ "ip": "0.0.0.0",
+ "hostname": "r2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_summary.json b/tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_summary.json
new file mode 100644
index 0000000000..a4408f1915
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/ipv4_vpn_summary.json
@@ -0,0 +1,17 @@
+{
+ "routerId":"192.0.2.2",
+ "as":65500,
+ "vrfId":0,
+ "vrfName":"default",
+ "peerCount":1,
+ "peers":{
+ "10.125.0.1":{
+ "remoteAs":65500,
+ "localAs":65500,
+ "version":4,
+ "state":"Established",
+ "peerState":"OK"
+ }
+ },
+ "totalPeers":1
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf b/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf
index cbec8c3674..547d10f2bc 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf
@@ -10,5 +10,5 @@ router isis 1
is-type level-1
net 49.0002.0000.1995.00
segment-routing on
- segment-routing prefix 2.2.2.2/32 index 22
+ segment-routing prefix 192.0.2.2/32 index 22
!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf b/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf
index 7ec644ac2a..f19ad9d3e8 100644
--- a/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf
@@ -1,16 +1,22 @@
log stdout
interface lo
- ip address 2.2.2.2/32
+ ip address 192.0.2.2/32
!
interface r2-gre0
ip address 192.168.0.2/24
!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
interface r2-eth1 vrf vrf1
- ip address 10.200.0.2/24
+ ip address 10.201.0.2/24
!
interface r2-eth2 vrf vrf2
- ip address 10.210.0.2/24
+ ip address 10.202.0.2/24
!
-interface r2-eth0
- ip address 10.125.0.2/24
+interface r2-eth3 vrf vrf3
+ ip address 10.203.0.1/24
+!
+interface r2-eth4 vrf vrf4
+ ip address 10.204.0.1/24
!
diff --git a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
index 9b8ae4b7e3..f665040f7f 100644
--- a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
+++ b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
@@ -17,6 +17,7 @@ import os
import sys
import json
from functools import partial
+from copy import deepcopy
import pytest
# Save the Current Working Directory to find configuration files.
@@ -34,6 +35,7 @@ from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
+
def build_topo(tgen):
"Build function"
@@ -53,41 +55,54 @@ def build_topo(tgen):
switch = tgen.add_switch("s4")
switch.add_link(tgen.gears["r2"])
-
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["r2"])
+
+
def _populate_iface():
tgen = get_topogen()
cmds_list = [
- 'modprobe mpls_router',
- 'echo 100000 > /proc/sys/net/mpls/platform_labels',
- 'ip link add vrf1 type vrf table 10',
- 'ip link set dev vrf1 up',
- 'ip link set dev {0}-eth1 master vrf1',
- 'echo 1 > /proc/sys/net/mpls/conf/vrf1/input',
+ "modprobe mpls_router",
+ "echo 100000 > /proc/sys/net/mpls/platform_labels",
+ "ip link add vrf1 type vrf table 10",
+ "ip link set dev vrf1 up",
+ "ip link set dev {0}-eth1 master vrf1",
+ "echo 1 > /proc/sys/net/mpls/conf/vrf1/input",
+ "ip link add vrf2 type vrf table 20",
+ "ip link set dev vrf2 up",
+ "ip link set dev {0}-eth2 master vrf2",
+ "echo 1 > /proc/sys/net/mpls/conf/vrf2/input",
+ "ip link add vrf3 type vrf table 30",
+ "ip link set dev vrf3 up",
+ "ip link set dev {0}-eth3 master vrf3",
+ "echo 1 > /proc/sys/net/mpls/conf/vrf3/input",
+ "ip link add vrf4 type vrf table 40",
+ "ip link set dev vrf4 up",
+ "ip link set dev {0}-eth4 master vrf4",
+ "echo 1 > /proc/sys/net/mpls/conf/vrf4/input",
]
- cmds_list_extra = [
- 'ip link add vrf2 type vrf table 20',
- 'ip link set dev vrf2 up',
- 'ip link set dev {0}-eth2 master vrf2',
- 'echo 1 > /proc/sys/net/mpls/conf/vrf2/input',
- ]
-
+
for cmd in cmds_list:
- input = cmd.format('r1', '1', '2')
- logger.info('input: ' + cmd)
- output = tgen.net['r1'].cmd(cmd.format('r1', '1', '2'))
- logger.info('output: ' + output)
+ input = cmd.format("r1", "1", "2")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1", "1", "2"))
+ logger.info("output: " + output)
for cmd in cmds_list:
- input = cmd.format('r2', '2', '1')
- logger.info('input: ' + cmd)
- output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1'))
- logger.info('output: ' + output)
+ input = cmd.format("r2", "2", "1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r2"].cmd(cmd.format("r2", "2", "1"))
+ logger.info("output: " + output)
- for cmd in cmds_list_extra:
- input = cmd.format('r2', '2', '1')
- logger.info('input: ' + cmd)
- output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1'))
- logger.info('output: ' + output)
def setup_module(mod):
"Sets up the pytest environment"
@@ -96,7 +111,7 @@ def setup_module(mod):
tgen.start_topology()
router_list = tgen.routers()
- _populate_iface()
+ _populate_iface()
for rname, router in router_list.items():
router.load_config(
@@ -136,27 +151,24 @@ def router_json_cmp_exact_filter(router, cmd, expected):
if "version" in attr:
attr.pop("version")
- return topotest.json_cmp(json_output, expected, exact=True)
+ # filter out RDs that carry no prefix data (e.g. "192.0.2.2:2": {})
+ json_tmp = deepcopy(json_output)
+ for rd, data in json_tmp["routes"]["routeDistinguishers"].items():
+ if len(data.keys()) == 0:
+ json_output["routes"]["routeDistinguishers"].pop(rd)
+ return topotest.json_cmp(json_output, expected, exact=True)
-def test_bgp_no_retain():
- """
- Check bgp no retain route-target all on r1
- """
+def check_show_bgp_ipv4_vpn(rname, json_file):
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ router = tgen.gears[rname]
- # Check IPv4 VPN routing tables on r1
- logger.info("Checking VPNv4 routes for convergence on r1")
- router = tgen.gears["r1"]
- json_file = "{}/{}/ipv4_vpn_routes.json".format(CWD, router.name)
- if not os.path.isfile(json_file):
- logger.info("skipping file {}".format(json_file))
- assert 0, "{} file not found".format(json_file)
- return
+ logger.info("Checking VPNv4 routes for convergence on {}".format(rname))
+ json_file = "{}/{}/{}".format(CWD, router.name, json_file)
expected = json.loads(open(json_file).read())
test_func = partial(
router_json_cmp_exact_filter,
@@ -169,39 +181,306 @@ def test_bgp_no_retain():
assert result is None, assertmsg
-def test_bgp_retain():
+def test_protocols_convergence_step0():
"""
- Apply and check bgp retain route-target all on r1
+ Assert that all protocols have converged
"""
-
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- # Check IPv4 VPN routing tables on r1
- logger.info("Checking VPNv4 routes on r1 after bgp no retain")
- router = tgen.gears["r1"]
- router.vtysh_cmd(
- "configure\nrouter bgp 65500\naddress-family ipv4 vpn\nbgp retain route-target all\n"
- )
- json_file = "{}/{}/ipv4_vpn_routes_unfiltered.json".format(CWD, router.name)
- if not os.path.isfile(json_file):
- logger.info("skipping file {}".format(json_file))
- assert 0, "{} file not found".format(json_file)
- return
-
+ # check that r2 peerings are ok
+ logger.info("Checking BGP ipv4 vpn summary for r2")
+ router = tgen.gears["r2"]
+ json_file = "{}/{}/ipv4_vpn_summary.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
test_func = partial(
- router_json_cmp_exact_filter,
+ topotest.router_json_cmp,
router,
- "show bgp ipv4 vpn json",
+ "show bgp ipv4 vpn summary json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
+def test_bgp_no_retain_step1():
+ """
+ Check bgp no retain route-target all on r1
+ """
+
+ rname = "r1"
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_retain_step2():
+ """
+ Apply and check bgp retain route-target all on r1
+ """
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500
+ address-family ipv4 vpn
+ bgp retain route-target all
+"""
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_all.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_step3():
+ """
+ Apply and check no bgp retain route-target all on r1
+ """
+ rname = "r1"
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+ router.vtysh_cmd(
+ "configure\nrouter bgp 65500\naddress-family ipv4 vpn\nno bgp retain route-target all\n"
+ )
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_add_vrf2_step4():
+ """
+ Add vrf2 on r1 and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 192.0.2.1:200
+ rt vpn import 192.0.2.2:200
+ import vpn
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init_plus_r2_vrf2.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_unimport_vrf2_step5():
+ """
+ Unimport to vrf2 on r1 and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ address-family ipv4 unicast
+ no import vpn
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_import_vrf2_step6():
+ """
+ Re-import to vrf2 on r1 and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ address-family ipv4 unicast
+ import vpn
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init_plus_r2_vrf2.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_import_vrf1_step7():
+ """
+ Import r2 vrf1 into r1 vrf2 and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ address-family ipv4 unicast
+ rt vpn import 192.0.2.1:100
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_import_vrf3_step8():
+ """
+ Import r2 vrf3 into r1 vrf2 and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ address-family ipv4 unicast
+ rt vpn import 192.0.2.2:300
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init_plus_r2_vrf3.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_unimport_vrf3_step9():
+ """
+ Un-import r2 vrf3 into r1 vrf2 and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ address-family ipv4 unicast
+ no rt vpn import 192.0.2.2:300
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_import_vrf3_step10():
+ """
+ Re-import r2 vrf3 into r1 vrf2 (after step9 removal) and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500 vrf vrf2
+ address-family ipv4 unicast
+ rt vpn import 192.0.2.2:300
+ exit-address-family
+!
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init_plus_r2_vrf3.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_no_retain_remove_vrf2_step11():
+ """
+ Remove the r1 vrf2 BGP instance and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+no router bgp 65500 vrf vrf2
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_no_retain_init.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
+def test_bgp_retain_step12():
+ """
+ Configure retain and check bgp vpnv4 table
+ """
+
+ rname = "r1"
+ cfg = """
+configure
+router bgp 65500
+ address-family ipv4 vpn
+ bgp retain route-target all
+"""
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears[rname]
+ router.vtysh_cmd(cfg)
+
+ check_show_bgp_ipv4_vpn(rname, "ipv4_vpn_routes_all.json")
+ check_show_bgp_ipv4_vpn("r2", "ipv4_vpn_routes_all.json")
+
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
index 966b717ab2..934c2ff16a 100644
--- a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
+++ b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
@@ -232,54 +232,61 @@ def bgp_vpnv4_table_check_all(router, label_list=None, same=False):
bgp_vpnv4_table_check(router, group=group, label_list=label_list)
-def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
- """
- Dump and check 'show mpls table json' output. An assert is triggered in case test fails
- * 'router': the router to check
- * 'blacklist': the list of nexthops (IP or interface) that should not be on output
- * 'label_list': the list of labels that should be in inLabel value
- * 'whitelist': the list of nexthops (IP or interface) that should be on output
- """
+def check_show_mpls_table(router, blacklist=None, label_list=None, whitelist=None):
nexthop_list = []
if blacklist:
nexthop_list.append(blacklist)
- logger.info("Checking MPLS labels on {}".format(router.name))
+
dump = router.vtysh_cmd("show mpls table json", isjson=True)
for in_label, label_info in dump.items():
if label_list is not None:
label_list.add(in_label)
for nh in label_info["nexthops"]:
- assert (
- nh["installed"] == True and nh["type"] == "BGP"
- ), "{}, show mpls table, nexthop is not installed".format(router.name)
- if "nexthop" in nh.keys():
- assert (
- nh["nexthop"] not in nexthop_list
- ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+ if nh["installed"] != True or nh["type"] != "BGP":
+ return "{}, show mpls table, nexthop is not installed".format(
router.name
)
+ if "nexthop" in nh.keys():
+ if nh["nexthop"] in nexthop_list:
+ return "{}, show mpls table, duplicated or blacklisted nexthop address".format(
+ router.name
+ )
nexthop_list.append(nh["nexthop"])
elif "interface" in nh.keys():
- assert (
- nh["interface"] not in nexthop_list
- ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
- router.name
- )
+ if nh["interface"] in nexthop_list:
+ return "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
+ router.name
+ )
nexthop_list.append(nh["interface"])
else:
- assert (
- 0
- ), "{}, show mpls table, entry with neither nexthop nor interface".format(
+ return "{}, show mpls table, entry with neither nexthop nor interface".format(
router.name
)
if whitelist:
for entry in whitelist:
- assert (
- entry in nexthop_list
- ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
- router.name, entry
- )
+ if entry not in nexthop_list:
+ return "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
+ router.name, entry
+ )
+ return None
+
+
+def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
+ """
+    Dump and check 'show mpls table json' output. An assert is triggered in case the test fails
+ * 'router': the router to check
+ * 'blacklist': the list of nexthops (IP or interface) that should not be on output
+ * 'label_list': the list of labels that should be in inLabel value
+ * 'whitelist': the list of nexthops (IP or interface) that should be on output
+ """
+ logger.info("Checking MPLS labels on {}".format(router.name))
+    # Retry the MPLS table check until it succeeds or the retry budget is exhausted
+ test_func = functools.partial(
+ check_show_mpls_table, router, blacklist, label_list, whitelist
+ )
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, MPLS labels check fail: {}".format(router.name, result)
def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index b78a2f1052..cb25d63a36 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -4,6 +4,7 @@ Topotest conftest.py file.
"""
# pylint: disable=consider-using-f-string
+import contextlib
import glob
import logging
import os
@@ -12,6 +13,7 @@ import resource
import subprocess
import sys
import time
+from pathlib import Path
import lib.fixtures
import pytest
@@ -41,6 +43,30 @@ except (AttributeError, ImportError):
pass
+# Remove this and use munet version when we move to pytest_asyncio
+@contextlib.contextmanager
+def chdir(ndir, desc=""):
+ odir = os.getcwd()
+ os.chdir(ndir)
+ if desc:
+ logging.debug("%s: chdir from %s to %s", desc, odir, ndir)
+ try:
+ yield
+ finally:
+ if desc:
+ logging.debug("%s: chdir back from %s to %s", desc, ndir, odir)
+ os.chdir(odir)
+
+
+@contextlib.contextmanager
+def log_handler(basename, logpath):
+ topolog.logstart(basename, logpath)
+ try:
+ yield
+ finally:
+ topolog.logfinish(basename, logpath)
+
+
def pytest_addoption(parser):
"""
Add topology-only option to the topology tester. This option makes pytest
@@ -272,6 +298,20 @@ def check_for_memleaks():
@pytest.fixture(autouse=True, scope="module")
+def module_autouse(request):
+ basename = get_test_logdir(request.node.nodeid, True)
+ logdir = Path(topotest.g_pytest_config.option.rundir) / basename
+ logpath = logdir / "exec.log"
+
+ subprocess.check_call("mkdir -p -m 1777 {}".format(logdir), shell=True)
+
+ with log_handler(basename, logpath):
+ sdir = os.path.dirname(os.path.realpath(request.fspath))
+ with chdir(sdir, "module autouse fixture"):
+ yield
+
+
+@pytest.fixture(autouse=True, scope="module")
def module_check_memtest(request):
yield
if request.config.option.valgrind_memleaks:
@@ -282,14 +322,19 @@ def module_check_memtest(request):
check_for_memleaks()
-def pytest_runtest_logstart(nodeid, location):
- # location is (filename, lineno, testname)
- topolog.logstart(nodeid, location, topotest.g_pytest_config.option.rundir)
-
-
-def pytest_runtest_logfinish(nodeid, location):
- # location is (filename, lineno, testname)
- topolog.logfinish(nodeid, location)
+#
+# Disable per test function logging as FRR CI system can't handle it.
+#
+# @pytest.fixture(autouse=True, scope="function")
+# def function_autouse(request):
+# # For tests we actually use the logdir name as the logfile base
+# logbase = get_test_logdir(nodeid=request.node.nodeid, module=False)
+# logbase = os.path.join(topotest.g_pytest_config.option.rundir, logbase)
+# logpath = Path(logbase)
+# path = Path(f"{logpath.parent}/exec-{logpath.name}.log")
+# subprocess.check_call("mkdir -p -m 1777 {}".format(logpath.parent), shell=True)
+# with log_handler(request.node.nodeid, path):
+# yield
@pytest.hookimpl(hookwrapper=True)
@@ -340,8 +385,10 @@ def pytest_configure(config):
os.environ["PYTEST_TOPOTEST_WORKER"] = ""
is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no"
is_worker = False
+ wname = ""
else:
- os.environ["PYTEST_TOPOTEST_WORKER"] = os.environ["PYTEST_XDIST_WORKER"]
+ wname = os.environ["PYTEST_XDIST_WORKER"]
+ os.environ["PYTEST_TOPOTEST_WORKER"] = wname
is_xdist = True
is_worker = True
@@ -375,6 +422,16 @@ def pytest_configure(config):
if not config.getoption("--log-file") and not config.getini("log_file"):
config.option.log_file = os.path.join(rundir, "exec.log")
+    # Handle pytest-xdist: each worker gets its own top-level log file
+ # `exec-worker-N.log`
+ if wname:
+ wname = wname.replace("gw", "worker-")
+ cpath = Path(config.option.log_file).absolute()
+ config.option.log_file = f"{cpath.parent}/{cpath.stem}-{wname}{cpath.suffix}"
+ elif is_xdist:
+ cpath = Path(config.option.log_file).absolute()
+ config.option.log_file = f"{cpath.parent}/{cpath.stem}-xdist{cpath.suffix}"
+
# Turn on live logging if user specified verbose and the config has a CLI level set
if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"):
if config.getoption("--log-cli-level", None) is None:
@@ -433,6 +490,10 @@ def pytest_configure(config):
@pytest.fixture(autouse=True, scope="session")
def setup_session_auto():
+ # Aligns logs nicely
+ logging.addLevelName(logging.WARNING, " WARN")
+ logging.addLevelName(logging.INFO, " INFO")
+
if "PYTEST_TOPOTEST_WORKER" not in os.environ:
is_worker = False
elif not os.environ["PYTEST_TOPOTEST_WORKER"]:
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 0bd9408c28..21d4567d6b 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -50,6 +50,7 @@ def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config
"bgp": {
"local_as": "200",
"router_id": "22.22.22.22",
+ "bgp_always_compare_med": True,
"graceful-restart": {
"graceful-restart": True,
"preserve-fw-state": True,
@@ -343,6 +344,13 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
config_data.append(cmd)
+ if "bgp_always_compare_med" in bgp_data:
+ bgp_always_compare_med = bgp_data["bgp_always_compare_med"]
+ if bgp_always_compare_med == True:
+ config_data.append("bgp always-compare-med")
+ elif bgp_always_compare_med == False:
+ config_data.append("no bgp always-compare-med")
+
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return config_data
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 5d37b062ac..e19d96f918 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -5,6 +5,7 @@
# ("NetDEF") in this file.
#
+import functools
import ipaddress
import json
import os
@@ -13,7 +14,6 @@ import socket
import subprocess
import sys
import traceback
-import functools
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
@@ -32,9 +32,10 @@ from lib.micronet import comm_error
from lib.topogen import TopoRouter, get_topogen
from lib.topolog import get_logger, logger
from lib.topotest import frr_unicode, interface_set_status, version_cmp
-from lib import topotest
from munet.testing.util import pause_test
+from lib import topotest
+
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
@@ -1258,143 +1259,6 @@ def add_interfaces_to_vlan(tgen, input_dict):
logger.debug("result %s", result)
-def tcpdump_capture_start(
- tgen,
- router,
- intf,
- protocol=None,
- grepstr=None,
- timeout=0,
- options=None,
- cap_file=None,
- background=True,
-):
- """
- API to capture network packets using tcp dump.
-
- Packages used :
-
- Parameters
- ----------
- * `tgen`: topogen object.
- * `router`: router on which ping has to be performed.
- * `intf` : interface for capture.
- * `protocol` : protocol for which packet needs to be captured.
- * `grepstr` : string to filter out tcp dump output.
- * `timeout` : Time for which packet needs to be captured.
- * `options` : options for TCP dump, all tcpdump options can be used.
- * `cap_file` : filename to store capture dump.
- * `background` : Make tcp dump run in back ground.
-
- Usage
- -----
- tcpdump_result = tcpdump_dut(tgen, 'r2', intf, protocol='tcp', timeout=20,
- options='-A -vv -x > r2bgp.txt ')
- Returns
- -------
- 1) True for successful capture
- 2) errormsg - when tcp dump fails
- """
-
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
- rnode = tgen.gears[router]
-
- if timeout > 0:
- cmd = "timeout {}".format(timeout)
- else:
- cmd = ""
-
- cmdargs = "{} tcpdump".format(cmd)
-
- if intf:
- cmdargs += " -i {}".format(str(intf))
- if protocol:
- cmdargs += " {}".format(str(protocol))
- if options:
- cmdargs += " -s 0 {}".format(str(options))
-
- if cap_file:
- file_name = os.path.join(tgen.logdir, router, cap_file)
- cmdargs += " -w {}".format(str(file_name))
- # Remove existing capture file
- rnode.run("rm -rf {}".format(file_name))
-
- if grepstr:
- cmdargs += ' | grep "{}"'.format(str(grepstr))
-
- logger.info("Running tcpdump command: [%s]", cmdargs)
- if not background:
- rnode.run(cmdargs)
- else:
- # XXX this & is bogus doesn't work
- # rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs))
- rnode.run("nohup {} > /dev/null 2>&1".format(cmdargs))
-
- # Check if tcpdump process is running
- if background:
- result = rnode.run("pgrep tcpdump")
- logger.debug("ps -ef | grep tcpdump \n {}".format(result))
-
- if not result:
- errormsg = "tcpdump is not running {}".format("tcpdump")
- return errormsg
- else:
- logger.info("Packet capture started on %s: interface %s", router, intf)
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
-
-
-def tcpdump_capture_stop(tgen, router):
- """
- API to capture network packets using tcp dump.
-
- Packages used :
-
- Parameters
- ----------
- * `tgen`: topogen object.
- * `router`: router on which ping has to be performed.
- * `intf` : interface for capture.
- * `protocol` : protocol for which packet needs to be captured.
- * `grepstr` : string to filter out tcp dump output.
- * `timeout` : Time for which packet needs to be captured.
- * `options` : options for TCP dump, all tcpdump options can be used.
- * `cap2file` : filename to store capture dump.
- * `bakgrnd` : Make tcp dump run in back ground.
-
- Usage
- -----
- tcpdump_result = tcpdump_dut(tgen, 'r2', intf, protocol='tcp', timeout=20,
- options='-A -vv -x > r2bgp.txt ')
- Returns
- -------
- 1) True for successful capture
- 2) errormsg - when tcp dump fails
- """
-
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
- rnode = tgen.gears[router]
-
- # Check if tcpdump process is running
- result = rnode.run("ps -ef | grep tcpdump")
- logger.debug("ps -ef | grep tcpdump \n {}".format(result))
-
- if not re_search(r"{}".format("tcpdump"), result):
- errormsg = "tcpdump is not running {}".format("tcpdump")
- return errormsg
- else:
- # XXX this doesn't work with micronet
- ppid = tgen.net.nameToNode[rnode.name].pid
- rnode.run("set +m; pkill -P %s tcpdump &> /dev/null" % ppid)
- logger.info("Stopped tcpdump capture")
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
-
-
def create_debug_log_config(tgen, input_dict, build=False):
"""
Enable/disable debug logs for any protocol with defined debug
@@ -3292,233 +3156,6 @@ def configure_interface_mac(tgen, input_dict):
return True
-def socat_send_mld_join(
- tgen,
- server,
- protocol_option,
- mld_groups,
- send_from_intf,
- send_from_intf_ip=None,
- port=12345,
- reuseaddr=True,
-):
- """
- API to send MLD join using SOCAT tool
-
- Parameters:
- -----------
- * `tgen` : Topogen object
- * `server`: iperf server, from where IGMP join would be sent
- * `protocol_option`: Protocol options, ex: UDP6-RECV
- * `mld_groups`: IGMP group for which join has to be sent
- * `send_from_intf`: Interface from which join would be sent
- * `send_from_intf_ip`: Interface IP, default is None
- * `port`: Port to be used, default is 12345
- * `reuseaddr`: True|False, bydefault True
-
- returns:
- --------
- errormsg or True
- """
-
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
- rnode = tgen.routers()[server]
- socat_args = "socat -u "
-
- # UDP4/TCP4/UDP6/UDP6-RECV/UDP6-SEND
- if protocol_option:
- socat_args += "{}".format(protocol_option)
-
- if port:
- socat_args += ":{},".format(port)
-
- if reuseaddr:
- socat_args += "{},".format("reuseaddr")
-
- # Group address range to cover
- if mld_groups:
- if not isinstance(mld_groups, list):
- mld_groups = [mld_groups]
-
- for mld_group in mld_groups:
- socat_cmd = socat_args
- join_option = "ipv6-join-group"
-
- if send_from_intf and not send_from_intf_ip:
- socat_cmd += "{}='[{}]:{}'".format(join_option, mld_group, send_from_intf)
- else:
- socat_cmd += "{}='[{}]:{}:[{}]'".format(
- join_option, mld_group, send_from_intf, send_from_intf_ip
- )
-
- socat_cmd += " STDOUT"
-
- socat_cmd += " &>{}/socat.logs &".format(tgen.logdir)
-
- # Run socat command to send IGMP join
- logger.info("[DUT: {}]: Running command: [{}]".format(server, socat_cmd))
- output = rnode.run("set +m; {} echo $!".format(socat_cmd))
-
- # Check if socat join process is running
- if output:
- pid = output.split()[0]
- rnode.run("touch /var/run/frr/socat_join.pid")
- rnode.run("echo %s >> /var/run/frr/socat_join.pid" % pid)
- else:
- errormsg = "Socat join is not sent for {}. Error {}".format(
- mld_group, output
- )
- logger.error(output)
- return errormsg
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
-
-
-def socat_send_pim6_traffic(
- tgen,
- server,
- protocol_option,
- mld_groups,
- send_from_intf,
- port=12345,
- multicast_hops=True,
-):
- """
- API to send pim6 data taffic using SOCAT tool
-
- Parameters:
- -----------
- * `tgen` : Topogen object
- * `server`: iperf server, from where IGMP join would be sent
- * `protocol_option`: Protocol options, ex: UDP6-RECV
- * `mld_groups`: MLD group for which join has to be sent
- * `send_from_intf`: Interface from which join would be sent
- * `port`: Port to be used, default is 12345
- * `multicast_hops`: multicast-hops count, default is 255
-
- returns:
- --------
- errormsg or True
- """
-
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
- rnode = tgen.routers()[server]
- socat_args = "socat -u STDIO "
-
- # UDP4/TCP4/UDP6/UDP6-RECV/UDP6-SEND
- if protocol_option:
- socat_args += "'{}".format(protocol_option)
-
- # Group address range to cover
- if mld_groups:
- if not isinstance(mld_groups, list):
- mld_groups = [mld_groups]
-
- for mld_group in mld_groups:
- socat_cmd = socat_args
- if port:
- socat_cmd += ":[{}]:{},".format(mld_group, port)
-
- if send_from_intf:
- socat_cmd += "interface={0},so-bindtodevice={0},".format(send_from_intf)
-
- if multicast_hops:
- socat_cmd += "multicast-hops=255'"
-
- socat_cmd += " >{}/socat.logs &".format(tgen.logdir)
-
- # Run socat command to send pim6 traffic
- logger.info(
- "[DUT: {}]: Running command: [set +m; ( while sleep 1; do date; done ) | {}]".format(
- server, socat_cmd
- )
- )
-
- # Open a shell script file and write data to it, which will be
- # used to send pim6 traffic continously
- traffic_shell_script = "{}/{}/traffic.sh".format(tgen.logdir, server)
- with open("{}".format(traffic_shell_script), "w") as taffic_sh:
- taffic_sh.write(
- "#!/usr/bin/env bash\n( while sleep 1; do date; done ) | {}\n".format(
- socat_cmd
- )
- )
-
- rnode.run("chmod 755 {}".format(traffic_shell_script))
- output = rnode.run("{} &>/dev/null & echo $!".format(traffic_shell_script))
-
- # Check if socat traffic process is running
- if output:
- pid = output.split()[0]
- rnode.run("touch /var/run/frr/socat_traffic.pid")
- rnode.run("echo %s >> /var/run/frr/socat_traffic.pid" % pid)
-
- else:
- errormsg = "Socat traffic is not sent for {}. Error {}".format(
- mld_group, output
- )
- logger.error(output)
- return errormsg
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
-
-
-def kill_socat(tgen, dut=None, action=None):
- """
- Killing socat process if running for any router in topology
-
- Parameters:
- -----------
- * `tgen` : Topogen object
- * `dut` : Any iperf hostname to send igmp prune
- * `action`: to kill mld join using socat
- to kill mld traffic using socat
-
- Usage:
- ------
- kill_socat(tgen, dut ="i6", action="remove_mld_join")
-
- """
-
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
- router_list = tgen.routers()
- for router, rnode in router_list.items():
- if dut is not None and router != dut:
- continue
-
- traffic_shell_script = "{}/{}/traffic.sh".format(tgen.logdir, router)
- pid_socat_join = rnode.run("cat /var/run/frr/socat_join.pid")
- pid_socat_traffic = rnode.run("cat /var/run/frr/socat_traffic.pid")
- if action == "remove_mld_join":
- pids = pid_socat_join
- elif action == "remove_mld_traffic":
- pids = pid_socat_traffic
- else:
- pids = "\n".join([pid_socat_join, pid_socat_traffic])
-
- if os.path.exists(traffic_shell_script):
- cmd = (
- "ps -ef | grep %s | awk -F' ' '{print $2}' | xargs kill -9"
- % traffic_shell_script
- )
- logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd))
- rnode.run(cmd)
-
- for pid in pids.split("\n"):
- pid = pid.strip()
- if pid.isdigit():
- cmd = "set +m; kill -9 %s &> /dev/null" % pid
- logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd))
- rnode.run(cmd)
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
-
-
#############################################
# Verification APIs
#############################################
diff --git a/tests/topotests/lib/mcast-tester.py b/tests/topotests/lib/mcast-tester.py
index 8a8251010c..5efbecd5e5 100755
--- a/tests/topotests/lib/mcast-tester.py
+++ b/tests/topotests/lib/mcast-tester.py
@@ -11,6 +11,7 @@ for the multicast group we subscribed to.
import argparse
import json
+import ipaddress
import os
import socket
import struct
@@ -35,13 +36,16 @@ def interface_name_to_index(name):
def multicast_join(sock, ifindex, group, port):
"Joins a multicast group."
- mreq = struct.pack(
- "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex
- )
-
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.bind((group, port))
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+
+ if ip_version == 4:
+ mreq = group.packed + struct.pack("@II", socket.INADDR_ANY, ifindex)
+ opt = socket.IP_ADD_MEMBERSHIP
+ else:
+ mreq = group.packed + struct.pack("@I", ifindex)
+ opt = socket.IPV6_JOIN_GROUP
+ sock.bind((str(group), port))
+ sock.setsockopt(ip_proto, opt, mreq)
#
@@ -50,15 +54,14 @@ def multicast_join(sock, ifindex, group, port):
parser = argparse.ArgumentParser(description="Multicast RX utility")
parser.add_argument("group", help="Multicast IP")
parser.add_argument("interface", help="Interface name")
+parser.add_argument("--port", type=int, default=1000, help="port to send to")
+parser.add_argument("--ttl", type=int, default=16, help="TTL/hops for sending packets")
parser.add_argument("--socket", help="Point to topotest UNIX socket")
parser.add_argument(
"--send", help="Transmit instead of join with interval", type=float, default=0
)
args = parser.parse_args()
-ttl = 16
-port = 1000
-
# Get interface index/validate.
ifindex = interface_name_to_index(args.interface)
if ifindex is None:
@@ -85,7 +88,12 @@ else:
# Set topotest socket non blocking so we can multiplex the main loop.
toposock.setblocking(False)
-msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+args.group = ipaddress.ip_address(args.group)
+ip_version = args.group.version
+ip_family = socket.AF_INET if ip_version == 4 else socket.AF_INET6
+ip_proto = socket.IPPROTO_IP if ip_version == 4 else socket.IPPROTO_IPV6
+
+msock = socket.socket(ip_family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if args.send > 0:
# Prepare multicast bit in that interface.
msock.setsockopt(
@@ -93,12 +101,18 @@ if args.send > 0:
25,
struct.pack("%ds" % len(args.interface), args.interface.encode("utf-8")),
)
- # Set packets TTL.
- msock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl))
+
+ # Set packets TTL/hops.
+ ttlopt = socket.IP_MULTICAST_TTL if ip_version == 4 else socket.IPV6_MULTICAST_HOPS
+ if ip_version == 4:
+ msock.setsockopt(ip_proto, ttlopt, struct.pack("B", args.ttl))
+ else:
+ msock.setsockopt(ip_proto, ttlopt, struct.pack("I", args.ttl))
+
# Block to ensure packet send.
msock.setblocking(True)
else:
- multicast_join(msock, ifindex, args.group, port)
+ multicast_join(msock, ifindex, args.group, args.port)
def should_exit():
@@ -120,7 +134,7 @@ def should_exit():
counter = 0
while not should_exit():
if args.send > 0:
- msock.sendto(b"test %d" % counter, (args.group, port))
+ msock.sendto(b"test %d" % counter, (str(args.group), args.port))
counter += 1
time.sleep(args.send)
diff --git a/tests/topotests/lib/micronet_compat.py b/tests/topotests/lib/micronet_compat.py
index d648a120ab..b348c85988 100644
--- a/tests/topotests/lib/micronet_compat.py
+++ b/tests/topotests/lib/micronet_compat.py
@@ -121,7 +121,7 @@ class Mininet(BaseMunet):
g_mnet_inst = None
- def __init__(self, rundir=None, pytestconfig=None):
+ def __init__(self, rundir=None, pytestconfig=None, logger=None):
"""
Create a Micronet.
"""
@@ -140,7 +140,7 @@ class Mininet(BaseMunet):
# os.umask(0)
super(Mininet, self).__init__(
- pid=False, rundir=rundir, pytestconfig=pytestconfig
+ pid=False, rundir=rundir, pytestconfig=pytestconfig, logger=logger
)
# From munet/munet/native.py
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index e26bdb3af3..f69718a5bd 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -1,35 +1,35 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
# Copyright (c) 2019 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
# ("NetDEF") in this file.
import datetime
+import functools
import os
import re
import sys
import traceback
-import functools
from copy import deepcopy
from time import sleep
-from lib import topotest
-
# Import common_config to use commomnly used APIs
from lib.common_config import (
- create_common_configurations,
HostApplicationHelper,
InvalidCLIError,
create_common_configuration,
- InvalidCLIError,
+ create_common_configurations,
+ get_frr_ipv6_linklocal,
retry,
run_frr_cmd,
validate_ip_address,
- get_frr_ipv6_linklocal,
)
from lib.micronet import get_exec_path
from lib.topolog import logger
from lib.topotest import frr_unicode
+from lib import topotest
+
####
CWD = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 0e685a97b0..6ddd223e25 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -84,7 +84,7 @@ def get_exabgp_cmd(commander=None):
"""Return the command to use for ExaBGP version < 4."""
if commander is None:
- commander = Commander("topogen")
+ commander = Commander("exabgp", logger=logging.getLogger("exabgp"))
def exacmd_version_ok(exacmd):
logger.debug("checking %s for exabgp < version 4", exacmd)
@@ -107,7 +107,7 @@ def get_exabgp_cmd(commander=None):
exacmd = py2_path + " -m exabgp"
if exacmd_version_ok(exacmd):
return exacmd
- py2_path = commander.get_exec_path("python")
+ py2_path = commander.get_exec_path("python")
if py2_path:
exacmd = py2_path + " -m exabgp"
if exacmd_version_ok(exacmd):
@@ -209,7 +209,11 @@ class Topogen(object):
# Mininet(Micronet) to build the actual topology.
assert not inspect.isclass(topodef)
- self.net = Mininet(rundir=self.logdir, pytestconfig=topotest.g_pytest_config)
+ self.net = Mininet(
+ rundir=self.logdir,
+ pytestconfig=topotest.g_pytest_config,
+ logger=topolog.get_logger("mu", log_level="debug"),
+ )
# Adjust the parent namespace
topotest.fix_netns_limits(self.net)
@@ -1090,8 +1094,9 @@ class TopoSwitch(TopoGear):
# pylint: disable=too-few-public-methods
def __init__(self, tgen, name, **params):
+ logger = topolog.get_logger(name, log_level="debug")
super(TopoSwitch, self).__init__(tgen, name, **params)
- tgen.net.add_switch(name)
+ tgen.net.add_switch(name, logger=logger)
def __str__(self):
gear = super(TopoSwitch, self).__str__()
diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py
index b501670789..aceb2cb031 100644
--- a/tests/topotests/lib/topolog.py
+++ b/tests/topotests/lib/topolog.py
@@ -15,13 +15,6 @@ This file defines our logging abstraction.
import logging
import os
-import subprocess
-import sys
-
-if sys.version_info[0] > 2:
- pass
-else:
- pass
try:
from xdist import is_xdist_controller
@@ -31,8 +24,6 @@ except ImportError:
return False
-BASENAME = "topolog"
-
# Helper dictionary to convert Topogen logging levels to Python's logging.
DEBUG_TOPO2LOGGING = {
"debug": logging.DEBUG,
@@ -42,13 +33,43 @@ DEBUG_TOPO2LOGGING = {
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
-FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s: %(name)s: %(message)s"
+FORMAT = "%(asctime)s %(levelname)s: %(name)s: %(message)s"
handlers = {}
-logger = logging.getLogger("topolog")
+logger = logging.getLogger("topo")
+
+
+# Remove this and use munet version when we move to pytest_asyncio
+def get_test_logdir(nodeid=None, module=False):
+ """Get log directory relative pathname."""
+ xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
+
+ # nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
+ # may be missing "::testname" if module is True
+ if not nodeid:
+ nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
+
+ cur_test = nodeid.replace("[", "_").replace("]", "_")
+ if module:
+ idx = cur_test.rfind("::")
+ path = cur_test if idx == -1 else cur_test[:idx]
+ testname = ""
+ else:
+ path, testname = cur_test.split("::")
+ testname = testname.replace("/", ".")
+ path = path[:-3].replace("/", ".")
+ # We use different logdir paths based on how xdist is running.
+ if mode == "each":
+ if module:
+ return os.path.join(path, "worker-logs", xdist_worker)
+ return os.path.join(path, testname, xdist_worker)
+ assert mode in ("no", "load", "loadfile", "loadscope"), f"Unknown dist mode {mode}"
+ return path if module else os.path.join(path, testname)
-def set_handler(l, target=None):
+
+def set_handler(lg, target=None):
if target is None:
h = logging.NullHandler()
else:
@@ -59,106 +80,81 @@ def set_handler(l, target=None):
h.setFormatter(logging.Formatter(fmt=FORMAT))
# Don't filter anything at the handler level
h.setLevel(logging.DEBUG)
- l.addHandler(h)
+ lg.addHandler(h)
return h
-def set_log_level(l, level):
+def set_log_level(lg, level):
"Set the logging level."
# Messages sent to this logger only are created if this level or above.
log_level = DEBUG_TOPO2LOGGING.get(level, level)
- l.setLevel(log_level)
+ lg.setLevel(log_level)
-def get_logger(name, log_level=None, target=None):
- l = logging.getLogger("{}.{}".format(BASENAME, name))
+def reset_logger(lg):
+ while lg.handlers:
+ x = lg.handlers.pop()
+ x.close()
+ lg.removeHandler(x)
- if log_level is not None:
- set_log_level(l, log_level)
- if target is not None:
- set_handler(l, target)
-
- return l
-
-
-# nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
-
-
-def get_test_logdir(nodeid=None):
- """Get log directory relative pathname."""
- xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
- mode = os.getenv("PYTEST_XDIST_MODE", "no")
+def get_logger(name, log_level=None, target=None, reset=True):
+ lg = logging.getLogger(name)
- if not nodeid:
- nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
+ if reset:
+ reset_logger(lg)
- cur_test = nodeid.replace("[", "_").replace("]", "_")
- path, testname = cur_test.split("::")
- path = path[:-3].replace("/", ".")
+ if log_level is not None:
+ set_log_level(lg, log_level)
- # We use different logdir paths based on how xdist is running.
- if mode == "each":
- return os.path.join(path, testname, xdist_worker)
- elif mode == "load":
- return os.path.join(path, testname)
- else:
- assert (
- mode == "no" or mode == "loadfile" or mode == "loadscope"
- ), "Unknown dist mode {}".format(mode)
+ if target is not None:
+ set_handler(lg, target)
- return path
+ return lg
-def logstart(nodeid, location, rundir):
+def logstart(nodeid, logpath):
"""Called from pytest before module setup."""
-
- mode = os.getenv("PYTEST_XDIST_MODE", "no")
worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
+ wstr = f" on worker {worker}" if worker else ""
+ handler_id = nodeid + worker
+ logpath = logpath.absolute()
- # We only per-test log in the workers (or non-dist)
- if not worker and mode != "no":
- return
+ logging.debug("logstart: adding logging for %s%s at %s", nodeid, wstr, logpath)
+ root_logger = logging.getLogger()
+ handler = logging.FileHandler(logpath, mode="w")
+ handler.setFormatter(logging.Formatter(FORMAT))
- handler_id = nodeid + worker
- assert handler_id not in handlers
-
- rel_log_dir = get_test_logdir(nodeid)
- exec_log_dir = os.path.join(rundir, rel_log_dir)
- subprocess.check_call(
- "mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True
- )
- exec_log_path = os.path.join(exec_log_dir, "exec.log")
-
- # Add test based exec log handler
- h = set_handler(logger, exec_log_path)
- handlers[handler_id] = h
-
- if worker:
- logger.info(
- "Logging on worker %s for %s into %s", worker, handler_id, exec_log_path
- )
- else:
- logger.info("Logging for %s into %s", handler_id, exec_log_path)
+ root_logger.addHandler(handler)
+ handlers[handler_id] = handler
+ logging.debug("logstart: added logging for %s%s at %s", nodeid, wstr, logpath)
+ return handler
-def logfinish(nodeid, location):
- """Called from pytest after module teardown."""
- # This function may not be called if pytest is interrupted.
+def logfinish(nodeid, logpath):
+ """Called from pytest after module teardown."""
worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
- handler_id = nodeid + worker
+ wstr = f" on worker {worker}" if worker else ""
+
+ root_logger = logging.getLogger()
- if handler_id in handlers:
- # Remove test based exec log handler
- if worker:
- logger.info("Closing logs for %s", handler_id)
+ handler_id = nodeid + worker
+ if handler_id not in handlers:
+ logging.critical("can't find log handler to remove")
+ else:
+ logging.debug(
+ "logfinish: removing logging for %s%s at %s", nodeid, wstr, logpath
+ )
h = handlers[handler_id]
- logger.removeHandler(handlers[handler_id])
+ root_logger.removeHandler(h)
h.flush()
h.close()
del handlers[handler_id]
+ logging.debug(
+ "logfinish: removed logging for %s%s at %s", nodeid, wstr, logpath
+ )
console_handler = set_handler(logger, None)
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 0e96921b7f..845d3e3b53 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -24,6 +24,7 @@ import subprocess
import sys
import tempfile
import time
+import logging
from collections.abc import Mapping
from copy import deepcopy
@@ -38,7 +39,7 @@ g_pytest_config = None
def get_logs_path(rundir):
- logspath = topolog.get_test_logdir()
+ logspath = topolog.get_test_logdir(module=True)
return os.path.join(rundir, logspath)
@@ -1137,7 +1138,9 @@ def _sysctl_assure(commander, variable, value):
def sysctl_atleast(commander, variable, min_value, raises=False):
try:
if commander is None:
- commander = micronet.Commander("topotest")
+ logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=logger)
+
return _sysctl_atleast(commander, variable, min_value)
except subprocess.CalledProcessError as error:
logger.warning(
@@ -1153,7 +1156,8 @@ def sysctl_atleast(commander, variable, min_value, raises=False):
def sysctl_assure(commander, variable, value, raises=False):
try:
if commander is None:
- commander = micronet.Commander("topotest")
+ logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=logger)
return _sysctl_assure(commander, variable, value)
except subprocess.CalledProcessError as error:
logger.warning(
diff --git a/tests/topotests/mgmt_config/r1/early-end-zebra.conf b/tests/topotests/mgmt_config/r1/early-end-zebra.conf
new file mode 100644
index 0000000000..44a2f96825
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-end-zebra.conf
@@ -0,0 +1,6 @@
+allow-external-route-update
+end
+ip multicast rpf-lookup-mode urib-only
+end
+ip table range 2 3
+end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-end.conf b/tests/topotests/mgmt_config/r1/early-end.conf
new file mode 100644
index 0000000000..3aacad6471
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-end.conf
@@ -0,0 +1,8 @@
+ip route 15.1.0.0/24 101.0.0.2
+end
+ip route 15.2.0.0/24 101.0.0.2
+end
+ip route 15.3.0.0/24 101.0.0.2
+end
+ip route 15.4.0.0/24 101.0.0.2
+end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
new file mode 100644
index 0000000000..37619d52ac
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
@@ -0,0 +1,7 @@
+conf t
+allow-external-route-update
+end
+ip multicast rpf-lookup-mode urib-only
+end
+ip table range 2 3
+end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-end2.conf b/tests/topotests/mgmt_config/r1/early-end2.conf
new file mode 100644
index 0000000000..229ccc7410
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-end2.conf
@@ -0,0 +1,9 @@
+conf t
+ip route 16.1.0.0/24 101.0.0.2
+end
+ip route 16.2.0.0/24 101.0.0.2
+end
+ip route 16.3.0.0/24 101.0.0.2
+end
+ip route 16.4.0.0/24 101.0.0.2
+end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
new file mode 100644
index 0000000000..44f202dbcb
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
@@ -0,0 +1,6 @@
+allow-external-route-update
+exit
+ip multicast rpf-lookup-mode urib-only
+exit
+ip table range 2 3
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit.conf b/tests/topotests/mgmt_config/r1/early-exit.conf
new file mode 100644
index 0000000000..c6a52df5d3
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-exit.conf
@@ -0,0 +1,8 @@
+ip route 13.1.0.0/24 101.0.0.2
+exit
+ip route 13.2.0.0/24 101.0.0.2
+exit
+ip route 13.3.0.0/24 101.0.0.2
+exit
+ip route 13.4.0.0/24 101.0.0.2
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
new file mode 100644
index 0000000000..c7109bfd39
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
@@ -0,0 +1,7 @@
+conf t
+allow-external-route-update
+exit
+ip multicast rpf-lookup-mode urib-only
+exit
+ip table range 2 3
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit2.conf b/tests/topotests/mgmt_config/r1/early-exit2.conf
new file mode 100644
index 0000000000..79510c0aec
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/early-exit2.conf
@@ -0,0 +1,9 @@
+conf t
+ip route 14.1.0.0/24 101.0.0.2
+exit
+ip route 14.2.0.0/24 101.0.0.2
+exit
+ip route 14.3.0.0/24 101.0.0.2
+exit
+ip route 14.4.0.0/24 101.0.0.2
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/mgmtd.conf b/tests/topotests/mgmt_config/r1/mgmtd.conf
new file mode 100644
index 0000000000..318de765c8
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/mgmtd.conf
@@ -0,0 +1,11 @@
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client backend
+debug mgmt client frontend
+
+ip route 12.0.0.0/24 101.0.0.2
+
+ipv6 route 2012::/48 2101::2 \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/normal-exit.conf b/tests/topotests/mgmt_config/r1/normal-exit.conf
new file mode 100644
index 0000000000..c6a52df5d3
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/normal-exit.conf
@@ -0,0 +1,8 @@
+ip route 13.1.0.0/24 101.0.0.2
+exit
+ip route 13.2.0.0/24 101.0.0.2
+exit
+ip route 13.3.0.0/24 101.0.0.2
+exit
+ip route 13.4.0.0/24 101.0.0.2
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
new file mode 100644
index 0000000000..0c38459702
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
@@ -0,0 +1,3 @@
+allow-external-route-update
+exit
+ip multicast rpf-lookup-mode urib-only
diff --git a/tests/topotests/mgmt_config/r1/one-exit.conf b/tests/topotests/mgmt_config/r1/one-exit.conf
new file mode 100644
index 0000000000..47147d44eb
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/one-exit.conf
@@ -0,0 +1,3 @@
+ip route 20.1.0.0/24 101.0.0.2
+exit
+ip route 20.2.0.0/24 101.0.0.2
diff --git a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
new file mode 100644
index 0000000000..34acb76d92
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
@@ -0,0 +1,4 @@
+conf t
+allow-external-route-update
+exit
+ip multicast rpf-lookup-mode urib-only \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/one-exit2.conf b/tests/topotests/mgmt_config/r1/one-exit2.conf
new file mode 100644
index 0000000000..262339a854
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/one-exit2.conf
@@ -0,0 +1,4 @@
+conf t
+ip route 21.1.0.0/24 101.0.0.2
+exit
+ip route 21.2.0.0/24 101.0.0.2
diff --git a/tests/topotests/mgmt_config/r1/zebra.conf b/tests/topotests/mgmt_config/r1/zebra.conf
new file mode 100644
index 0000000000..f3264efb00
--- /dev/null
+++ b/tests/topotests/mgmt_config/r1/zebra.conf
@@ -0,0 +1,7 @@
+log timestamp precision 6
+log file frr-r1.log debug
+
+interface r1-eth0
+ ip address 101.0.0.1/24
+ ipv6 address 2101::1/64
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/test_config.py b/tests/topotests/mgmt_config/test_config.py
new file mode 100644
index 0000000000..b07ed8f7fd
--- /dev/null
+++ b/tests/topotests/mgmt_config/test_config.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# June 10 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+"""
+Test mgmtd parsing of configs.
+
+So:
+
+MGMTD matches zebra:
+
+one exit file: ONE: vty -f file
+one exit redir: ONE: vty < file
+early exit file: ONE: vty -f file
+early exit redir: ONE: vty < file
+early end file: ALL: vty -f file
+early end redir: ONE: vty < file
+
+Raw tests:
+
+FAILED mgmt_config/test_config.py::test_mgmtd_one_exit_file - AssertionError: vtysh < didn't work after exit
+FAILED mgmt_config/test_config.py::test_mgmtd_one_exit_redir - AssertionError: vtysh < didn't work after exit
+FAILED mgmt_config/test_config.py::test_mgmtd_early_exit_file - AssertionError: vtysh -f didn't work after 1 exit
+FAILED mgmt_config/test_config.py::test_mgmtd_early_exit_redir - AssertionError: vtysh < didn't work after 1 exits
+FAILED mgmt_config/test_config.py::test_mgmtd_early_end_redir - AssertionError: vtysh < didn't work after 1 end
+
+FAILED mgmt_config/test_config.py::test_zebra_one_exit_file - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_one_exit_redir - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_early_exit_file - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_early_exit_redir - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_early_end_redir - AssertionError: zebra second conf missing
+
+Before the fix:
+
+one exit file: NONE: vty -f file
+early exit file: NONE: vty -f file
+
+FAILED mgmt_config/test_config.py::test_mgmtd_one_exit_file - AssertionError: vtysh -f didn't work before exit
+FAILED mgmt_config/test_config.py::test_mgmtd_one_exit_redir - AssertionError: vtysh < didn't work after exit
+FAILED mgmt_config/test_config.py::test_mgmtd_early_exit_file - AssertionError: vtysh -f didn't work before exit
+FAILED mgmt_config/test_config.py::test_mgmtd_early_exit_redir - AssertionError: vtysh < didn't work after 1 exits
+FAILED mgmt_config/test_config.py::test_mgmtd_early_end_redir - AssertionError: vtysh < didn't work after 1 end
+
+FAILED mgmt_config/test_config.py::test_zebra_one_exit_file - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_one_exit_redir - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_early_exit_file - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_early_exit_redir - AssertionError: zebra second conf missing
+FAILED mgmt_config/test_config.py::test_zebra_early_end_redir - AssertionError: zebra second conf missing
+
+"""
+import ipaddress
+import logging
+import os
+import re
+from pathlib import Path
+
+import pytest
+from lib.common_config import retry, step
+from lib.topogen import Topogen, TopoRouter
+
+# pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd]
+pytestmark = [pytest.mark.staticd]
+
+
+@retry(retry_timeout=1, initial_wait=0.1)
+def check_kernel(r1, prefix, expected=True):
+ net = ipaddress.ip_network(prefix)
+ if net.version == 6:
+ kernel = r1.cmd_nostatus("ip -6 route show", warn=not expected)
+ else:
+ kernel = r1.cmd_nostatus("ip -4 route show", warn=not expected)
+
+ logging.debug("checking kernel routing table:\n%0.1920s", kernel)
+ route = f"{str(net)}(?: nhid [0-9]+)?.*proto (static|196)"
+ m = re.search(route, kernel)
+ if expected and not m:
+ return f"Failed to find \n'{route}'\n in \n'{kernel:.1920}'"
+ elif not expected and m:
+ return f"Failed found \n'{route}'\n in \n'{kernel:.1920}'"
+ return None
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r1"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ tgen.gears["r1"].load_config(TopoRouter.RD_MGMTD)
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def save_log_snippet(logfile, content, savepath=None):
+ os.sync()
+ os.sync()
+ os.sync()
+
+ with open(logfile, encoding="utf-8") as f:
+ buf = f.read()
+ assert content == buf[: len(content)]
+ newcontent = buf[len(content) :]
+
+ if savepath:
+ with open(savepath, "w", encoding="utf-8") as f:
+ f.write(newcontent)
+
+ return buf
+
+
+def mapname(lname):
+ return lname.replace(".conf", "") + "-log.txt"
+
+
+logbuf = ""
+
+
+@pytest.fixture(scope="module")
+def r1(tgen):
+ return tgen.gears["r1"].net
+
+
+@pytest.fixture(scope="module")
+def confdir():
+ return Path(os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]) / "r1"
+
+
+@pytest.fixture(scope="module")
+def tempdir(r1):
+ return Path(r1.rundir)
+
+
+@pytest.fixture(scope="module")
+def logpath(tempdir):
+ return tempdir / "mgmtd.log"
+
+
+@pytest.fixture(autouse=True, scope="function")
+def cleanup_config(r1, tempdir, logpath):
+ global logbuf
+
+ logbuf = save_log_snippet(logpath, logbuf, "/dev/null")
+
+ yield
+
+ r1.cmd_nostatus("vtysh -c 'conf t' -c 'no allow-external-route-update'")
+ r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip multicast rpf-lookup-mode urib-only'")
+ r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip table range 2 3'")
+
+ logbuf = save_log_snippet(logpath, logbuf, "/dev/null")
+
+
+def test_staticd_startup(r1):
+ r1.cmd_nostatus(
+ "vtysh -c 'debug mgmt client frontend' "
+ "-c 'debug mgmt client backend' "
+ "-c 'debug mgmt backend frontend datastore transaction'"
+ )
+ step("Verifying routes are present on r1")
+ result = check_kernel(r1, "12.0.0.0/24", retry_timeout=3.0)
+ assert result is None
+
+
+def test_mgmtd_one_exit_file(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "one-exit.conf"
+ step(f"load {conf} file with vtysh -f ")
+ output = r1.cmd_nostatus(f"vtysh -f {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ result1 = check_kernel(r1, "20.1.0.0/24")
+ result2 = check_kernel(r1, "20.2.0.0/24")
+
+ assert result1 is None, "vtysh -f didn't work before exit"
+ assert result2 is not None, "vtysh < worked after exit, unexpected"
+
+
+def test_mgmtd_one_exit_redir(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "one-exit2.conf"
+ step(f"Redirect {conf} file into vtysh")
+ output = r1.cmd_nostatus(f"vtysh < {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ result1 = check_kernel(r1, "21.1.0.0/24")
+ result2 = check_kernel(r1, "21.2.0.0/24")
+
+ assert result1 is None, "vtysh < didn't work before exit"
+ assert result2 is not None, "vtysh < worked after exit, unexpected"
+
+
+def test_mgmtd_early_exit_file(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-exit.conf"
+ step(f"load {conf} file with vtysh -f ")
+ output = r1.cmd_nostatus(f"vtysh -f {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ result1 = check_kernel(r1, "13.1.0.0/24")
+ result2 = check_kernel(r1, "13.2.0.0/24")
+ result3 = check_kernel(r1, "13.3.0.0/24")
+
+ assert result1 is None, "vtysh -f didn't work before exit"
+ assert result2 is not None, "vtysh -f worked after 1 exit, unexpected"
+ assert result3 is not None, "vtysh -f worked after 2 exit, unexpected"
+
+
+def test_mgmtd_early_exit_redir(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-exit2.conf"
+ step(f"Redirect {conf} file into vtysh")
+ output = r1.cmd_nostatus(f"vtysh < {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ result1 = check_kernel(r1, "14.1.0.0/24")
+ result2 = check_kernel(r1, "14.2.0.0/24")
+ result3 = check_kernel(r1, "14.3.0.0/24")
+
+ assert result1 is None, "vtysh < didn't work before exit"
+ assert result2 is not None, "vtysh < worked after 1 exits, unexpected"
+ assert result3 is not None, "vtysh < worked after 2 exits, unexpected"
+
+
+def test_mgmtd_early_end_file(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-end.conf"
+ step(f"load {conf} file with vtysh -f ")
+ output = r1.cmd_nostatus(f"vtysh -f {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ result1 = check_kernel(r1, "15.1.0.0/24")
+ result2 = check_kernel(r1, "15.2.0.0/24")
+ result3 = check_kernel(r1, "15.3.0.0/24")
+
+ assert result1 is None, "vtysh -f didn't work before end"
+ assert result2 is None, "vtysh -f didn't work after 1 end"
+ assert result3 is None, "vtysh -f didn't work after 2 ends"
+
+
+def test_mgmtd_early_end_redir(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-end2.conf"
+ step(f"Redirect {conf} file into vtysh")
+ output = r1.cmd_nostatus(f"vtysh < {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ result1 = check_kernel(r1, "16.1.0.0/24")
+ result2 = check_kernel(r1, "16.2.0.0/24")
+ result3 = check_kernel(r1, "16.3.0.0/24")
+
+ assert result1 is None, "vtysh < didn't work before end"
+ assert result2 is not None, "vtysh < worked after 1 end, unexpected"
+ assert result3 is not None, "vtysh < worked after 2 end, unexpected"
+
+
+#
+# Zebra
+#
+
+
+def test_zebra_one_exit_file(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "one-exit-zebra.conf"
+ step(f"load {conf} file with vtysh -f ")
+ output = r1.cmd_nostatus(f"vtysh -f {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ showrun = r1.cmd_nostatus("vtysh -c 'show running'")
+ assert "allow-external-route-update" in showrun, "zebra conf missing"
+ assert (
+ "ip multicast rpf-lookup-mode urib-only" not in showrun
+ ), "zebra second conf present, unexpected"
+
+
+def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "one-exit2-zebra.conf"
+ step(f"Redirect {conf} file into vtysh")
+ output = r1.cmd_nostatus(f"vtysh < {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ showrun = r1.cmd_nostatus("vtysh -c 'show running'")
+
+ assert "allow-external-route-update" in showrun, "zebra conf missing"
+ assert (
+ "ip multicast rpf-lookup-mode urib-only" not in showrun
+ ), "zebra second conf present, unexpected"
+
+
+def test_zebra_early_exit_file(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-exit-zebra.conf"
+ step(f"load {conf} file with vtysh -f ")
+ output = r1.cmd_nostatus(f"vtysh -f {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ showrun = r1.cmd_nostatus("vtysh -c 'show running'")
+
+ assert "allow-external-route-update" in showrun, "zebra conf missing"
+ assert (
+ "ip multicast rpf-lookup-mode urib-only" not in showrun
+ ), "zebra second conf present, unexpected"
+ assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
+
+
+def test_zebra_early_exit_redir(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-exit2-zebra.conf"
+ step(f"Redirect {conf} file into vtysh")
+ output = r1.cmd_nostatus(f"vtysh < {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ showrun = r1.cmd_nostatus("vtysh -c 'show running'")
+
+ assert "allow-external-route-update" in showrun, "zebra conf missing"
+ assert (
+ "ip multicast rpf-lookup-mode urib-only" not in showrun
+ ), "zebra second conf present, unexpected"
+ assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
+
+
+def test_zebra_early_end_file(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-end-zebra.conf"
+ step(f"load {conf} file with vtysh -f ")
+ output = r1.cmd_nostatus(f"vtysh -f {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ showrun = r1.cmd_nostatus("vtysh -c 'show running'")
+
+ assert "allow-external-route-update" in showrun, "zebra conf missing"
+ assert (
+ "ip multicast rpf-lookup-mode urib-only" in showrun
+ ), "zebra second conf missing"
+ assert "ip table range 2 3" in showrun, "zebra third missing"
+
+
+def test_zebra_early_end_redir(r1, confdir, tempdir, logpath):
+ global logbuf
+
+ conf = "early-end2-zebra.conf"
+ step(f"Redirect {conf} file into vtysh")
+ output = r1.cmd_nostatus(f"vtysh < {confdir / conf}")
+ logbuf = save_log_snippet(logpath, logbuf, tempdir / mapname(conf))
+ print(output)
+
+ showrun = r1.cmd_nostatus("vtysh -c 'show running'")
+
+ assert "allow-external-route-update" in showrun, "zebra conf missing"
+ assert (
+ "ip multicast rpf-lookup-mode urib-only" not in showrun
+ ), "zebra second conf present, unexpected"
+ assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
diff --git a/tests/topotests/mgmt_startup/test_bigconf.py b/tests/topotests/mgmt_startup/test_bigconf.py
index 3b13229af5..4f46c8fabd 100644
--- a/tests/topotests/mgmt_startup/test_bigconf.py
+++ b/tests/topotests/mgmt_startup/test_bigconf.py
@@ -42,8 +42,10 @@ def tgen(request):
tgen = Topogen(topodef, request.module.__name__)
tgen.start_topology()
+ prologue = open(f"{CWD}/r1/mgmtd.conf").read()
+
confpath = f"{tgen.gears['r1'].gearlogdir}/r1-late-big.conf"
- start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath)
+ start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath, prologue)
ROUTE_RANGE[0] = start
ROUTE_RANGE[1] = end
diff --git a/tests/topotests/mgmt_startup/test_config.py b/tests/topotests/mgmt_startup/test_cfgfile_var.py
index 6a54f71910..6a54f71910 100644
--- a/tests/topotests/mgmt_startup/test_config.py
+++ b/tests/topotests/mgmt_startup/test_cfgfile_var.py
diff --git a/tests/topotests/mgmt_startup/test_late_bigconf.py b/tests/topotests/mgmt_startup/test_late_bigconf.py
index 5e594aba6c..0b5bf38d10 100644
--- a/tests/topotests/mgmt_startup/test_late_bigconf.py
+++ b/tests/topotests/mgmt_startup/test_late_bigconf.py
@@ -42,8 +42,10 @@ def tgen(request):
tgen = Topogen(topodef, request.module.__name__)
tgen.start_topology()
+ prologue = open(f"{CWD}/r1/mgmtd.conf").read()
+
confpath = f"{tgen.gears['r1'].gearlogdir}/r1-late-big.conf"
- start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath)
+ start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath, prologue)
ROUTE_RANGE[0] = start
ROUTE_RANGE[1] = end
@@ -74,9 +76,23 @@ def test_staticd_latestart(tgen):
assert result is not None, "last route present and should not be"
step("Starting staticd")
+ t2 = Timeout(0)
r1.startDaemons(["staticd"])
result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60)
assert result is None, "first route not present and should be"
- result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60)
+ logging.info("r1: elapsed time for first route %ss", t2.elapsed())
+
+ count = 0
+ ocount = 0
+ while count < ROUTE_COUNT:
+ rc, o, e = r1.net.cmd_status("ip -o route | wc -l")
+ if not rc:
+ if count > ocount + 100:
+ ocount = count
+ logging.info("r1: elapsed time for %d routes %s", count, t2.elapsed())
+ count = int(o)
+
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=1200)
assert result is None, "last route not present and should be"
+ logging.info("r1: elapsed time for last route %ss", t2.elapsed())
diff --git a/tests/topotests/mgmt_startup/util.py b/tests/topotests/mgmt_startup/util.py
index 87a2ad442e..e366351326 100644
--- a/tests/topotests/mgmt_startup/util.py
+++ b/tests/topotests/mgmt_startup/util.py
@@ -50,11 +50,13 @@ def get_ip_networks(super_prefix, count):
return tuple(network.subnets(count_log2))[0:count]
-def write_big_route_conf(super_prefix, count, confpath):
+def write_big_route_conf(super_prefix, count, confpath, prologue=""):
start = None
end = None
with open(confpath, "w+", encoding="ascii") as f:
+ if prologue:
+ f.write(prologue + "\n")
for net in get_ip_networks(super_prefix, count):
end = net
if not start:
diff --git a/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py b/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py
index 2c4fb4e998..826d6e2941 100644
--- a/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py
+++ b/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
# Copyright (c) 2023 by VMware, Inc. ("VMware")
@@ -20,52 +20,31 @@ Following tests are covered:
5. Verify static MLD groups after removing and adding MLD config
"""
-import os
import sys
import time
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
-from re import search as re_search
-from re import findall as findall
+import pytest
from lib.common_config import (
+ reset_config_on_routers,
start_topology,
- write_test_header,
- write_test_footer,
step,
- kill_router_daemons,
- start_router_daemons,
- reset_config_on_routers,
- do_countdown,
- apply_raw_config,
- socat_send_pim6_traffic,
+ write_test_footer,
+ write_test_header,
)
-
from lib.pim import (
- create_pim_config,
- verify_mroutes,
- verify_upstream_iif,
- verify_mld_groups,
- clear_pim6_mroute,
McastTesterHelper,
- verify_pim_neighbors,
create_mld_config,
- verify_mld_groups,
+ create_pim_config,
verify_local_mld_groups,
+ verify_mld_groups,
+ verify_mroutes,
+ verify_pim_neighbors,
verify_pim_rp_info,
+ verify_upstream_iif,
)
-from lib.topolog import logger
+from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
+from lib.topolog import logger
r1_r2_links = []
r1_r3_links = []
@@ -131,7 +110,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- json_file = "{}/multicast_mld_local_join.json".format(CWD)
+ json_file = "multicast_mld_local_join.json"
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
@@ -151,6 +130,9 @@ def setup_module(mod):
result = verify_pim_neighbors(tgen, topo)
assert result is True, " Verify PIM neighbor: Failed Error: {}".format(result)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -161,6 +143,8 @@ def teardown_module():
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -265,6 +249,8 @@ def test_mroute_with_mld_local_joins_p0(request):
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
@@ -330,9 +316,7 @@ def test_mroute_with_mld_local_joins_p0(request):
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
- intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -458,6 +442,8 @@ def test_remove_add_mld_local_joins_p1(request):
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
@@ -517,9 +503,7 @@ def test_remove_add_mld_local_joins_p1(request):
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
- intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -710,6 +694,8 @@ def test_remove_add_mld_config_with_local_joins_p1(request):
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
@@ -759,9 +745,7 @@ def test_remove_add_mld_config_with_local_joins_p1(request):
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
- intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
diff --git a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py
index 87b04b41be..aff623705c 100644
--- a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py
+++ b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -30,61 +30,40 @@ should get update accordingly
data traffic
"""
-import os
+import datetime
import sys
-import json
import time
-import datetime
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
- step,
+ get_frr_ipv6_linklocal,
+ required_linux_kernel_version,
reset_config_on_routers,
shutdown_bringup_interface,
- start_router,
- stop_router,
- create_static_routes,
- required_linux_kernel_version,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- get_frr_ipv6_linklocal,
- kill_socat,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
-from lib.bgp import create_router_bgp
from lib.pim import (
- create_pim_config,
+ McastTesterHelper,
+ clear_pim6_mroute,
create_mld_config,
+ create_pim_config,
+ verify_mld_config,
verify_mld_groups,
+ verify_mroute_summary,
verify_mroutes,
- clear_pim6_interface_traffic,
- verify_upstream_iif,
- clear_pim6_mroute,
verify_pim_interface_traffic,
- verify_pim_state,
- McastTesterHelper,
verify_pim_join,
- verify_mroute_summary,
verify_pim_nexthop,
+ verify_pim_state,
verify_sg_traffic,
- verify_mld_config,
+ verify_upstream_iif,
)
-
-from lib.topolog import logger
+from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
+from lib.topolog import logger
# Global variables
GROUP_RANGE = "ff00::/8"
@@ -141,8 +120,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
- testdir = os.path.dirname(os.path.realpath(__file__))
- json_file = "{}/multicast_pim6_sm_topo1.json".format(testdir)
+ json_file = "multicast_pim6_sm_topo1.json"
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
@@ -159,6 +137,9 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, tgen.json_topo)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -169,8 +150,7 @@ def teardown_module():
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
+ app_helper.cleanup()
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -296,6 +276,8 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -334,9 +316,7 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
source = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
@@ -375,11 +355,7 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -532,11 +508,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", _MLD_JOIN_RANGE, intf, intf_ip
- )
+ result = app_helper.run_join("i1", _MLD_JOIN_RANGE, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("verify MLD joins received on r1")
@@ -546,9 +518,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", _MLD_JOIN_RANGE, intf)
+ result = app_helper.run_traffic("i2", _MLD_JOIN_RANGE, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -561,11 +531,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
result = create_mld_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- i5_r5 = topo["routers"]["i5"]["links"]["r5"]["interface"]
- intf_ip = topo["routers"]["i5"]["links"]["r5"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i5", "UDP6-RECV", _MLD_JOIN_RANGE, i5_r5, intf_ip
- )
+ result = app_helper.run_join("i5", _MLD_JOIN_RANGE, "r5")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("FRR1 has 10 (*.G) and 10 (S,G) verify using 'show ipv6 mroute'")
@@ -682,6 +648,8 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -708,11 +676,7 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request):
step("Enable mld on FRR1 interface and send mld join ")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("verify mld groups received on R1")
@@ -722,9 +686,7 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("shut the direct link to R1 ")
@@ -841,6 +803,8 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -868,17 +832,11 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
step("Enable mld on FRR1 interface and send mld join ffaa::1-5")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure one MLD interface on FRR3 node and send MLD" " join (ffcc::1)")
@@ -888,11 +846,7 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i8"]["links"]["r3"]["interface"]
- intf_ip = topo["routers"]["i8"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i8", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i8", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("verify MLD groups received ")
@@ -975,16 +929,14 @@ def test_modify_mld_query_timer_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i8"]["links"]["r3"]["interface"]
- intf_ip = topo["routers"]["i8"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i8", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i8", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Enable MLD on receiver interface")
@@ -1023,9 +975,7 @@ def test_modify_mld_query_timer_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -1158,17 +1108,15 @@ def test_modify_mld_max_query_response_timer_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
step("Enable mld on FRR1 interface and send MLD join")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
@@ -1214,9 +1162,7 @@ def test_modify_mld_max_query_response_timer_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -1431,6 +1377,8 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -1438,9 +1386,7 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request):
step("send multicast traffic for group range ffaa::1-5")
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for group (ffaa::1) on r5")
@@ -1464,11 +1410,7 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request):
step("Enable mld on FRR1 interface and send MLD join")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
diff --git a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py
index 788a839918..767264a7c0 100644
--- a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py
+++ b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -21,61 +21,31 @@ PIM nbr and mroute from FRR node
different
"""
-import os
import sys
-import json
import time
-import datetime
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
- step,
+ required_linux_kernel_version,
reset_config_on_routers,
shutdown_bringup_interface,
- start_router,
- stop_router,
- create_static_routes,
- required_linux_kernel_version,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- get_frr_ipv6_linklocal,
- kill_socat,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
-from lib.bgp import create_router_bgp
from lib.pim import (
+ McastTesterHelper,
+ clear_pim6_mroute,
create_pim_config,
- create_mld_config,
- verify_mld_groups,
verify_mroutes,
- clear_pim6_interface_traffic,
- verify_upstream_iif,
- clear_pim6_mroute,
verify_pim_interface_traffic,
- verify_pim_state,
- McastTesterHelper,
- verify_pim_join,
- verify_mroute_summary,
- verify_pim_nexthop,
verify_sg_traffic,
- verify_mld_config,
+ verify_upstream_iif,
)
-
-from lib.topolog import logger
+from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
+from lib.topolog import logger
# Global variables
GROUP_RANGE = "ff00::/8"
@@ -114,6 +84,16 @@ ASSERT_MSG = "Testcase {} : Failed Error: {}"
pytestmark = [pytest.mark.pim6d]
+@pytest.fixture(scope="function")
+def app_helper():
+ # helper = McastTesterHelper(get_topogen())
+ # yield helper
+ # helper.cleanup()
+ # Even better use contextmanager functionality:
+ with McastTesterHelper(get_topogen()) as ah:
+ yield ah
+
+
def setup_module(mod):
"""
Sets up the pytest environment
@@ -132,8 +112,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
- testdir = os.path.dirname(os.path.realpath(__file__))
- json_file = "{}/multicast_pim6_sm_topo1.json".format(testdir)
+ json_file = "multicast_pim6_sm_topo1.json"
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
@@ -160,9 +139,6 @@ def teardown_module():
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
-
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -225,7 +201,7 @@ def verify_state_incremented(state_before, state_after):
#####################################################
-def test_clear_mroute_and_verify_multicast_data_p0(request):
+def test_clear_mroute_and_verify_multicast_data_p0(request, app_helper):
"""
Verify (*,G) and (S,G) entry populated again after clear the
PIM nbr and mroute from FRR node
@@ -237,6 +213,8 @@ def test_clear_mroute_and_verify_multicast_data_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -266,18 +244,12 @@ def test_clear_mroute_and_verify_multicast_data_p0(request):
)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Clear the mroute on r1, wait for 5 sec")
@@ -457,7 +429,9 @@ def test_clear_mroute_and_verify_multicast_data_p0(request):
write_test_footer(tc_name)
-def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
+def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(
+ request, app_helper
+):
"""
Verify SPT switchover working when RPT and SPT path is
different
@@ -498,11 +472,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("send mld join (ffbb::1-5, ffcc::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", _MLD_JOIN_RANGE, intf, intf_ip
- )
+ result = app_helper.run_join("i1", _MLD_JOIN_RANGE, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("registerRx and registerStopTx value before traffic sent")
@@ -518,9 +488,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
step(
"Send multicast traffic from FRR3 to all the receivers" "ffbb::1-5 , ffcc::1-5"
)
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", _MLD_JOIN_RANGE, intf)
+ result = app_helper.run_traffic("i2", _MLD_JOIN_RANGE, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
index 977cd477c8..23326337d6 100755
--- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -41,57 +41,36 @@ Test steps
8. Verify PIM6 join send towards the higher preferred RP
9. Verify PIM6 prune send towards the lower preferred RP
"""
-
-import os
import sys
-import json
import time
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
+ check_router_status,
reset_config_on_routers,
- step,
shutdown_bringup_interface,
- kill_router_daemons,
- start_router_daemons,
- create_static_routes,
- check_router_status,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- kill_socat,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
from lib.pim import (
+ McastTesterHelper,
+ clear_pim6_interface_traffic,
create_pim_config,
- verify_upstream_iif,
+ get_pim6_interface_traffic,
verify_join_state_and_timer,
+ verify_mld_groups,
verify_mroutes,
- verify_pim_neighbors,
+ verify_pim6_neighbors,
verify_pim_interface_traffic,
verify_pim_rp_info,
verify_pim_state,
- clear_pim6_interface_traffic,
- clear_pim6_mroute,
- verify_pim6_neighbors,
- get_pim6_interface_traffic,
- clear_pim6_interfaces,
- verify_mld_groups,
+ verify_upstream_iif,
)
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json, build_topo_from_json
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
# Global variables
GROUP_RANGE_1 = "ff08::/64"
@@ -141,7 +120,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- json_file = "{}/multicast_pim6_static_rp.json".format(CWD)
+ json_file = "multicast_pim6_static_rp.json"
tgen = Topogen(json_file, mod.__name__)
global TOPO
TOPO = tgen.json_topo
@@ -163,6 +142,9 @@ def setup_module(mod):
result = verify_pim6_neighbors(tgen, TOPO)
assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -172,8 +154,7 @@ def teardown_module():
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
+ app_helper.cleanup()
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -260,6 +241,8 @@ def test_pim6_add_delete_static_RP_p0(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Shut link b/w R1 and R3 and R1 and R4 as per testcase topology")
intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
@@ -313,11 +296,7 @@ def test_pim6_add_delete_static_RP_p0(request):
)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -457,6 +436,8 @@ def test_pim6_SPT_RPT_path_same_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Shut link b/w R1->R3, R1->R4 and R3->R1, R3->R4 as per " "testcase topology")
intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
@@ -494,11 +475,7 @@ def test_pim6_SPT_RPT_path_same_p1(request):
step(
"Enable MLD on r1 interface and send MLD join {} to R1".format(GROUP_ADDRESS_1)
)
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -508,9 +485,8 @@ def test_pim6_SPT_RPT_path_same_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("Send multicast traffic from R5")
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r2: Verify RP info")
@@ -630,6 +606,8 @@ def test_pim6_RP_configured_as_LHR_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
@@ -665,11 +643,7 @@ def test_pim6_RP_configured_as_LHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -679,9 +653,8 @@ def test_pim6_RP_configured_as_LHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -762,6 +735,8 @@ def test_pim6_RP_configured_as_FHR_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r3: Configure r3(FHR) as RP")
@@ -792,11 +767,7 @@ def test_pim6_RP_configured_as_FHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -806,9 +777,8 @@ def test_pim6_RP_configured_as_FHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -890,6 +860,8 @@ def test_pim6_SPT_RPT_path_different_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r2: Configure r2 as RP")
@@ -921,11 +893,7 @@ def test_pim6_SPT_RPT_path_different_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -935,9 +903,8 @@ def test_pim6_SPT_RPT_path_different_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -1060,6 +1027,8 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM66 on all the interfaces of r1, r2, r3 and r4 routers")
step(
@@ -1109,11 +1078,7 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request):
)
step("r0: send mld join {} to R1".format(GROUP_ADDRESS_3))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_3, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_3, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
index a61164baa2..39497e91ed 100755
--- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -33,55 +33,31 @@ Test steps
import os
import sys
-import json
import time
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
+ create_debug_log_config,
reset_config_on_routers,
- step,
shutdown_bringup_interface,
- kill_router_daemons,
- start_router_daemons,
- create_static_routes,
- check_router_status,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- kill_socat,
- create_debug_log_config,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
from lib.pim import (
+ McastTesterHelper,
create_pim_config,
- verify_upstream_iif,
verify_join_state_and_timer,
+ verify_mld_groups,
verify_mroutes,
- verify_pim_neighbors,
- verify_pim_interface_traffic,
- verify_pim_rp_info,
- verify_pim_state,
- clear_pim6_interface_traffic,
- clear_pim6_mroute,
verify_pim6_neighbors,
- get_pim6_interface_traffic,
- clear_pim6_interfaces,
- verify_mld_groups,
+ verify_pim_rp_info,
+ verify_upstream_iif,
)
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json, build_topo_from_json
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
# Global variables
GROUP_RANGE_1 = "ff08::/64"
@@ -145,7 +121,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- json_file = "{}/multicast_pim6_static_rp.json".format(CWD)
+ json_file = "multicast_pim6_static_rp.json"
tgen = Topogen(json_file, mod.__name__)
global TOPO
TOPO = tgen.json_topo
@@ -167,6 +143,9 @@ def setup_module(mod):
result = verify_pim6_neighbors(tgen, TOPO)
assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -176,8 +155,7 @@ def teardown_module():
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
+ app_helper.cleanup()
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -265,6 +243,8 @@ def test_pim6_multiple_groups_same_RP_address_p2(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
input_dict = {
"r1": {"debug": {"log_file": "r1_debug.log", "enable": ["pim6d"]}},
"r2": {"debug": {"log_file": "r2_debug.log", "enable": ["pim6d"]}},
@@ -305,10 +285,7 @@ def test_pim6_multiple_groups_same_RP_address_p2(request):
group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
step("r0: Send MLD join for 10 groups")
intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip
- )
+ result = app_helper.run_join("r0", group_address_list, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -318,9 +295,8 @@ def test_pim6_multiple_groups_same_RP_address_p2(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(group_address_list))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf)
+ result = app_helper.run_traffic("r5", group_address_list, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -593,6 +569,8 @@ def test_pim6_multiple_groups_different_RP_address_p2(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r2: Configure r2 as RP")
@@ -646,11 +624,7 @@ def test_pim6_multiple_groups_different_RP_address_p2(request):
group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
step("r0: Send MLD join for 10 groups")
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip
- )
+ result = app_helper.run_join("r0", group_address_list, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -660,9 +634,8 @@ def test_pim6_multiple_groups_different_RP_address_p2(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(group_address_list))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf)
+ result = app_helper.run_traffic("r5", group_address_list, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -1189,6 +1162,8 @@ def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r2: Configure r2 as RP")
@@ -1220,11 +1195,7 @@ def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r0: Send MLD join")
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
diff --git a/tests/topotests/rip_topo1/test_rip_topo1.py b/tests/topotests/rip_topo1/test_rip_topo1.py
index d23962b77d..9b0eaf9a18 100644
--- a/tests/topotests/rip_topo1/test_rip_topo1.py
+++ b/tests/topotests/rip_topo1/test_rip_topo1.py
@@ -19,6 +19,7 @@ import re
import sys
import pytest
from time import sleep
+import functools
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -258,58 +259,43 @@ def test_zebra_ipv4_routingTable():
global fatal_error
net = get_topogen().net
+ def _verify_ip_route(expected):
+ # Actual output from router
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"')
+ .rstrip()
+ )
+ # Drop timers on end of line
+ actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
+ # Fix newlines (make them all the same)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
+
+ return topotest.get_textdiff(
+ actual,
+ expected,
+ title1="actual Zebra IPv4 routing table",
+ title2="expected Zebra IPv4 routing table",
+ )
+
# Skip if previous fatal error condition is raised
if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
- # Verify OSPFv3 Routing Table
print("\n\n** Verifing Zebra IPv4 Routing Table")
print("******************************************\n")
- failures = 0
for i in range(1, 4):
refTableFile = "%s/r%s/show_ip_route.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
- # Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
- # Actual output from router
- actual = (
- net["r%s" % i]
- .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"')
- .rstrip()
- )
- # Drop timers on end of line
- actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
- # Fix newlines (make them all the same)
- actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
-
- # Generate Diff
- diff = topotest.get_textdiff(
- actual,
- expected,
- title1="actual Zebra IPv4 routing table",
- title2="expected Zebra IPv4 routing table",
- )
-
- # Empty string if it matches, otherwise diff contains unified diff
- if diff:
- sys.stderr.write(
- "r%s failed Zebra IPv4 Routing Table Check:\n%s\n" % (i, diff)
- )
- failures += 1
- else:
- print("r%s ok" % i)
-
- assert (
- failures == 0
- ), "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" % (
- i,
- diff,
- )
+ test_func = functools.partial(_verify_ip_route, expected)
+ success, _ = topotest.run_and_expect(test_func, "", count=30, wait=1)
+ assert success, "Failed verifying IPv4 routes for r{}".format(i)
# Make sure that all daemons are still running
for i in range(1, 4):
@@ -344,7 +330,6 @@ def test_shutdown_check_stderr():
if __name__ == "__main__":
-
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
diff --git a/tests/topotests/ripng_topo1/test_ripng_topo1.py b/tests/topotests/ripng_topo1/test_ripng_topo1.py
index ce2f5986d1..6bebf6044b 100644
--- a/tests/topotests/ripng_topo1/test_ripng_topo1.py
+++ b/tests/topotests/ripng_topo1/test_ripng_topo1.py
@@ -19,7 +19,7 @@ import re
import sys
import pytest
from time import sleep
-
+import functools
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
@@ -273,6 +273,27 @@ def test_zebra_ipv6_routingTable():
global fatal_error
net = get_topogen().net
+ def _verify_ip_route(expected):
+ # Actual output from router
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"')
+ .rstrip()
+ )
+ # Mask out Link-Local mac address portion. They are random...
+ actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual)
+ # Drop timers on end of line
+ actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
+ # Fix newlines (make them all the same)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
+
+ return topotest.get_textdiff(
+ actual,
+ expected,
+ title1="actual Zebra IPv6 routing table",
+ title2="expected Zebra IPv6 routing table",
+ )
+
# Skip if previous fatal error condition is raised
if fatal_error != "":
pytest.skip(fatal_error)
@@ -291,42 +312,9 @@ def test_zebra_ipv6_routingTable():
# Fix newlines (make them all the same)
expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
- # Actual output from router
- actual = (
- net["r%s" % i]
- .cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"')
- .rstrip()
- )
- # Mask out Link-Local mac address portion. They are random...
- actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual)
- # Drop timers on end of line
- actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
- # Fix newlines (make them all the same)
- actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
-
- # Generate Diff
- diff = topotest.get_textdiff(
- actual,
- expected,
- title1="actual Zebra IPv6 routing table",
- title2="expected Zebra IPv6 routing table",
- )
-
- # Empty string if it matches, otherwise diff contains unified diff
- if diff:
- sys.stderr.write(
- "r%s failed Zebra IPv6 Routing Table Check:\n%s\n" % (i, diff)
- )
- failures += 1
- else:
- print("r%s ok" % i)
-
- assert (
- failures == 0
- ), "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" % (
- i,
- diff,
- )
+ test_func = functools.partial(_verify_ip_route, expected)
+ success, _ = topotest.run_and_expect(test_func, "", count=30, wait=1)
+ assert success, "Failed verifying IPv6 routes for r{}".format(i)
# Make sure that all daemons are running
for i in range(1, 4):
@@ -386,7 +374,6 @@ def test_shutdown_check_memleak():
if __name__ == "__main__":
-
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 0e0aec9839..c2be9f78eb 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -1480,12 +1480,17 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del):
lines_to_add_to_del.append((tmp_ctx_keys, line))
for (ctx_keys, line) in lines_to_del_to_del:
- if line is not None:
+ try:
lines_to_del.remove((ctx_keys, line))
+ except ValueError:
+ pass
for (ctx_keys, line) in lines_to_add_to_del:
- if line is not None:
+ try:
lines_to_add.remove((ctx_keys, line))
+ except ValueError:
+ pass
+
return (lines_to_add, lines_to_del)
diff --git a/vrrpd/vrrp_vty.c b/vrrpd/vrrp_vty.c
index 7a17de747c..9971df58a3 100644
--- a/vrrpd/vrrp_vty.c
+++ b/vrrpd/vrrp_vty.c
@@ -398,6 +398,7 @@ static struct json_object *vrrp_build_json(struct vrrp_vrouter *vr)
json_object_string_add(j, "interface", vr->ifp->name);
json_object_int_add(j, "advertisementInterval",
vr->advertisement_interval * CS2MS);
+ json_object_int_add(j, "priority", vr->priority);
/* v4 */
json_object_string_add(v4, "interface",
vr->v4->mvl_ifp ? vr->v4->mvl_ifp->name : "");
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index 04f7ff65e9..c94b47fef5 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -880,6 +880,13 @@ int vtysh_config_from_file(struct vty *vty, FILE *fp)
if (strmatch(vty_buf_trimmed, "end"))
continue;
+ if (strmatch(vty_buf_trimmed, "exit") &&
+ vty->node == CONFIG_NODE) {
+ fprintf(stderr, "line %d: Warning[%d]...: %s\n", lineno,
+ vty->node, "early exit from config file");
+ break;
+ }
+
ret = command_config_read_one_line(vty, &cmd, lineno, 1);
switch (ret) {
diff --git a/zebra/interface.c b/zebra/interface.c
index ccf1a0a204..509a0ffd2c 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -135,6 +135,7 @@ static int if_zebra_new_hook(struct interface *ifp)
zebra_if->ifp = ifp;
zebra_if->multicast = IF_ZEBRA_DATA_UNSPEC;
+ zebra_if->mpls_config = IF_ZEBRA_DATA_UNSPEC;
zebra_if->shutdown = IF_ZEBRA_DATA_OFF;
zebra_if->link_nsid = NS_UNKNOWN;
@@ -3016,10 +3017,10 @@ DEFPY (mpls,
if (no) {
dplane_intf_mpls_modify_state(ifp, false);
- if_data->mpls = IF_ZEBRA_DATA_UNSPEC;
+ if_data->mpls_config = IF_ZEBRA_DATA_UNSPEC;
} else {
dplane_intf_mpls_modify_state(ifp, true);
- if_data->mpls = IF_ZEBRA_DATA_ON;
+ if_data->mpls_config = IF_ZEBRA_DATA_ON;
}
return CMD_SUCCESS;
@@ -4859,7 +4860,8 @@ static int if_config_write(struct vty *vty)
IF_ZEBRA_DATA_ON
? ""
: "no ");
- if (if_data->mpls == IF_ZEBRA_DATA_ON)
+
+ if (if_data->mpls_config == IF_ZEBRA_DATA_ON)
vty_out(vty, " mpls enable\n");
}
diff --git a/zebra/interface.h b/zebra/interface.h
index e5545d6ba0..44dcafbad0 100644
--- a/zebra/interface.h
+++ b/zebra/interface.h
@@ -114,6 +114,9 @@ struct zebra_if {
/* MPLS status. */
bool mpls;
+ /* MPLS configuration */
+ uint8_t mpls_config;
+
/* Linkdown status */
bool linkdown, linkdownv6;
diff --git a/zebra/main.c b/zebra/main.c
index 81a3066445..bd4623be55 100644
--- a/zebra/main.c
+++ b/zebra/main.c
@@ -206,12 +206,17 @@ void zebra_finalize(struct event *dummy)
vrf_terminate();
	+	/*
	+	 * Stop the dplane thread and finish any cleanup.
	+	 * This must run before the zebra_ns_early_shutdown
	+	 * call, because those functions close sockets
	+	 * that the dplane depends on.
	+	 */
+ zebra_dplane_shutdown();
+
ns_walk_func(zebra_ns_early_shutdown, NULL, NULL);
zebra_ns_notify_close();
- /* Stop dplane thread and finish any cleanup */
- zebra_dplane_shutdown();
-
/* Final shutdown of ns resources */
ns_walk_func(zebra_ns_final_shutdown, NULL, NULL);
diff --git a/zebra/rib.h b/zebra/rib.h
index a56bb05d68..65cc1ffab9 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -465,6 +465,13 @@ extern uint8_t route_distance(int type);
extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
bool rt_delete);
+/*
+ * rib_find_rn_from_ctx
+ *
+ * Returns a route_node, with its lock count incremented, for
+ * the table and prefix specified by the context. The caller
+ * must unlock the node when done.
+ */
extern struct route_node *
rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx);
diff --git a/zebra/rule_netlink.c b/zebra/rule_netlink.c
index c7832992ea..518c948c99 100644
--- a/zebra/rule_netlink.c
+++ b/zebra/rule_netlink.c
@@ -116,9 +116,9 @@ static ssize_t netlink_rule_msg_encode(
return 0;
}
- /* dsfield, if specified */
+ /* dsfield, if specified; mask off the ECN bits */
if (filter_bm & PBR_FILTER_DSFIELD)
- req->frh.tos = dsfield;
+ req->frh.tos = dsfield & 0xfc;
/* protocol to match on */
if (filter_bm & PBR_FILTER_IP_PROTOCOL)
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 4c6c336d41..4bc9f4acfa 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -801,11 +801,17 @@ int zsend_route_notify_owner(const struct route_node *rn,
int zsend_route_notify_owner_ctx(const struct zebra_dplane_ctx *ctx,
enum zapi_route_notify_owner note)
{
- return (route_notify_internal(
- rib_find_rn_from_ctx(ctx), dplane_ctx_get_type(ctx),
- dplane_ctx_get_instance(ctx), dplane_ctx_get_vrf(ctx),
- dplane_ctx_get_table(ctx), note, dplane_ctx_get_afi(ctx),
- dplane_ctx_get_safi(ctx)));
+ int result;
+ struct route_node *rn = rib_find_rn_from_ctx(ctx);
+
+ result = route_notify_internal(
+ rn, dplane_ctx_get_type(ctx), dplane_ctx_get_instance(ctx),
+ dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx), note,
+ dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx));
+
+ route_unlock_node(rn);
+
+ return result;
}
static void zread_route_notify_request(ZAPI_HANDLER_ARGS)
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index a768c33a30..f90f9191ee 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -5546,7 +5546,7 @@ int dplane_provider_register(const char *name,
struct zebra_dplane_provider **prov_p)
{
int ret = 0;
- struct zebra_dplane_provider *p = NULL, *last;
+ struct zebra_dplane_provider *p = NULL, *last, *prev = NULL;
/* Validate */
if (fp == NULL) {
@@ -5589,10 +5589,11 @@ int dplane_provider_register(const char *name,
frr_each (dplane_prov_list, &zdplane_info.dg_providers, last) {
if (last->dp_priority > p->dp_priority)
break;
+ prev = last;
}
if (last)
- dplane_prov_list_add_after(&zdplane_info.dg_providers, last, p);
+ dplane_prov_list_add_after(&zdplane_info.dg_providers, prev, p);
else
dplane_prov_list_add_tail(&zdplane_info.dg_providers, p);