-rw-r--r--  bgpd/bgp_attr.c | 101
-rw-r--r--  bgpd/bgp_attr.h | 9
-rw-r--r--  bgpd/bgp_mpath.c | 14
-rw-r--r--  bgpd/bgp_mplsvpn.c | 89
-rw-r--r--  bgpd/bgp_nexthop.c | 7
-rw-r--r--  bgpd/bgp_nht.c | 49
-rw-r--r--  bgpd/bgp_route.c | 104
-rw-r--r--  bgpd/bgp_route.h | 13
-rw-r--r--  bgpd/bgp_routemap.c | 4
-rw-r--r--  bgpd/bgp_snmp_bgp4v2.c | 6
-rw-r--r--  bgpd/bgp_trace.h | 65
-rw-r--r--  bgpd/bgp_zebra.c | 109
-rw-r--r--  bgpd/bgpd.c | 3
-rw-r--r--  bgpd/rfapi/bgp_rfapi_cfg.c | 12
-rw-r--r--  doc/user/bgp.rst | 11
-rw-r--r--  doc/user/evpn.rst | 2
-rw-r--r--  doc/user/snmp.rst | 61
-rw-r--r--  lib/if.c | 17
-rw-r--r--  lib/if.h | 1
-rw-r--r--  ospfd/ospf_packet.c | 4
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf | 1
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf | 1
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py | 14
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf | 48
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf | 6
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf | 11
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py | 21
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py | 50
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py | 77
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf | 0
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf | 41
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json | 31
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json | 25
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json | 22
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json | 27
-rw-r--r--  tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf | 27
-rwxr-xr-x  tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py | 128
-rw-r--r--  tests/topotests/bgp_unique_rid/bgp_unique_rid.json | 505
-rw-r--r--  tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json | 529
-rw-r--r--  tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py | 906
-rw-r--r--  tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py | 479
-rw-r--r--  tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf | 6
-rw-r--r--  tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf | 6
-rw-r--r--  tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py | 234
-rw-r--r--  tests/topotests/lib/bgp.py | 31
-rw-r--r--  tests/topotests/lib/bgprib.py | 193
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt | 4
-rwxr-xr-x  tools/frr-reload.py | 1
-rw-r--r--  zebra/connected.c | 72
-rw-r--r--  zebra/zebra_vty.c | 2
-rw-r--r--  zebra/zserv.c | 3
58 files changed, 3899 insertions, 302 deletions
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index f3848db072..3d93e8a1ac 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -825,55 +825,56 @@ bool attrhash_cmp(const void *p1, const void *p2)
&& attr1->med == attr2->med
&& attr1->local_pref == attr2->local_pref
&& attr1->rmap_change_flags == attr2->rmap_change_flags) {
- if (attr1->aggregator_as == attr2->aggregator_as
- && attr1->aggregator_addr.s_addr
- == attr2->aggregator_addr.s_addr
- && attr1->weight == attr2->weight
- && attr1->tag == attr2->tag
- && attr1->label_index == attr2->label_index
- && attr1->mp_nexthop_len == attr2->mp_nexthop_len
- && bgp_attr_get_ecommunity(attr1)
- == bgp_attr_get_ecommunity(attr2)
- && bgp_attr_get_ipv6_ecommunity(attr1)
- == bgp_attr_get_ipv6_ecommunity(attr2)
- && bgp_attr_get_lcommunity(attr1)
- == bgp_attr_get_lcommunity(attr2)
- && bgp_attr_get_cluster(attr1)
- == bgp_attr_get_cluster(attr2)
- && bgp_attr_get_transit(attr1)
- == bgp_attr_get_transit(attr2)
- && bgp_attr_get_aigp_metric(attr1)
- == bgp_attr_get_aigp_metric(attr2)
- && attr1->rmap_table_id == attr2->rmap_table_id
- && (attr1->encap_tunneltype == attr2->encap_tunneltype)
- && encap_same(attr1->encap_subtlvs, attr2->encap_subtlvs)
+ if (attr1->aggregator_as == attr2->aggregator_as &&
+ attr1->aggregator_addr.s_addr ==
+ attr2->aggregator_addr.s_addr &&
+ attr1->weight == attr2->weight &&
+ attr1->tag == attr2->tag &&
+ attr1->label_index == attr2->label_index &&
+ attr1->mp_nexthop_len == attr2->mp_nexthop_len &&
+ bgp_attr_get_ecommunity(attr1) ==
+ bgp_attr_get_ecommunity(attr2) &&
+ bgp_attr_get_ipv6_ecommunity(attr1) ==
+ bgp_attr_get_ipv6_ecommunity(attr2) &&
+ bgp_attr_get_lcommunity(attr1) ==
+ bgp_attr_get_lcommunity(attr2) &&
+ bgp_attr_get_cluster(attr1) ==
+ bgp_attr_get_cluster(attr2) &&
+ bgp_attr_get_transit(attr1) ==
+ bgp_attr_get_transit(attr2) &&
+ bgp_attr_get_aigp_metric(attr1) ==
+ bgp_attr_get_aigp_metric(attr2) &&
+ attr1->rmap_table_id == attr2->rmap_table_id &&
+ (attr1->encap_tunneltype == attr2->encap_tunneltype) &&
+ encap_same(attr1->encap_subtlvs, attr2->encap_subtlvs)
#ifdef ENABLE_BGP_VNC
&& encap_same(bgp_attr_get_vnc_subtlvs(attr1),
bgp_attr_get_vnc_subtlvs(attr2))
#endif
&& IPV6_ADDR_SAME(&attr1->mp_nexthop_global,
- &attr2->mp_nexthop_global)
- && IPV6_ADDR_SAME(&attr1->mp_nexthop_local,
- &attr2->mp_nexthop_local)
- && IPV4_ADDR_SAME(&attr1->mp_nexthop_global_in,
- &attr2->mp_nexthop_global_in)
- && IPV4_ADDR_SAME(&attr1->originator_id,
- &attr2->originator_id)
- && overlay_index_same(attr1, attr2)
- && !memcmp(&attr1->esi, &attr2->esi, sizeof(esi_t))
- && attr1->es_flags == attr2->es_flags
- && attr1->mm_sync_seqnum == attr2->mm_sync_seqnum
- && attr1->df_pref == attr2->df_pref
- && attr1->df_alg == attr2->df_alg
- && attr1->nh_ifindex == attr2->nh_ifindex
- && attr1->nh_lla_ifindex == attr2->nh_lla_ifindex
- && attr1->distance == attr2->distance
- && srv6_l3vpn_same(attr1->srv6_l3vpn, attr2->srv6_l3vpn)
- && srv6_vpn_same(attr1->srv6_vpn, attr2->srv6_vpn)
- && attr1->srte_color == attr2->srte_color
- && attr1->nh_type == attr2->nh_type
- && attr1->bh_type == attr2->bh_type
- && attr1->otc == attr2->otc)
+ &attr2->mp_nexthop_global) &&
+ IPV6_ADDR_SAME(&attr1->mp_nexthop_local,
+ &attr2->mp_nexthop_local) &&
+ IPV4_ADDR_SAME(&attr1->mp_nexthop_global_in,
+ &attr2->mp_nexthop_global_in) &&
+ IPV4_ADDR_SAME(&attr1->originator_id,
+ &attr2->originator_id) &&
+ overlay_index_same(attr1, attr2) &&
+ !memcmp(&attr1->esi, &attr2->esi, sizeof(esi_t)) &&
+ attr1->es_flags == attr2->es_flags &&
+ attr1->mm_sync_seqnum == attr2->mm_sync_seqnum &&
+ attr1->df_pref == attr2->df_pref &&
+ attr1->df_alg == attr2->df_alg &&
+ attr1->nh_ifindex == attr2->nh_ifindex &&
+ attr1->nh_flag == attr2->nh_flag &&
+ attr1->nh_lla_ifindex == attr2->nh_lla_ifindex &&
+ attr1->distance == attr2->distance &&
+ srv6_l3vpn_same(attr1->srv6_l3vpn, attr2->srv6_l3vpn) &&
+ srv6_vpn_same(attr1->srv6_vpn, attr2->srv6_vpn) &&
+ attr1->srte_color == attr2->srte_color &&
+ attr1->nh_type == attr2->nh_type &&
+ attr1->bh_type == attr2->bh_type &&
+ attr1->otc == attr2->otc)
return true;
}
@@ -2254,6 +2255,12 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
return BGP_ATTR_PARSE_WITHDRAW;
}
attr->nh_ifindex = peer->nexthop.ifp->ifindex;
+ if (if_is_operative(peer->nexthop.ifp))
+ SET_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_IF_OPERSTATE);
+ else
+ UNSET_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_IF_OPERSTATE);
}
break;
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
@@ -2271,6 +2278,12 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
return BGP_ATTR_PARSE_WITHDRAW;
}
attr->nh_ifindex = peer->nexthop.ifp->ifindex;
+ if (if_is_operative(peer->nexthop.ifp))
+ SET_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_IF_OPERSTATE);
+ else
+ UNSET_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_IF_OPERSTATE);
}
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h
index a34da1a6de..f8beb5fba9 100644
--- a/bgpd/bgp_attr.h
+++ b/bgpd/bgp_attr.h
@@ -170,6 +170,12 @@ struct attr {
uint32_t med;
uint32_t local_pref;
ifindex_t nh_ifindex;
+ uint8_t nh_flag;
+
+#define BGP_ATTR_NH_VALID 0x01
+#define BGP_ATTR_NH_IF_OPERSTATE 0x02
+#define BGP_ATTR_NH_MP_PREFER_GLOBAL 0x04 /* MP Nexthop preference */
+#define BGP_ATTR_NH_REFRESH 0x08
/* Path origin attribute */
uint8_t origin;
@@ -220,9 +226,6 @@ struct attr {
/* MP Nexthop length */
uint8_t mp_nexthop_len;
- /* MP Nexthop preference */
- uint8_t mp_nexthop_prefer_global;
-
/* Static MAC for EVPN */
uint8_t sticky;
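
The bgp_attr.h hunk above replaces the standalone boolean mp_nexthop_prefer_global with one bit (BGP_ATTR_NH_MP_PREFER_GLOBAL) in the new nh_flag field. A minimal sketch of the intended access pattern, relying on the generic CHECK_FLAG/SET_FLAG/UNSET_FLAG helpers from FRR's lib headers that the later hunks use; the two wrapper function names below are purely illustrative and not part of the patch:

/* Illustrative wrappers only, not part of the patch; they assume
 * struct attr and the FRR flag macros are in scope.
 */
static inline void attr_set_nh_prefer_global(struct attr *attr, bool prefer)
{
	if (prefer)
		SET_FLAG(attr->nh_flag, BGP_ATTR_NH_MP_PREFER_GLOBAL);
	else
		UNSET_FLAG(attr->nh_flag, BGP_ATTR_NH_MP_PREFER_GLOBAL);
}

static inline bool attr_nh_prefers_global(const struct attr *attr)
{
	return CHECK_FLAG(attr->nh_flag, BGP_ATTR_NH_MP_PREFER_GLOBAL);
}

This mirrors the conversions in bgp_mpath.c, bgp_nht.c, bgp_route.c, bgp_routemap.c, bgp_snmp_bgp4v2.c and bgp_zebra.c below, where every read of mp_nexthop_prefer_global becomes a CHECK_FLAG() and every write becomes a SET_FLAG()/UNSET_FLAG() pair.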
diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c
index 32a5e14b11..84c847d796 100644
--- a/bgpd/bgp_mpath.c
+++ b/bgpd/bgp_mpath.c
@@ -142,15 +142,21 @@ int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1,
&bpi2->attr->mp_nexthop_global);
break;
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
- addr1 = (bpi1->attr->mp_nexthop_prefer_global)
+ addr1 = (CHECK_FLAG(
+ bpi1->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
? bpi1->attr->mp_nexthop_global
: bpi1->attr->mp_nexthop_local;
- addr2 = (bpi2->attr->mp_nexthop_prefer_global)
+ addr2 = (CHECK_FLAG(
+ bpi2->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
? bpi2->attr->mp_nexthop_global
: bpi2->attr->mp_nexthop_local;
- if (!bpi1->attr->mp_nexthop_prefer_global
- && !bpi2->attr->mp_nexthop_prefer_global)
+ if (!CHECK_FLAG(bpi1->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL) &&
+ !CHECK_FLAG(bpi2->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
compare = !bgp_interface_same(
bpi1->peer->ifp,
bpi2->peer->ifp);
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 0270695c2f..9b86c9b4b1 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1060,9 +1060,11 @@ static bool leak_update_nexthop_valid(struct bgp *to_bgp, struct bgp_dest *bn,
{
struct bgp_path_info *bpi_ultimate;
struct bgp *bgp_nexthop;
+ struct bgp_table *table;
bool nh_valid;
bpi_ultimate = bgp_get_imported_bpi_ultimate(source_bpi);
+ table = bgp_dest_table(bpi_ultimate->net);
if (bpi->extra && bpi->extra->bgp_orig)
bgp_nexthop = bpi->extra->bgp_orig;
@@ -1070,13 +1072,25 @@ static bool leak_update_nexthop_valid(struct bgp *to_bgp, struct bgp_dest *bn,
bgp_nexthop = bgp_orig;
/*
- * No nexthop tracking for redistributed routes or for
+ * No nexthop tracking for redistributed routes,
+ * for static routes (i.e. coming from the bgp network statement) or for
* EVPN-imported routes that get leaked.
*/
if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
is_pi_family_evpn(bpi_ultimate))
nh_valid = 1;
- else
+ else if (bpi_ultimate->type == ZEBRA_ROUTE_BGP &&
+ bpi_ultimate->sub_type == BGP_ROUTE_STATIC && table &&
+ (table->safi == SAFI_UNICAST ||
+ table->safi == SAFI_LABELED_UNICAST)) {
+ /* Routes from network statement */
+ if (CHECK_FLAG(bgp_nexthop->flags, BGP_FLAG_IMPORT_CHECK))
+ nh_valid = bgp_find_or_add_nexthop(
+ to_bgp, bgp_nexthop, afi, safi, bpi_ultimate,
+ NULL, 0, p);
+ else
+ nh_valid = 1;
+ } else
/*
* TBD do we need to do anything about the
* 'connected' parameter?
@@ -1266,6 +1280,7 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
if (debug)
zlog_debug("%s: ->%s: %pBD Found route, changed attr",
__func__, to_bgp->name_pretty, bn);
+ UNSET_FLAG(bpi->attr->nh_flag, BGP_ATTR_NH_REFRESH);
return bpi;
}
@@ -1864,11 +1879,31 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
uint32_t num_labels = 0;
int nexthop_self_flag = 1;
struct bgp_path_info *bpi_ultimate = NULL;
+ struct bgp_path_info *bpi;
int origin_local = 0;
struct bgp *src_vrf;
+ struct interface *ifp;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
+ /*
+ * For VRF-2-VRF route-leaking,
+ * the source will be the originating VRF.
+ *
+ * If ACCEPT_OWN mechanism is enabled, then we SHOULD(?)
+ * get the source VRF (BGP) by looking at the RD.
+ */
+ struct bgp *src_bgp = bgp_lookup_by_rd(path_vpn, prd, afi);
+
+ if (path_vpn->extra && path_vpn->extra->bgp_orig)
+ src_vrf = path_vpn->extra->bgp_orig;
+ else if (src_bgp)
+ src_vrf = src_bgp;
+ else
+ src_vrf = from_bgp;
+
+ bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p, NULL);
+
if (!vpn_leak_from_vpn_active(to_bgp, afi, &debugmsg)) {
if (debug)
zlog_debug("%s: skipping: %s", __func__, debugmsg);
@@ -1928,6 +1963,18 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
community_strip_accept_own(&static_attr);
+ for (bpi = bgp_dest_get_bgp_path_info(bn); bpi; bpi = bpi->next) {
+ if (bpi->extra && bpi->extra->parent == path_vpn)
+ break;
+ }
+
+ if (bpi &&
+ leak_update_nexthop_valid(to_bgp, bn, &static_attr, afi, safi,
+ path_vpn, bpi, src_vrf, p, debug))
+ SET_FLAG(static_attr.nh_flag, BGP_ATTR_NH_VALID);
+ else
+ UNSET_FLAG(static_attr.nh_flag, BGP_ATTR_NH_VALID);
+
/*
* Nexthop: stash and clear
*
@@ -1970,6 +2017,22 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
break;
}
+ if (static_attr.nexthop.s_addr == INADDR_ANY &&
+ IN6_IS_ADDR_UNSPECIFIED(&static_attr.mp_nexthop_global)) {
+ ifp = if_get_vrf_loopback(src_vrf->vrf_id);
+ if (ifp)
+ static_attr.nh_ifindex = ifp->ifindex;
+ } else if (static_attr.nh_ifindex)
+ ifp = if_lookup_by_index(static_attr.nh_ifindex,
+ src_vrf->vrf_id);
+ else
+ ifp = NULL;
+
+ if (ifp && if_is_operative(ifp))
+ SET_FLAG(static_attr.nh_flag, BGP_ATTR_NH_IF_OPERSTATE);
+ else
+ UNSET_FLAG(static_attr.nh_flag, BGP_ATTR_NH_IF_OPERSTATE);
+
/*
* route map handling
*/
@@ -2051,22 +2114,6 @@ static bool vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
zlog_debug("%s: pfx %pBD: num_labels %d", __func__,
path_vpn->net, num_labels);
- /*
- * For VRF-2-VRF route-leaking,
- * the source will be the originating VRF.
- *
- * If ACCEPT_OWN mechanism is enabled, then we SHOULD(?)
- * get the source VRF (BGP) by looking at the RD.
- */
- struct bgp *src_bgp = bgp_lookup_by_rd(path_vpn, prd, afi);
-
- if (path_vpn->extra && path_vpn->extra->bgp_orig)
- src_vrf = path_vpn->extra->bgp_orig;
- else if (src_bgp)
- src_vrf = src_bgp;
- else
- src_vrf = from_bgp;
-
leak_update(to_bgp, bn, new_attr, afi, safi, path_vpn, pLabels,
num_labels, src_vrf, &nexthop_orig, nexthop_self_flag,
debug);
@@ -2821,6 +2868,10 @@ int bgp_show_mpls_vpn(struct vty *vty, afi_t afi, struct prefix_rd *prd,
{
struct bgp *bgp;
struct bgp_table *table;
+ uint16_t show_flags = 0;
+
+ if (use_json)
+ SET_FLAG(show_flags, BGP_SHOW_OPT_JSON);
bgp = bgp_get_default();
if (bgp == NULL) {
@@ -2832,7 +2883,7 @@ int bgp_show_mpls_vpn(struct vty *vty, afi_t afi, struct prefix_rd *prd,
}
table = bgp->rib[afi][SAFI_MPLS_VPN];
return bgp_show_table_rd(vty, bgp, SAFI_MPLS_VPN, table, prd, type,
- output_arg, use_json);
+ output_arg, show_flags);
}
DEFUN (show_bgp_ip_vpn_all_rd,
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index 25a4a1b521..6bbdbdc1a9 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -500,11 +500,8 @@ static void bgp_connected_cleanup(struct route_table *table,
if (!bc)
return;
- bc->refcnt--;
- if (bc->refcnt == 0) {
- XFREE(MTYPE_BGP_CONN, bc);
- bgp_dest_set_bgp_connected_ref_info(bn, NULL);
- }
+ XFREE(MTYPE_BGP_CONN, bc);
+ bgp_dest_set_bgp_connected_ref_info(bn, NULL);
}
bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type,
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index cf8ff524e9..b6b0c584d7 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -46,6 +46,7 @@
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_rd.h"
+#include "bgpd/bgp_mplsvpn.h"
extern struct zclient *zclient;
@@ -388,7 +389,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
if (pi && is_route_parent_evpn(pi))
bnc->is_evpn_gwip_nexthop = true;
- if (is_bgp_static_route) {
+ if (is_bgp_static_route && !CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE)) {
SET_FLAG(bnc->flags, BGP_STATIC_ROUTE);
/* If we're toggling the type, re-register */
@@ -423,8 +424,8 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
SET_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED);
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
- } else if (peer && !connected
- && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) {
+ } else if (peer && !connected &&
+ CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) {
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED);
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
@@ -834,10 +835,13 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
{
struct bgp_nexthop_cache_head *tree = NULL;
struct bgp_nexthop_cache *bnc_nhc, *bnc_import;
+ struct bgp_path_info *pi;
+ struct bgp_dest *dest;
struct bgp *bgp;
struct prefix match;
struct zapi_route nhr;
afi_t afi;
+ safi_t safi;
bgp = bgp_lookup_by_vrf_id(vrf_id);
if (!bgp) {
@@ -858,25 +862,37 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
tree = &bgp->nexthop_cache_table[afi];
bnc_nhc = bnc_find(tree, &match, nhr.srte_color, 0);
- if (!bnc_nhc) {
- if (BGP_DEBUG(nht, NHT))
- zlog_debug(
- "parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache",
- &nhr.prefix, nhr.srte_color, bgp->name_pretty);
- } else
+ if (bnc_nhc)
bgp_process_nexthop_update(bnc_nhc, &nhr, false);
+ else if (BGP_DEBUG(nht, NHT))
+ zlog_debug(
+ "parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache",
+ &nhr.prefix, nhr.srte_color, bgp->name_pretty);
tree = &bgp->import_check_table[afi];
bnc_import = bnc_find(tree, &match, nhr.srte_color, 0);
- if (!bnc_import) {
- if (BGP_DEBUG(nht, NHT))
- zlog_debug(
- "parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check",
- &nhr.prefix, nhr.srte_color, bgp->name_pretty);
- } else
+ if (bnc_import) {
bgp_process_nexthop_update(bnc_import, &nhr, true);
+ safi = nhr.safi;
+ if (bgp->rib[afi][safi]) {
+ dest = bgp_afi_node_get(bgp->rib[afi][safi], afi, safi,
+ &match, NULL);
+
+ for (pi = bgp_dest_get_bgp_path_info(dest); pi;
+ pi = pi->next)
+ if (pi->peer == bgp->peer_self &&
+ pi->type == ZEBRA_ROUTE_BGP &&
+ pi->sub_type == BGP_ROUTE_STATIC)
+ vpn_leak_from_vrf_update(
+ bgp_get_default(), bgp, pi);
+ }
+ } else if (BGP_DEBUG(nht, NHT))
+ zlog_debug(
+ "parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check",
+ &nhr.prefix, nhr.srte_color, bgp->name_pretty);
+
/*
* HACK: if any BGP route is dependant on an SR-policy that doesn't
* exist, zebra will never send NH updates relative to that policy. In
@@ -989,7 +1005,8 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
*/
else if (pi->attr->mp_nexthop_len
== BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
- if (pi->attr->mp_nexthop_prefer_global)
+ if (CHECK_FLAG(pi->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
p->u.prefix6 =
pi->attr->mp_nexthop_global;
else
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 95493c11f8..e478abc44a 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -277,8 +277,10 @@ struct bgp_path_info_extra *bgp_path_info_extra_get(struct bgp_path_info *pi)
}
/* Free bgp route information. */
-static void bgp_path_info_free(struct bgp_path_info *path)
+void bgp_path_info_free_with_caller(const char *name,
+ struct bgp_path_info *path)
{
+ frrtrace(2, frr_bgp, bgp_path_info_free, path, name);
bgp_attr_unintern(&path->attr);
bgp_unlink_nexthop(path);
@@ -389,8 +391,10 @@ static int bgp_dest_set_defer_flag(struct bgp_dest *dest, bool delete)
return -1;
}
-void bgp_path_info_add(struct bgp_dest *dest, struct bgp_path_info *pi)
+void bgp_path_info_add_with_caller(const char *name, struct bgp_dest *dest,
+ struct bgp_path_info *pi)
{
+ frrtrace(3, frr_bgp, bgp_path_info_add, dest, pi, name);
struct bgp_path_info *top;
top = bgp_dest_get_bgp_path_info(dest);
@@ -8679,6 +8683,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
afi_t afi;
route_map_result_t ret;
struct bgp_redist *red;
+ struct interface *ifp;
/* Make default attribute. */
bgp_attr_default_set(&attr, bgp, BGP_ORIGIN_INCOMPLETE);
@@ -8728,6 +8733,11 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
}
attr.nh_type = nhtype;
attr.nh_ifindex = ifindex;
+ ifp = if_lookup_by_index(ifindex, bgp->vrf_id);
+ if (ifp && if_is_operative(ifp))
+ SET_FLAG(attr.nh_flag, BGP_ATTR_NH_IF_OPERSTATE);
+ else
+ UNSET_FLAG(attr.nh_flag, BGP_ATTR_NH_IF_OPERSTATE);
attr.med = metric;
attr.distance = distance;
@@ -8911,7 +8921,11 @@ void bgp_redistribute_withdraw(struct bgp *bgp, afi_t afi, int type,
bgp_aggregate_decrement(bgp, bgp_dest_get_prefix(dest),
pi, afi, SAFI_UNICAST);
bgp_path_info_delete(dest, pi);
- bgp_process(bgp, dest, afi, SAFI_UNICAST);
+ if (!CHECK_FLAG(bgp->flags,
+ BGP_FLAG_DELETE_IN_PROGRESS))
+ bgp_process(bgp, dest, afi, SAFI_UNICAST);
+ else
+ bgp_path_info_reap(dest, pi);
}
}
}
@@ -9410,9 +9424,10 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
"link-local");
if ((IPV6_ADDR_CMP(&attr->mp_nexthop_global,
- &attr->mp_nexthop_local)
- != 0)
- && !attr->mp_nexthop_prefer_global)
+ &attr->mp_nexthop_local) !=
+ 0) &&
+ !CHECK_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
json_object_boolean_true_add(
json_nexthop_ll, "used");
else
@@ -9424,10 +9439,11 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
} else {
/* Display LL if LL/Global both in table unless
* prefer-global is set */
- if (((attr->mp_nexthop_len
- == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
- && !attr->mp_nexthop_prefer_global)
- || (path->peer->conf_if)) {
+ if (((attr->mp_nexthop_len ==
+ BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) &&
+ !CHECK_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL)) ||
+ (path->peer->conf_if)) {
if (path->peer->conf_if) {
len = vty_out(vty, "%s",
path->peer->conf_if);
@@ -10689,7 +10705,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_object_boolean_true_add(json_nexthop_ll,
"accessible");
- if (!attr->mp_nexthop_prefer_global)
+ if (!CHECK_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
json_object_boolean_true_add(json_nexthop_ll,
"used");
else
@@ -10699,7 +10716,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
vty_out(vty, " (%s) %s\n",
inet_ntop(AF_INET6, &attr->mp_nexthop_local,
buf, INET6_ADDRSTRLEN),
- attr->mp_nexthop_prefer_global
+ CHECK_FLAG(attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL)
? "(prefer-global)"
: "(used)");
}
@@ -11248,6 +11266,8 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
bool wide = CHECK_FLAG(show_flags, BGP_SHOW_OPT_WIDE);
bool all = CHECK_FLAG(show_flags, BGP_SHOW_OPT_AFI_ALL);
+ bool detail_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON_DETAIL);
+ bool detail_routes = CHECK_FLAG(show_flags, BGP_SHOW_OPT_ROUTES_DETAIL);
if (output_cum && *output_cum != 0)
header = false;
@@ -11281,8 +11301,7 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
}
/* Check for 'json detail', where we need header output once per dest */
- if (use_json && CHECK_FLAG(show_flags, BGP_SHOW_OPT_DETAIL) &&
- type != bgp_show_type_dampend_paths &&
+ if (use_json && detail_json && type != bgp_show_type_dampend_paths &&
type != bgp_show_type_damp_neighbor &&
type != bgp_show_type_flap_statistics &&
type != bgp_show_type_flap_neighbor)
@@ -11545,17 +11564,19 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
vty_out(vty, "Default local pref %u, ",
bgp->default_local_pref);
vty_out(vty, "local AS %u\n", bgp->as);
- vty_out(vty, BGP_SHOW_SCODE_HEADER);
- vty_out(vty, BGP_SHOW_NCODE_HEADER);
- vty_out(vty, BGP_SHOW_OCODE_HEADER);
- vty_out(vty, BGP_SHOW_RPKI_HEADER);
+ if (!detail_routes) {
+ vty_out(vty, BGP_SHOW_SCODE_HEADER);
+ vty_out(vty, BGP_SHOW_NCODE_HEADER);
+ vty_out(vty, BGP_SHOW_OCODE_HEADER);
+ vty_out(vty, BGP_SHOW_RPKI_HEADER);
+ }
if (type == bgp_show_type_dampend_paths
|| type == bgp_show_type_damp_neighbor)
vty_out(vty, BGP_SHOW_DAMP_HEADER);
else if (type == bgp_show_type_flap_statistics
|| type == bgp_show_type_flap_neighbor)
vty_out(vty, BGP_SHOW_FLAP_HEADER);
- else
+ else if (!detail_routes)
vty_out(vty, (wide ? BGP_SHOW_HEADER_WIDE
: BGP_SHOW_HEADER));
header = false;
@@ -11598,16 +11619,30 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
AFI_IP, safi, use_json,
json_paths);
else {
- if (CHECK_FLAG(show_flags, BGP_SHOW_OPT_DETAIL))
+ if (detail_routes || detail_json) {
+ const struct prefix_rd *prd = NULL;
+
+ if (dest->pdest)
+ prd = bgp_rd_from_dest(
+ dest->pdest, safi);
+
+ if (!use_json)
+ route_vty_out_detail_header(
+ vty, bgp, dest,
+ bgp_dest_get_prefix(
+ dest),
+ prd, table->afi, safi,
+ NULL);
+
route_vty_out_detail(
- vty, bgp, dest,
- bgp_dest_get_prefix(dest), pi,
+ vty, bgp, dest, dest_p, pi,
family2afi(dest_p->family),
safi, RPKI_NOT_BEING_USED,
json_paths);
- else
+ } else {
route_vty_out(vty, dest_p, pi, display,
safi, json_paths, wide);
+ }
}
display++;
}
@@ -11689,7 +11724,8 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi,
struct bgp_table *table, struct prefix_rd *prd_match,
- enum bgp_show_type type, void *output_arg, bool use_json)
+ enum bgp_show_type type, void *output_arg,
+ uint16_t show_flags)
{
struct bgp_dest *dest, *next;
unsigned long output_cum = 0;
@@ -11697,13 +11733,10 @@ int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi,
unsigned long json_header_depth = 0;
struct bgp_table *itable;
bool show_msg;
- uint16_t show_flags = 0;
+ bool use_json = !!CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
show_msg = (!use_json && type == bgp_show_type_normal);
- if (use_json)
- SET_FLAG(show_flags, BGP_SHOW_OPT_JSON);
-
for (dest = bgp_table_top(table); dest; dest = next) {
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
@@ -11769,7 +11802,7 @@ static int bgp_show(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
/* use MPLS and ENCAP specific shows until they are merged */
if (safi == SAFI_MPLS_VPN) {
return bgp_show_table_rd(vty, bgp, safi, table, NULL, type,
- output_arg, use_json);
+ output_arg, show_flags);
}
if (safi == SAFI_FLOWSPEC && type == bgp_show_type_detail) {
@@ -12651,7 +12684,8 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
|A.B.C.D/M longer-prefixes\
|X:X::X:X/M longer-prefixes\
|optimal-route-reflection [WORD$orr_group_name]\
- ] [json$uj [detail$detail] | wide$wide]",
+ |detail-routes$detail_routes\
+ ] [json$uj [detail$detail_json] | wide$wide]",
SHOW_STR IP_STR BGP_STR BGP_INSTANCE_HELP_STR BGP_AFI_HELP_STR
BGP_SAFI_WITH_LABEL_HELP_STR
"Display the entries for all address families\n"
@@ -12701,6 +12735,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
"Display route and more specific routes\n"
"Display Optimal Route Reflection RR Clients\n"
"ORR Group name\n"
+ "Display detailed version of all routes\n"
JSON_STR
"Display detailed version of JSON output\n"
"Increase table width for longer prefixes\n")
@@ -12724,8 +12759,11 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
SET_FLAG(show_flags, BGP_SHOW_OPT_JSON);
}
- if (detail)
- SET_FLAG(show_flags, BGP_SHOW_OPT_DETAIL);
+ if (detail_json)
+ SET_FLAG(show_flags, BGP_SHOW_OPT_JSON_DETAIL);
+
+ if (detail_routes)
+ SET_FLAG(show_flags, BGP_SHOW_OPT_ROUTES_DETAIL);
/* [<ipv4|ipv6> [all]] */
if (all) {
@@ -14702,7 +14740,7 @@ DEFUN (show_ip_bgp_flowspec_routes_detailed,
struct bgp *bgp = NULL;
int idx = 0;
bool uj = use_json(argc, argv);
- uint16_t show_flags = BGP_SHOW_OPT_DETAIL;
+ uint16_t show_flags = BGP_SHOW_OPT_ROUTES_DETAIL;
if (uj) {
argc--;
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 3fa58c0dfb..e16e077029 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -663,8 +663,9 @@ DECLARE_HOOK(bgp_process,
#define BGP_SHOW_OPT_AFI_IP6 (1 << 4)
#define BGP_SHOW_OPT_ESTABLISHED (1 << 5)
#define BGP_SHOW_OPT_FAILED (1 << 6)
-#define BGP_SHOW_OPT_DETAIL (1 << 7)
+#define BGP_SHOW_OPT_JSON_DETAIL (1 << 7)
#define BGP_SHOW_OPT_TERSE (1 << 8)
+#define BGP_SHOW_OPT_ROUTES_DETAIL (1 << 9)
/* Prototypes. */
extern void bgp_rib_remove(struct bgp_dest *dest, struct bgp_path_info *pi,
@@ -865,7 +866,7 @@ extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
extern int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi,
struct bgp_table *table, struct prefix_rd *prd,
enum bgp_show_type type, void *output_arg,
- bool use_json);
+ uint16_t show_flags);
extern void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi);
extern bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi,
uint8_t type, uint8_t stype,
@@ -882,4 +883,12 @@ bgp_path_selection_reason2str(enum bgp_path_selection_reason reason);
extern bool bgp_addpath_encode_rx(struct peer *peer, afi_t afi, safi_t safi);
extern const struct prefix_rd *bgp_rd_from_dest(const struct bgp_dest *dest,
safi_t safi);
+extern void bgp_path_info_free_with_caller(const char *caller,
+ struct bgp_path_info *path);
+extern void bgp_path_info_add_with_caller(const char *caller,
+ struct bgp_dest *dest,
+ struct bgp_path_info *pi);
+#define bgp_path_info_add(A, B) \
+ bgp_path_info_add_with_caller(__func__, (A), (B))
+#define bgp_path_info_free(B) bgp_path_info_free_with_caller(__func__, (B))
#endif /* _QUAGGA_BGP_ROUTE_H */
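
The two macros added at the bottom of bgp_route.h follow the same caller-tracking pattern that bgpd.c already uses for peer_lock()/peer_unlock(): the preprocessor rewrites the call site so that __func__ reaches the *_with_caller() variant, which in turn feeds the new tracepoints in bgp_trace.h. A minimal sketch of how a call site expands; the function name some_caller is hypothetical:

/* Sketch only: what a call site looks like after these macros. */
static void some_caller(struct bgp_dest *dest, struct bgp_path_info *pi)
{
	/* Preprocesses to:
	 *   bgp_path_info_add_with_caller("some_caller", dest, pi);
	 * which fires the frr_bgp:bgp_path_info_add tracepoint with
	 * caller = "some_caller" plus the prefix, peer and lock counts.
	 */
	bgp_path_info_add(dest, pi);
}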
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index 1ce2eb4352..f779b34371 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -3531,11 +3531,11 @@ route_set_ipv6_nexthop_prefer_global(void *rule, const struct prefix *prefix,
if (CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)
|| CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IMPORT)) {
/* Set next hop preference to global */
- path->attr->mp_nexthop_prefer_global = true;
+ SET_FLAG(path->attr->nh_flag, BGP_ATTR_NH_MP_PREFER_GLOBAL);
SET_FLAG(path->attr->rmap_change_flags,
BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED);
} else {
- path->attr->mp_nexthop_prefer_global = false;
+ UNSET_FLAG(path->attr->nh_flag, BGP_ATTR_NH_MP_PREFER_GLOBAL);
SET_FLAG(path->attr->rmap_change_flags,
BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED);
}
diff --git a/bgpd/bgp_snmp_bgp4v2.c b/bgpd/bgp_snmp_bgp4v2.c
index d8d8549960..fe0c33251e 100644
--- a/bgpd/bgp_snmp_bgp4v2.c
+++ b/bgpd/bgp_snmp_bgp4v2.c
@@ -704,7 +704,8 @@ static uint8_t *bgp4v2PathAttrTable(struct variable *v, oid name[],
case BGP_ATTR_NHLEN_IPV6_GLOBAL:
return SNMP_INTEGER(2);
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
- if (path->attr->mp_nexthop_prefer_global)
+ if (CHECK_FLAG(path->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
return SNMP_INTEGER(2);
else
return SNMP_INTEGER(4);
@@ -718,7 +719,8 @@ static uint8_t *bgp4v2PathAttrTable(struct variable *v, oid name[],
case BGP_ATTR_NHLEN_IPV6_GLOBAL:
return SNMP_IP6ADDRESS(path->attr->mp_nexthop_global);
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
- if (path->attr->mp_nexthop_prefer_global)
+ if (CHECK_FLAG(path->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL))
return SNMP_IP6ADDRESS(
path->attr->mp_nexthop_global);
else
diff --git a/bgpd/bgp_trace.h b/bgpd/bgp_trace.h
index 14149b5139..7cc8f24e06 100644
--- a/bgpd/bgp_trace.h
+++ b/bgpd/bgp_trace.h
@@ -247,6 +247,71 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_LOGLEVEL(frr_bgp, bgp_dest_unlock, TRACE_INFO)
+/*
+ * peer_lock/peer_unlock
+ */
+TRACEPOINT_EVENT(
+ frr_bgp,
+ bgp_peer_lock,
+ TP_ARGS(struct peer *, peer,
+ const char *, name),
+ TP_FIELDS(
+ ctf_string(caller, name)
+ ctf_string(peer, PEER_HOSTNAME(peer))
+ ctf_integer(unsigned int, count, peer->lock)
+ )
+)
+TRACEPOINT_LOGLEVEL(frr_bgp, bgp_peer_lock, TRACE_INFO)
+
+TRACEPOINT_EVENT(
+ frr_bgp,
+ bgp_peer_unlock,
+ TP_ARGS(struct peer *, peer,
+ const char *, name),
+ TP_FIELDS(
+ ctf_string(caller, name)
+ ctf_string(peer, PEER_HOSTNAME(peer))
+ ctf_integer(unsigned int, count, peer->lock)
+ )
+)
+TRACEPOINT_LOGLEVEL(frr_bgp, bgp_peer_unlock, TRACE_INFO)
+
+/*
+ * bgp_path_info_add/bgp_path_info_free
+ */
+TRACEPOINT_EVENT(
+ frr_bgp,
+ bgp_path_info_add,
+ TP_ARGS(struct bgp_dest *, dest,
+ struct bgp_path_info *, bpi,
+ const char *, name),
+ TP_FIELDS(
+ ctf_string(caller, name)
+ ctf_string(prefix, bgp_dest_get_prefix_str(dest))
+ ctf_string(peer, PEER_HOSTNAME(bpi->peer))
+ ctf_integer(unsigned int, dest_lock,
+ bgp_dest_get_lock_count(dest))
+ ctf_integer(unsigned int, peer_lock, bpi->peer->lock)
+ )
+)
+TRACEPOINT_LOGLEVEL(frr_bgp, bgp_path_info_add, TRACE_INFO)
+
+TRACEPOINT_EVENT(
+ frr_bgp,
+ bgp_path_info_free,
+ TP_ARGS(struct bgp_path_info *, bpi,
+ const char *, name),
+ TP_FIELDS(
+ ctf_string(caller, name)
+ ctf_string(prefix, bgp_dest_get_prefix_str(bpi->net))
+ ctf_string(peer, PEER_HOSTNAME(bpi->peer))
+ ctf_integer(unsigned int, dest_lock,
+ bgp_dest_get_lock_count(bpi->net))
+ ctf_integer(unsigned int, peer_lock, bpi->peer->lock)
+ )
+)
+TRACEPOINT_LOGLEVEL(frr_bgp, bgp_path_info_free, TRACE_INFO)
+
TRACEPOINT_EVENT(
frr_bgp,
evpn_mac_ip_zsend,
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 91639982be..f6e7b444c6 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -234,6 +234,7 @@ static int bgp_ifp_up(struct interface *ifp)
struct connected *c;
struct nbr_connected *nc;
struct listnode *node, *nnode;
+ struct bgp *bgp_default = bgp_get_default();
struct bgp *bgp;
bgp = ifp->vrf->info;
@@ -256,6 +257,14 @@ static int bgp_ifp_up(struct interface *ifp)
hook_call(bgp_vrf_status_changed, bgp, ifp);
bgp_nht_ifp_up(ifp);
+ if (bgp_default && if_is_loopback(ifp)) {
+ vpn_leak_zebra_vrf_label_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP6);
+ vpn_leak_postchange_all();
+ }
+
return 0;
}
@@ -264,6 +273,7 @@ static int bgp_ifp_down(struct interface *ifp)
struct connected *c;
struct nbr_connected *nc;
struct listnode *node, *nnode;
+ struct bgp *bgp_default = bgp_get_default();
struct bgp *bgp;
struct peer *peer;
@@ -303,6 +313,14 @@ static int bgp_ifp_down(struct interface *ifp)
hook_call(bgp_vrf_status_changed, bgp, ifp);
bgp_nht_ifp_down(ifp);
+ if (bgp_default && if_is_loopback(ifp)) {
+ vpn_leak_zebra_vrf_label_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP6);
+ vpn_leak_postchange_all();
+ }
+
return 0;
}
@@ -390,10 +408,16 @@ static int bgp_interface_address_add(ZAPI_CALLBACK_ARGS)
static int bgp_interface_address_delete(ZAPI_CALLBACK_ARGS)
{
struct listnode *node, *nnode;
+ struct bgp_path_info *pi;
+ struct bgp_table *table;
+ struct bgp_dest *dest;
struct connected *ifc;
struct peer *peer;
- struct bgp *bgp;
+ struct bgp *bgp, *from_bgp, *bgp_default;
+ struct listnode *next;
struct prefix *addr;
+ afi_t afi;
+ safi_t safi;
bgp = bgp_lookup_by_vrf_id(vrf_id);
@@ -421,9 +445,6 @@ static int bgp_interface_address_delete(ZAPI_CALLBACK_ARGS)
* we do not want the peering to bounce.
*/
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
- afi_t afi;
- safi_t safi;
-
if (addr->family == AF_INET)
continue;
@@ -439,6 +460,44 @@ static int bgp_interface_address_delete(ZAPI_CALLBACK_ARGS)
}
}
+ bgp_default = bgp_get_default();
+ afi = family2afi(addr->family);
+ safi = SAFI_UNICAST;
+
+ /* When the last IPv4 address is deleted, Linux removes all routes
+ * using the interface, so bgpd needs to re-send them.
+ */
+ if (bgp_default && afi == AFI_IP) {
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, next, from_bgp)) {
+ table = from_bgp->rib[afi][safi];
+ if (!table)
+ continue;
+
+ for (dest = bgp_table_top(table); dest;
+ dest = bgp_route_next(dest)) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); pi;
+ pi = pi->next) {
+ if (pi->type == ZEBRA_ROUTE_BGP &&
+ pi->attr &&
+ pi->attr->nh_ifindex ==
+ ifc->ifp->ifindex) {
+ SET_FLAG(pi->attr->nh_flag,
+ BGP_ATTR_NH_REFRESH);
+ }
+ }
+ }
+
+ if (from_bgp->inst_type != BGP_INSTANCE_TYPE_VRF)
+ continue;
+
+ vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_default, from_bgp);
+
+ vpn_leak_postchange(BGP_VPN_POLICY_DIR_FROMVPN, afi,
+ bgp_default, from_bgp);
+ }
+ }
+
connected_free(&ifc);
return 0;
@@ -1007,7 +1066,8 @@ bgp_path_info_to_ipv6_nexthop(struct bgp_path_info *path, ifindex_t *ifindex)
|| path->attr->mp_nexthop_len
== BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
/* Check if route-map is set to prefer global over link-local */
- if (path->attr->mp_nexthop_prefer_global) {
+ if (CHECK_FLAG(path->attr->nh_flag,
+ BGP_ATTR_NH_MP_PREFER_GLOBAL)) {
nexthop = &path->attr->mp_nexthop_global;
if (IN6_IS_ADDR_LINKLOCAL(nexthop))
*ifindex = path->attr->nh_ifindex;
@@ -1307,6 +1367,7 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
uint8_t distance;
struct peer *peer;
struct bgp_path_info *mpinfo;
+ struct bgp_path_info *bpi_ultimate;
struct bgp *bgp_orig;
uint32_t metric;
struct attr local_attr;
@@ -1355,13 +1416,9 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
peer = info->peer;
- if (info->type == ZEBRA_ROUTE_BGP
- && info->sub_type == BGP_ROUTE_IMPORTED) {
-
- /* Obtain peer from parent */
- if (info->extra && info->extra->parent)
- peer = ((struct bgp_path_info *)(info->extra->parent))
- ->peer;
+ if (info->type == ZEBRA_ROUTE_BGP) {
+ bpi_ultimate = bgp_get_imported_bpi_ultimate(info);
+ peer = bpi_ultimate->peer;
}
tag = info->attr->tag;
@@ -1532,7 +1589,9 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
api_nh->weight = nh_weight;
- if (mpinfo->extra && !sid_zero(&mpinfo->extra->sid[0].sid) &&
+ if (mpinfo->extra &&
+ bgp_is_valid_label(&mpinfo->extra->label[0]) &&
+ !sid_zero(&mpinfo->extra->sid[0].sid) &&
!CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN)) {
sid_info = &mpinfo->extra->sid[0];
@@ -1540,12 +1599,16 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
sizeof(api_nh->seg6_segs));
if (sid_info->transposition_len != 0) {
- if (!bgp_is_valid_label(
- &mpinfo->extra->label[0]))
- continue;
-
mpls_lse_decode(mpinfo->extra->label[0], &label,
&ttl, &exp, &bos);
+
+ if (label < MPLS_LABEL_UNRESERVED_MIN) {
+ if (bgp_debug_zebra(&api.prefix))
+ zlog_debug(
+ "skip invalid SRv6 routes: transposition scheme is used, but label is too small");
+ continue;
+ }
+
transpose_sid(&api_nh->seg6_segs, label,
sid_info->transposition_offset,
sid_info->transposition_len);
@@ -3187,6 +3250,7 @@ extern struct zebra_privs_t bgpd_privs;
static int bgp_ifp_create(struct interface *ifp)
{
+ struct bgp *bgp_default = bgp_get_default();
struct bgp *bgp;
if (BGP_DEBUG(zebra, ZEBRA))
@@ -3201,6 +3265,17 @@ static int bgp_ifp_create(struct interface *ifp)
bgp_update_interface_nbrs(bgp, ifp, ifp);
hook_call(bgp_vrf_status_changed, bgp, ifp);
+
+ if (bgp_default &&
+ (if_is_loopback_exact(ifp) ||
+ (if_is_vrf(ifp) && ifp->vrf->vrf_id != VRF_DEFAULT))) {
+ vpn_leak_zebra_vrf_label_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP6);
+ vpn_leak_postchange_all();
+ }
+
return 0;
}
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 734e44f252..9b4aa38d7a 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -93,6 +93,7 @@
#include "bgpd/bgp_evpn_mh.h"
#include "bgpd/bgp_mac.h"
#include "bgpd/bgp_orr.h"
+#include "bgp_trace.h"
DEFINE_MTYPE_STATIC(BGPD, PEER_TX_SHUTDOWN_MSG, "Peer shutdown message (TX)");
DEFINE_MTYPE_STATIC(BGPD, BGP_EVPN_INFO, "BGP EVPN instance information");
@@ -1194,6 +1195,7 @@ static void peer_free(struct peer *peer)
/* increase reference count on a struct peer */
struct peer *peer_lock_with_caller(const char *name, struct peer *peer)
{
+ frrtrace(2, frr_bgp, bgp_peer_lock, peer, name);
assert(peer && (peer->lock >= 0));
peer->lock++;
@@ -1206,6 +1208,7 @@ struct peer *peer_lock_with_caller(const char *name, struct peer *peer)
*/
struct peer *peer_unlock_with_caller(const char *name, struct peer *peer)
{
+ frrtrace(2, frr_bgp, bgp_peer_unlock, peer, name);
assert(peer && (peer->lock > 0));
peer->lock--;
diff --git a/bgpd/rfapi/bgp_rfapi_cfg.c b/bgpd/rfapi/bgp_rfapi_cfg.c
index eae9859ba1..b65d90e1b3 100644
--- a/bgpd/rfapi/bgp_rfapi_cfg.c
+++ b/bgpd/rfapi/bgp_rfapi_cfg.c
@@ -3847,6 +3847,13 @@ struct rfapi_cfg *bgp_rfapi_cfg_new(struct rfapi_rfp_cfg *cfg)
return h;
}
+static void bgp_rfapi_rfgn_list_delete(void *data)
+{
+ struct rfapi_rfg_name *rfgn = data;
+ free(rfgn->name);
+ rfgn_free(rfgn);
+}
+
void bgp_rfapi_cfg_destroy(struct bgp *bgp, struct rfapi_cfg *h)
{
afi_t afi;
@@ -3858,8 +3865,13 @@ void bgp_rfapi_cfg_destroy(struct bgp *bgp, struct rfapi_cfg *h)
if (h->l2_groups != NULL)
list_delete(&h->l2_groups);
list_delete(&h->nve_groups_sequential);
+
+ h->rfg_export_direct_bgp_l->del = bgp_rfapi_rfgn_list_delete;
list_delete(&h->rfg_export_direct_bgp_l);
+
+ h->rfg_export_zebra_l->del = bgp_rfapi_rfgn_list_delete;
list_delete(&h->rfg_export_zebra_l);
+
if (h->default_rt_export_list)
ecommunity_free(&h->default_rt_export_list);
if (h->default_rt_import_list)
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 0b8e967264..f751eb3a75 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -4325,6 +4325,17 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
If ``json`` option is specified, output is displayed in JSON format.
+.. clicmd:: show [ip] bgp [afi] [safi] [all] detail-routes
+
+ Display the detailed version of all routes. The same format as using
+ ``show [ip] bgp [afi] [safi] PREFIX``, but for the whole BGP table.
+
+ If the ``all`` option is specified, the ``ip`` keyword is ignored and
+ routes are displayed for all AFIs and SAFIs.
+
+ If ``afi`` is specified together with the ``all`` option, routes will be
+ displayed for each SAFI in the selected AFI.
+
.. _bgp-display-routes-by-community:
Displaying Routes by Community Attribute
diff --git a/doc/user/evpn.rst b/doc/user/evpn.rst
index 0737ab6f07..c8052803cc 100644
--- a/doc/user/evpn.rst
+++ b/doc/user/evpn.rst
@@ -441,7 +441,7 @@ Here a traditional ``vxlan`` interface is created with the name "vni100" which
uses a VTEP-IP of 100.64.0.1, carries VNI 100, and has Dynamic VTEP learning
disabled. IPv6 address autoconfiguration is disabled for "vni100", then the
interface is enslaved to "br100", ARP/ND suppression is enabled, and Dynamic
-VTEP Learning is disabled.
+MAC Learning is disabled.
.. code-block:: shell
diff --git a/doc/user/snmp.rst b/doc/user/snmp.rst
index b9058cc0d3..0bf3565b2e 100644
--- a/doc/user/snmp.rst
+++ b/doc/user/snmp.rst
@@ -115,6 +115,65 @@ Then, you can use the following command to check everything works as expected:
OSPF-MIB::ospfRouterId.0 = IpAddress: 192.168.42.109
[...]
+The example below shows how to query SNMP for BGP:
+
+ .. code-block:: shell
+
+ $ # BGP4-MIB (https://www.circitor.fr/Mibs/Mib/B/BGP4-MIB.mib)
+ $ snmpwalk -c public -v2c -On -Ln localhost .1.3.6.1.2.1.15
+
+ $ # BGP4V2-MIB (http://www.circitor.fr/Mibs/Mib/B/BGP4V2-MIB.mib)
+ $ # Information about the peers (bgp4V2PeerTable):
+ $ snmpwalk -c public -v2c -On -Ln localhost .1.3.6.1.3.5.1.1.2
+ ...
+ .1.3.6.1.3.5.1.1.2.1.1.1.4.192.168.10.124 = Gauge32: 0
+ .1.3.6.1.3.5.1.1.2.1.1.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Gauge32: 0
+ .1.3.6.1.3.5.1.1.2.1.2.1.4.192.168.10.124 = INTEGER: 1
+ .1.3.6.1.3.5.1.1.2.1.2.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = INTEGER: 2
+ .1.3.6.1.3.5.1.1.2.1.3.1.4.192.168.10.124 = Hex-STRING: C0 A8 0A 11
+ .1.3.6.1.3.5.1.1.2.1.3.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Hex-STRING: 2A 02 47 80 0A BC 00 00 00 00 00 00 00 00 00 01
+ .1.3.6.1.3.5.1.1.2.1.4.1.4.192.168.10.124 = INTEGER: 1
+ .1.3.6.1.3.5.1.1.2.1.4.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = INTEGER: 2
+ .1.3.6.1.3.5.1.1.2.1.5.1.4.192.168.10.124 = Hex-STRING: C0 A8 0A 7C
+ .1.3.6.1.3.5.1.1.2.1.5.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Hex-STRING: 2A 02 47 80 0A BC 00 00 00 00 00 00 00 00 00 02
+ .1.3.6.1.3.5.1.1.2.1.6.1.4.192.168.10.124 = Gauge32: 179
+ .1.3.6.1.3.5.1.1.2.1.6.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Gauge32: 179
+ .1.3.6.1.3.5.1.1.2.1.7.1.4.192.168.10.124 = Gauge32: 65002
+ .1.3.6.1.3.5.1.1.2.1.7.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Gauge32: 65002
+ .1.3.6.1.3.5.1.1.2.1.8.1.4.192.168.10.124 = Hex-STRING: C0 A8 0A 11
+ .1.3.6.1.3.5.1.1.2.1.8.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Hex-STRING: C0 A8 0A 11
+ .1.3.6.1.3.5.1.1.2.1.9.1.4.192.168.10.124 = Gauge32: 41894
+ .1.3.6.1.3.5.1.1.2.1.9.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Gauge32: 39960
+ .1.3.6.1.3.5.1.1.2.1.10.1.4.192.168.10.124 = Gauge32: 65001
+ .1.3.6.1.3.5.1.1.2.1.10.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Gauge32: 65001
+ .1.3.6.1.3.5.1.1.2.1.11.1.4.192.168.10.124 = Hex-STRING: C8 C8 C8 CA
+ .1.3.6.1.3.5.1.1.2.1.11.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Hex-STRING: C8 C8 C8 CA
+ .1.3.6.1.3.5.1.1.2.1.12.1.4.192.168.10.124 = INTEGER: 2
+ .1.3.6.1.3.5.1.1.2.1.12.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = INTEGER: 2
+ .1.3.6.1.3.5.1.1.2.1.13.1.4.192.168.10.124 = INTEGER: 6
+ .1.3.6.1.3.5.1.1.2.1.13.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = INTEGER: 6
+
+ $ # Information about the BGP table (bgp4V2NlriTable):
+ $ snmpwalk -c public -v2c -On -Ln localhost .1.3.6.1.3.5.1.1.9
+ ...
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.10.0.2.0.24.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.10.10.100.0.24.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.172.16.31.1.32.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.172.16.31.2.32.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.172.16.31.3.32.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.192.168.0.0.24.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.192.168.1.0.24.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.1.4.192.168.10.0.24.192.168.10.124 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.22.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.0.64.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Gauge32: 1
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.10.0.2.0.24.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.10.10.100.0.24.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.172.16.31.1.32.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.172.16.31.2.32.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.172.16.31.3.32.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.192.168.0.0.24.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.192.168.1.0.24.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.1.4.192.168.10.0.24.192.168.10.124 = Hex-STRING: 02 01 FD E9
+ .1.3.6.1.3.5.1.1.9.1.24.2.16.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.0.64.42.2.71.128.10.188.0.0.0.0.0.0.0.0.0.2 = Hex-STRING: 02 01 FD E9
The AgentX protocol can be transported over a Unix socket or using TCP or UDP.
It usually defaults to a Unix socket and depends on how NetSNMP was built. If
@@ -132,5 +191,7 @@ Here is the syntax for using AgentX:
.. clicmd:: agentx
+ Once enabled, ``agentx`` cannot be unconfigured from the CLI. The only way
+ to disable it is to remove the ``agentx`` keyword from the daemons file.
.. include:: snmptrap.rst
diff --git a/lib/if.c b/lib/if.c
index 70c0c18141..6766a04b37 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -564,9 +564,24 @@ size_t if_lookup_by_hwaddr(const uint8_t *hw_addr, size_t addrsz,
return count;
}
+/* Get the VRF loopback interface, i.e. the loopback on the default VRF
+ * or the VRF interface.
+ */
+struct interface *if_get_vrf_loopback(vrf_id_t vrf_id)
+{
+ struct interface *ifp = NULL;
+ struct vrf *vrf = vrf_lookup_by_id(vrf_id);
+
+ FOR_ALL_INTERFACES (vrf, ifp)
+ if (if_is_loopback(ifp))
+ return ifp;
+
+ return NULL;
+}
/* Get interface by name if given name interface doesn't exist create
- one. */
+ * one.
+ */
struct interface *if_get_by_name(const char *name, vrf_id_t vrf_id,
const char *vrf_name)
{
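
Per its comment, the if_get_vrf_loopback() helper added above walks the interfaces of the given VRF and returns the first one for which if_is_loopback() is true, i.e. the loopback in the default VRF or the VRF device itself. A minimal usage sketch, mirroring the bgp_mplsvpn.c hunk earlier in this diff; the variable names are illustrative:

/* Sketch: fall back to the source VRF's loopback ifindex when the
 * leaked route carries no usable nexthop interface (assumes src_vrf
 * and attr exist in the caller, as in vpn_leak_to_vrf_update_onevrf).
 */
struct interface *ifp = if_get_vrf_loopback(src_vrf->vrf_id);

if (ifp) {
	attr.nh_ifindex = ifp->ifindex;
	if (if_is_operative(ifp))
		SET_FLAG(attr.nh_flag, BGP_ATTR_NH_IF_OPERSTATE);
}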
diff --git a/lib/if.h b/lib/if.h
index 91dcd46247..a653246ccb 100644
--- a/lib/if.h
+++ b/lib/if.h
@@ -532,6 +532,7 @@ static inline bool if_address_is_local(const void *matchaddr, int family,
struct vrf;
extern struct interface *if_lookup_by_name_vrf(const char *name, struct vrf *vrf);
extern struct interface *if_lookup_by_name(const char *ifname, vrf_id_t vrf_id);
+extern struct interface *if_get_vrf_loopback(vrf_id_t vrf_id);
extern struct interface *if_get_by_name(const char *ifname, vrf_id_t vrf_id,
const char *vrf_name);
diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c
index 8c87a568c0..0cb9d02725 100644
--- a/ospfd/ospf_packet.c
+++ b/ospfd/ospf_packet.c
@@ -1169,8 +1169,8 @@ static void ospf_db_desc_proc(struct stream *s, struct ospf_interface *oi,
if (IS_OPAQUE_LSA(lsah->type)
&& !CHECK_FLAG(nbr->options, OSPF_OPTION_O)) {
flog_warn(EC_OSPF_PACKET,
- "LSA[Type%d:%pI4]: Opaque capability mismatch?",
- lsah->type, &lsah->id);
+ "LSA[Type%d:%pI4] from %pI4: Opaque capability mismatch?",
+ lsah->type, &lsah->id, &lsah->adv_router);
OSPF_NSM_EVENT_SCHEDULE(nbr, NSM_SeqNumberMismatch);
return;
}
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf
index 46831bb711..375bbea9ff 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf
@@ -4,6 +4,8 @@ hostname ce1
!
interface lo
ip address 99.0.0.1/32
+ ip address 5.1.0.1/24
+ ip address 6.0.2.1/24
!
interface ce1-eth0
description to r1
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf
index fb4d8cc9c4..90dd3c55b4 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf
@@ -4,6 +4,8 @@ hostname ce2
!
interface lo
ip address 99.0.0.2/32
+ ip address 5.1.0.1/24
+ ip address 6.0.2.1/24
!
interface ce2-eth0
description to r3
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf
index e316de5690..cf7396eb12 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf
@@ -19,6 +19,7 @@ router bgp 5227
network 5.1.3.0/24 route-map rm-nh
network 6.0.1.0/24 route-map rm-nh
network 6.0.2.0/24 route-map rm-nh-same
+ network 6.0.3.0/24 route-map rm-nh-same
neighbor 192.168.1.1 activate
exit-address-family
!
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf
index 77a1163a4b..df6ac47b08 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf
@@ -4,6 +4,7 @@ hostname ce3
!
interface lo
ip address 99.0.0.3/32
+ ip address 6.0.3.1/24
!
interface ce3-eth0
description to r4
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf
index 60d9e93108..9a6ca08a0b 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf
@@ -19,6 +19,7 @@ router bgp 5228 vrf ce4-cust2
network 5.4.3.0/24 route-map rm-nh
network 6.0.1.0/24 route-map rm-nh
network 6.0.2.0/24 route-map rm-nh-same
+ network 6.0.3.0/24 route-map rm-nh-same
neighbor 192.168.2.1 activate
exit-address-family
!
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf
index e55c9e779a..0e3a736292 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf
@@ -4,6 +4,7 @@ hostname ce4
!
interface ce4-cust2
ip address 99.0.0.4/32
+ ip address 6.0.3.1/24
!
interface ce4-eth0
description to r4
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py
index 5161d8471f..b2bf5f5f63 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py
@@ -175,6 +175,20 @@ def ltemplatePreRouterStartHook():
"setup {0} vrf {0}-cust1, {0}-eth4. enabled mpls input.".format(rtr)
)
# configure cust2 VRFs & MPLS
+ rtrs = ["r1"]
+ cmds = [
+ "ip link add {0}-cust3 type vrf table 20",
+ "ip link set dev {0}-cust3 up",
+ "ip link add {0}-cust4 type vrf table 30",
+ "ip link set dev {0}-cust4 up",
+ "ip link add {0}-cust5 type vrf table 40",
+ "ip link set dev {0}-cust5 up",
+ ]
+ for rtr in rtrs:
+ for cmd in cmds:
+ cc.doCmd(tgen, rtr, cmd.format(rtr))
+ logger.info("setup {0} vrf {0}-cust3 and{0}-cust4.".format(rtr))
+ # configure cust2 VRFs & MPLS
rtrs = ["r4"]
cmds = [
"ip link add {0}-cust2 type vrf table 20",
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf
index 8d42cfc0d8..24e9f95372 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf
@@ -11,6 +11,7 @@ log file bgpd.log debugging
#debug bgp vpn leak-from-vrf
#debug bgp vpn label
#debug bgp updates out
+#debug bgp nht
router bgp 5226
bgp router-id 1.1.1.1
@@ -39,6 +40,11 @@ router bgp 5227 vrf r1-cust1
neighbor 192.168.1.2 timers 3 10
address-family ipv4 unicast
+ network 10.2.3.4/32
+ network 192.0.0.0/24
+
+ redistribute connected
+
neighbor 192.168.1.2 activate
neighbor 192.168.1.2 next-hop-self
@@ -51,5 +57,47 @@ router bgp 5227 vrf r1-cust1
exit-address-family
+router bgp 5228 vrf r1-cust3
+ bgp router-id 192.168.1.1
+
+ address-family ipv4 unicast
+ rd vpn export 10:13
+ rt vpn import 52:100
+
+ import vpn
+ export vpn
+ exit-address-family
+
+
+router bgp 5227 vrf r1-cust4
+ no bgp network import-check
+
+ bgp router-id 192.168.1.1
+
+ address-family ipv4 unicast
+ network 28.0.0.0/24
+
+ rd vpn export 10:14
+ rt vpn export 52:100
+
+ import vpn
+ export vpn
+ exit-address-family
+
+
+router bgp 5227 vrf r1-cust5
+ bgp router-id 192.168.1.1
+
+ address-family ipv4 unicast
+ redistribute connected
+
+ label vpn export 105
+ rd vpn export 10:15
+ rt vpn both 52:100
+
+ import vpn
+ export vpn
+ exit-address-family
+
!
end
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf
new file mode 100644
index 0000000000..59430fdf99
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf
@@ -0,0 +1,6 @@
+hostname r1
+log file staticd.log
+!
+vrf r1-cust1
+ ip route 192.0.0.0/24 192.168.1.2
+exit-vrf
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf
index 221bc7a839..e81bc6b2ab 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf
@@ -4,6 +4,9 @@ hostname r1
password zebra
#debug zebra packet
+#debug zebra rib detailed
+#debug zebra dplane detailed
+#debug zebra nexthop detail
interface lo
ip address 1.1.1.1/32
@@ -18,6 +21,14 @@ interface r1-eth4
ip address 192.168.1.1/24
no link-detect
+interface r1-cust1
+ ip address 10.4.5.6/24
+ no link-detect
+
+interface r1-cust5
+ ip address 29.0.0.1/32
+ no link-detect
+
ip forwarding
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py
index 91a7adf997..89369241a8 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py
@@ -81,3 +81,24 @@ if ret != False and found != None:
"wait",
"CE3->CE4 (loopback) ping",
)
+ luCommand(
+ "r1",
+ "ip vrf exec r1-cust1 ping 6.0.3.1 -I 10.4.5.6 -c 1",
+ " 0. packet loss",
+ "wait",
+ "R1(r1-cust1)->CE3/4 (loopback) ping",
+ )
+ luCommand(
+ "r1",
+ "ip vrf exec r1-cust1 ping 6.0.3.1 -I 10.4.5.6 -c 1",
+ " 0. packet loss",
+ "pass",
+ "R1(r1-cust1)->CE3/4 (loopback) ping",
+ )
+ luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 6.0.3.1 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "pass",
+ "R1(r1-cust5)->CE3/4 ( (loopback) ping",
+ )
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py
index 75158b127e..e9647898ab 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py
@@ -72,3 +72,53 @@ luCommand(
"wait",
"CE4->PE4 ping",
)
+ret = luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 29.0.0.1 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "pass",
+ "Ping its own IP. Check https://bugzilla.kernel.org/show_bug.cgi?id=203483 if it fails",
+)
+luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 192.168.1.1 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "pass",
+ "R1(r1-cust5)->R1(r1-cust1 - r1-eth4) ping",
+)
+luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 192.168.1.2 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "wait",
+ "R1(r1-cust5)->CE1 ping",
+)
+luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 192.168.1.2 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "pass",
+ "R1(r1-cust5)->CE1 ping",
+)
+luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 99.0.0.1 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "pass",
+ "R1(r1-cust5)->CE1 (loopback) ping",
+)
+luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 5.1.0.1 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "wait",
+ "R1(r1-cust5)->CE1 (loopback) ping",
+ time=30,
+)
+luCommand(
+ "r1",
+ "ip vrf exec r1-cust5 ping 5.1.0.1 -I 29.0.0.1 -c 1",
+ " 0. packet loss",
+ "pass",
+ "R1(r1-cust5)->CE1 (loopback) ping",
+)
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py
index 1e2758c1c9..3242e3bd3a 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py
@@ -54,15 +54,44 @@ bgpribRequireUnicastRoutes("ce4", "ipv4", "ce4-cust2", "Cust 4 routes in ce1", w
#
# r1 vtysh -c "show bgp vrf r1-cust1 ipv4"
#
-want_r1_cust1_routes = [
+want_r1_cust1_3_5_routes = [
{"p": "5.1.0.0/24", "n": "99.0.0.1"},
{"p": "5.1.1.0/24", "n": "99.0.0.1"},
{"p": "6.0.1.0/24", "n": "99.0.0.1"},
{"p": "6.0.2.0/24", "n": "99.0.0.1"},
+ {"p": "10.2.3.4/32", "n": "0.0.0.0", "bp": False},
+ {"p": "10.4.5.0/24", "n": "0.0.0.0", "bp": True},
+ {"p": "28.0.0.0/24", "n": "0.0.0.0", "bp": True},
+ {"p": "29.0.0.1/32", "n": "0.0.0.0", "bp": True},
{"p": "99.0.0.1/32", "n": "192.168.1.2"},
+ {"p": "192.0.0.0/24", "n": "0.0.0.0", "bp": True},
+ {"p": "192.168.1.0/24", "n": "0.0.0.0", "bp": True},
]
bgpribRequireUnicastRoutes(
- "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_routes
+ "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_3_5_routes
+)
+bgpribRequireUnicastRoutes(
+ "r1", "ipv4", "r1-cust3", "Customer 3 routes in r1 vrf", want_r1_cust1_3_5_routes
+)
+bgpribRequireUnicastRoutes(
+ "r1", "ipv4", "r1-cust5", "Customer 5 routes in r1 vrf", want_r1_cust1_3_5_routes
+)
+
+want_r1_cust4_routes = [
+ {"p": "5.1.0.0/24", "n": "99.0.0.1", "exist": False},
+ {"p": "5.1.1.0/24", "n": "99.0.0.1", "exist": False},
+ {"p": "6.0.1.0/24", "n": "99.0.0.1", "exist": False},
+ {"p": "6.0.2.0/24", "n": "99.0.0.1", "exist": False},
+ {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False},
+ {"p": "10.4.5.0/24", "n": "0.0.0.0", "exist": False},
+ {"p": "28.0.0.0/24", "n": "0.0.0.0", "bp": True},
+ {"p": "29.0.0.1/32", "n": "0.0.0.0", "exist": False},
+ {"p": "99.0.0.1/32", "n": "192.168.1.2", "exist": False},
+ {"p": "192.0.0.0/24", "n": "0.0.0.0", "exist": False},
+ {"p": "192.168.1.0/24", "n": "0.0.0.0", "exist": False},
+]
+bgpribRequireUnicastRoutes(
+ "r1", "ipv4", "r1-cust4", "Customer 4 routes in r1 vrf", want_r1_cust4_routes
)
want_r3_cust1_routes = [
@@ -70,10 +99,20 @@ want_r3_cust1_routes = [
{"p": "5.1.1.0/24", "n": "99.0.0.2"},
{"p": "6.0.1.0/24", "n": "99.0.0.2"},
{"p": "6.0.2.0/24", "n": "99.0.0.2"},
+ {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False},
+ {"p": "28.0.0.0/24", "n": "1.1.1.1", "bp": True},
+ {"p": "29.0.0.1/32", "n": "1.1.1.1", "bp": True},
{"p": "99.0.0.2/32", "n": "192.168.1.2"},
+ {"p": "192.0.0.0/24", "n": "1.1.1.1", "bp": True},
+ {"p": "192.168.1.0/24", "n": "1.1.1.1", "bp": True},
]
bgpribRequireUnicastRoutes(
- "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_cust1_routes
+ "r3",
+ "ipv4",
+ "r3-cust1",
+ "Customer 1 routes in r3 vrf",
+ want_r3_cust1_routes,
+ retry=30,
)
want_r4_cust1_routes = [
@@ -81,10 +120,20 @@ want_r4_cust1_routes = [
{"p": "5.1.3.0/24", "n": "99.0.0.3"},
{"p": "6.0.1.0/24", "n": "99.0.0.3"},
{"p": "6.0.2.0/24", "n": "99.0.0.3"},
+ {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False},
+ {"p": "28.0.0.0/24", "n": "1.1.1.1", "bp": True},
+ {"p": "29.0.0.1/32", "n": "1.1.1.1", "bp": True},
{"p": "99.0.0.3/32", "n": "192.168.1.2"},
+ {"p": "192.0.0.0/24", "n": "1.1.1.1", "bp": True},
+ {"p": "192.168.1.0/24", "n": "1.1.1.1", "bp": True},
]
bgpribRequireUnicastRoutes(
- "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_cust1_routes
+ "r4",
+ "ipv4",
+ "r4-cust1",
+ "Customer 1 routes in r4 vrf",
+ want_r4_cust1_routes,
+ retry=30,
)
want_r4_cust2_routes = [
@@ -92,10 +141,20 @@ want_r4_cust2_routes = [
{"p": "5.4.3.0/24", "n": "99.0.0.4"},
{"p": "6.0.1.0/24", "n": "99.0.0.4"},
{"p": "6.0.2.0/24", "n": "99.0.0.4"},
+ {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False},
+ {"p": "28.0.0.0/24", "n": "1.1.1.1", "bp": True},
+ {"p": "29.0.0.1/32", "n": "1.1.1.1", "bp": True},
{"p": "99.0.0.4/32", "n": "192.168.2.2"},
+ {"p": "192.0.0.0/24", "n": "1.1.1.1", "bp": True},
+ {"p": "192.168.1.0/24", "n": "1.1.1.1", "bp": True},
]
bgpribRequireUnicastRoutes(
- "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_cust2_routes
+ "r4",
+ "ipv4",
+ "r4-cust2",
+ "Customer 2 routes in r4 vrf",
+ want_r4_cust2_routes,
+ retry=30,
)
########################################################################
@@ -667,7 +726,7 @@ bgpribRequireUnicastRoutes(
luCommand(
"ce1",
'vtysh -c "show bgp ipv4 uni"',
- "12 routes and 12",
+ "18 routes and 19",
"wait",
"Local and remote routes",
10,
@@ -689,7 +748,7 @@ bgpribRequireUnicastRoutes(
luCommand(
"ce2",
'vtysh -c "show bgp ipv4 uni"',
- "12 routes and 15",
+ "18 routes and 22",
"wait",
"Local and remote routes",
10,
@@ -721,7 +780,7 @@ luCommand("r4", 'vtysh -c "show ip route vrf r4-cust2"')
luCommand(
"ce3",
'vtysh -c "show bgp ipv4 uni"',
- "12 routes and 13",
+ "18 routes and 19",
"wait",
"Local and remote routes",
10,
@@ -743,7 +802,7 @@ bgpribRequireUnicastRoutes(
luCommand(
"ce4",
'vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"',
- "12 routes and 14",
+ "18 routes and 21",
"wait",
"Local and remote routes",
10,
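
For readers following the new want-lists above: "p" is the prefix and "n" the expected nexthop, while the added "bp" key appears to assert whether the path is the bestpath and "exist": False appears to require that the prefix be absent; retry=30 presumably re-polls until the expectation holds. The sketch below only illustrates those assumed semantics against "show bgp vrf <vrf> ipv4 json" output; it is not the actual lib/bgprib.py matcher.

def route_matches(rib_json, want):
    # Illustrative matcher for one want-dict (assumed semantics, sketch only).
    paths = rib_json.get("routes", {}).get(want["p"], [])
    if want.get("exist") is False:
        return len(paths) == 0  # the prefix must not be present at all
    for path in paths:
        nh_ok = any(
            nh.get("ip", "0.0.0.0") == want["n"] for nh in path.get("nexthops", [])
        )
        bp_ok = "bp" not in want or bool(path.get("bestpath", False)) == want["bp"]
        if nh_ok and bp_ok:
            return True
    return False
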
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf
new file mode 100644
index 0000000000..823a56d53f
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf
@@ -0,0 +1,9 @@
+log file zebra.log
+!
+hostname ce1
+!
+interface eth0
+ ip address 172.16.0.1/24
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf
new file mode 100644
index 0000000000..15779aa0d5
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf
@@ -0,0 +1,41 @@
+frr defaults traditional
+!
+hostname pe1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+router bgp 65001
+ bgp router-id 192.0.2.1
+ !
+ segment-routing srv6
+ locator default
+ exit
+ !
+!
+router bgp 65001 vrf vrf10
+ bgp router-id 192.0.2.1
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ sid vpn export auto
+ rd vpn export 65001:10
+ rt vpn both 0:10
+ import vpn
+ export vpn
+ exit-address-family
+ !
+!
+router bgp 65001 vrf vrf20
+ bgp router-id 192.0.2.1
+ !
+ address-family ipv4 unicast
+ rd vpn export 65001:20
+ rt vpn both 0:10
+ import vpn
+ export vpn
+ exit-address-family
+ !
+!
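
The leak in this configuration is driven entirely by the matching route targets: vrf10 exports its connected 172.16.0.0/24 into the VPN table with RT 0:10 (plus an SRv6 SID from the "default" locator), and vrf20 imports anything carrying RT 0:10. A minimal way to spot-check this on a running pe1, assuming vtysh is on PATH and mirroring the show command the topotest uses later, could be:

import json
import subprocess

def leaked_prefix_present(prefix="172.16.0.0/24"):
    # Ask bgpd whether the leaked prefix shows up in vrf20 (sketch only).
    out = subprocess.run(
        ["vtysh", "-c", "show bgp vrf vrf20 ipv4 unicast json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return prefix in json.loads(out).get("routes", {})

print("leaked:", leaked_prefix_present())
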
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json
new file mode 100644
index 0000000000..dc86d7c978
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json
@@ -0,0 +1,31 @@
+{
+ "vrfName": "default",
+ "routerId": "192.0.2.1",
+ "localAS": 65001,
+ "routes": {
+ "routeDistinguishers": {
+ "65001:10": {
+ "172.16.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "172.16.0.0",
+ "prefixLen": 24,
+ "network": "172.16.0.0\/24",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "hostname": "pe1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json
new file mode 100644
index 0000000000..ce2d5c19c3
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json
@@ -0,0 +1,25 @@
+{
+ "vrfName": "vrf10",
+ "routerId": "192.0.2.1",
+ "localAS": 65001,
+ "routes": {
+ "172.16.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "172.16.0.0",
+ "prefixLen": 24,
+ "network": "172.16.0.0\/24",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "hostname": "pe1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json
new file mode 100644
index 0000000000..2ce936b291
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json
@@ -0,0 +1,22 @@
+{
+ "172.16.0.0\/24": [
+ {
+ "prefix": "172.16.0.0\/24",
+ "prefixLen": 24,
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "installed": true,
+ "nexthops": [
+ {
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "vrf10",
+ "vrf": "vrf10",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json
new file mode 100644
index 0000000000..6a88d39a8c
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json
@@ -0,0 +1,27 @@
+{
+ "vrfName": "vrf20",
+ "routerId": "192.0.2.1",
+ "localAS": 65001,
+ "routes": {
+ "172.16.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "172.16.0.0",
+ "prefixLen": 24,
+ "network": "172.16.0.0\/24",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "hostname": "pe1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf
new file mode 100644
index 0000000000..52341fc4fc
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf
@@ -0,0 +1,27 @@
+log file zebra.log
+!
+hostname pe1
+!
+interface lo
+ ip address 10.0.0.1/32
+!
+interface eth0 vrf vrf10
+ ip address 172.16.0.254/24
+!
+line vty
+!
+segment-routing
+ srv6
+ locators
+ locator default
+ prefix 2001:db8:2::/64 block-len 40 node-len 24 func-bits 16
+ exit
+ !
+ exit
+ !
+ exit
+ !
+exit
+!
+end
+!
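
A quick bit-budget check on the locator above: block-len 40 plus node-len 24 matches the 64-bit length of 2001:db8:2::/64, and with 16 function bits the SID structure uses 40 + 24 + 16 = 80 of the available 128 bits. The small sketch below encodes that arithmetic; the block+node == prefix-length constraint reflects my reading of FRR's locator format and should be treated as an assumption:

def check_locator(prefix_len=64, block=40, node=24, func=16, arg=0):
    # Sanity-check an SRv6 locator's bit allocation (sketch, assumed rules).
    assert block + node == prefix_len, "block + node should equal the locator prefix length"
    assert block + node + func + arg <= 128, "SID structure must fit in 128 bits"
    return 128 - (block + node + func + arg)  # unused trailing bits

print("spare bits:", check_locator())  # 48 for the values in this zebra.conf
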
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py b/tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py
new file mode 100755
index 0000000000..16f8adb3c5
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2022, LINE Corporation
+# Authored by Ryoga Saito <ryoga.saito@linecorp.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import os
+import re
+import sys
+import json
+import functools
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import required_linux_kernel_version
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ tgen.add_router("pe1")
+ tgen.add_router("ce1")
+
+ tgen.add_link(tgen.gears["pe1"], tgen.gears["ce1"], "eth0", "eth0")
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ for rname, router in tgen.routers().items():
+ router.load_config(TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_BGP,
+ os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+
+ tgen.gears["pe1"].run("ip link add vrf10 type vrf table 10")
+ tgen.gears["pe1"].run("ip link set vrf10 up")
+ tgen.gears["pe1"].run("ip link add vrf20 type vrf table 20")
+ tgen.gears["pe1"].run("ip link set vrf20 up")
+ tgen.gears["pe1"].run("ip link set eth0 master vrf10")
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def open_json_file(path):
+ try:
+ with open(path, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(path)
+
+
+def check(name, command, checker):
+ tgen = get_topogen()
+ router = tgen.gears[name]
+
+ def _check():
+ try:
+ return checker(router.vtysh_cmd(command))
+ except:
+ return False
+
+ logger.info('[+] check {} "{}"'.format(name, command))
+ _, result = topotest.run_and_expect(_check, None, count=10, wait=0.5)
+ assert result is None, "Failed"
+
+
+def check_vrf10_bgp_rib(output):
+ expected = open_json_file("%s/pe1/results/vrf10_ipv4_unicast.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def check_default_bgp_vpn_rib(output):
+ expected = open_json_file("%s/pe1/results/default_ipv4_vpn.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def check_vrf20_bgp_rib(output):
+ expected = open_json_file("%s/pe1/results/vrf20_ipv4_unicast.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def check_vrf20_rib(output):
+ expected = open_json_file("%s/pe1/results/vrf20_ipv4.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def test_rib():
+ check("pe1", "show bgp vrf vrf10 ipv4 unicast json", check_vrf10_bgp_rib)
+ check("pe1", "show bgp ipv4 vpn json", check_default_bgp_vpn_rib)
+ check("pe1", "show bgp vrf vrf20 ipv4 unicast json", check_vrf20_bgp_rib)
+ check("pe1", "show ip route vrf vrf20 json", check_vrf20_rib)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_unique_rid/bgp_unique_rid.json b/tests/topotests/bgp_unique_rid/bgp_unique_rid.json
new file mode 100644
index 0000000000..c42ce29954
--- /dev/null
+++ b/tests/topotests/bgp_unique_rid/bgp_unique_rid.json
@@ -0,0 +1,505 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r5": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r4-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {},
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {},
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ },
+ "ospf": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r4": {},
+ "r5": {}
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {},
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {},
+ "r4-link5": {},
+ "r4-link6": {},
+ "r4-link7": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {},
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {},
+ "r4-link5": {},
+ "r4-link6": {},
+ "r4-link7": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "10.10.10.10",
+ "neighbors": {
+ "r3": {}
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.5",
+ "neighbors": {
+ "r3": {}
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
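
This topology JSON is consumed by the topotest framework: the test script below hands it to Topogen and then calls build_config_from_json to turn it into per-router configuration. For quick offline inspection, a stdlib-only sketch like the following summarizes the routers and their BGP AS numbers (the file name is the one added here; no topotest imports are required):

import json

with open("bgp_unique_rid.json") as f:
    topo = json.load(f)

for name, rtr in sorted(topo["routers"].items()):
    bgp = rtr.get("bgp", {})
    # "bgp" is a single dict in this file and a list in the *_vrf.json variant.
    instances = bgp if isinstance(bgp, list) else [bgp]
    ases = [inst.get("local_as") for inst in instances if inst]
    print(name, "AS:", ", ".join(str(a) for a in ases) or "-")
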
diff --git a/tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json b/tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json
new file mode 100644
index 0000000000..1e280f1847
--- /dev/null
+++ b/tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json
@@ -0,0 +1,529 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "GREEN"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "GREEN"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "GREEN"
+ }
+ },
+ "bgp": [
+ {
+ "local_as": "100",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "vrfs": [
+ {
+ "name": "GREEN",
+ "id": "1"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "GREEN"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "GREEN"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "GREEN"
+ }
+ },
+ "bgp": [
+ {
+ "local_as": "100",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "vrfs": [
+ {
+ "name": "GREEN",
+ "id": "1"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "GREEN"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "GREEN"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "GREEN"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r5": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ },
+ "r4-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": [
+ {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {},
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {},
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "100",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "100",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ },
+ {
+ "name": "GREEN",
+ "id": "2"
+ }
+ ]
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": [{
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {},
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {},
+ "r4-link5": {},
+ "r4-link6": {},
+ "r4-link7": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {},
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {},
+ "r4-link5": {},
+ "r4-link6": {},
+ "r4-link7": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }]
+ },
+ "r5": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "RED"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ }
+ },
+ "bgp": [
+ {
+ "local_as": "300",
+ "vrf":"RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py
new file mode 100644
index 0000000000..7156310536
--- /dev/null
+++ b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py
@@ -0,0 +1,906 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import sys
+import time
+import pytest
+import inspect
+import os
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+"""Following tests are covered to test bgp unique rid functionality.
+1. Verify eBGP session when same and different router ID is configured.
+2. Verify iBGP session when same and different router ID is configured.
+3. Verify two different eBGP sessions initiated with same router ID.
+4. Chaos - Verify bgp unique rid functionality in chaos scenarios.
+5. Chaos - Verify bgp unique rid functionality when router reboots with same loopback id.
+6. Chaos - Verify bgp unique rid functionality when router reboots without any ip addresses.
+"""
+
+#################################
+# TOPOLOGY
+#################################
+"""
+
+ +-------+
+ +--------- | R2 |
+ | +-------+
+ |iBGP |
+ +-------+ |
+ | R1 | |iBGP
+ +-------+ |
+ | |
+ | iBGP +-------+ eBGP +-------+
+ +---------- | R3 |========= | R4 |
+ +-------+ +-------+
+ |
+ |eBGP
+ |
+ +-------+
+ | R5 |
+ +-------+
+
+
+"""
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd, pytest.mark.staticd]
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ step,
+ write_test_footer,
+ verify_rib,
+ check_address_types,
+ reset_config_on_routers,
+ check_router_status,
+ stop_router,
+ kill_router_daemons,
+ start_router_daemons,
+ start_router,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp_and_verify,
+)
+
+# Global variables
+topo = None
+bgp_convergence = False
+NETWORK = {
+ "ipv4": [
+ "192.168.20.1/32",
+ "192.168.20.2/32",
+ "192.168.21.1/32",
+ "192.168.21.2/32",
+ "192.168.22.1/32",
+ "192.168.22.2/32",
+ ],
+ "ipv6": [
+ "fc07:50::1/128",
+ "fc07:50::2/128",
+ "fc07:150::1/128",
+ "fc07:150::2/128",
+ "fc07:1::1/128",
+ "fc07:1::2/128",
+ ],
+}
+
+bgp_convergence = False
+ADDR_TYPES = check_address_types()
+routerid = {"ipv4": "10.10.10.14", "ipv6": "fd00:0:0:3::2"}
+
+
+def setup_module(mod):
+ """setup_module.
+
+ Set up the pytest environment
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_unique_rid.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global bgp_convergence
+ global ADDR_TYPES
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+# Tests starting
+#####################################################
+
+
+def test_bgp_unique_rid_ebgp_p0():
+ """
+ TC: 1
+ Verify eBGP session when same and different router ID is configured.
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R4 and R3 10.10.10.10")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10"}},
+ "r4": {"bgp": {"router_id": "10.10.10.10"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R5 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10"}},
+ "r5": {"bgp": {"router_id": "10.10.10.10"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("modify the router id on r3 to different router id (11.11.11.11)")
+ input_dict = {"r3": {"bgp": {"router_id": "11.11.11.11"}}}
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Reset bgp process")
+ step("Verify neighbours are in ESTAB state.")
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear ip bgp process with *")
+ step("Verify neighbours are in ESTAB state.")
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure neighbours between R3 and R4 in EVPN address family.")
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "l2vpn": {
+ "evpn": {
+ "advertise": {
+ "ipv4": {"unicast": {}},
+ "ipv6": {"unicast": {}},
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "l2vpn": {
+ "evpn": {
+ "advertise": {
+ "ipv4": {"unicast": {}},
+ "ipv6": {"unicast": {}},
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_ibgp_p0():
+ """
+ TC: 2
+ Verify iBGP session when same and different router ID is configured.
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R1 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10"}},
+ "r1": {"bgp": {"router_id": "10.10.10.10"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in idle state.")
+ result = verify_bgp_convergence(tgen, topo, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure the same router id between R2 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10"}},
+ "r2": {"bgp": {"router_id": "10.10.10.10"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in idle state.")
+ result = verify_bgp_convergence(tgen, topo, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("modify the router id on r3 to different router id (11.11.11.11)")
+ input_dict = {"r3": {"bgp": {"router_id": "11.11.11.11"}}}
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo, dut="r3")
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Reset bgp process")
+ step("Verify neighbours are in ESTAB state.")
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear ip bgp process with *")
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_multi_bgp_nbrs_p0():
+ """
+ TC: 3
+ 3. Verify two different eBGP sessions initiated with same router ID
+
+ """
+ tgen = get_topogen()
+ global bgp_convergence, topo
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R3, R4 and R5 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10"}},
+ "r4": {"bgp": {"router_id": "10.10.10.10"}},
+ "r5": {"bgp": {"router_id": "10.10.10.10"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure the same IP address on on R4 and R5 loopback address and \
+ change the neighborship to loopback neighbours between R3 to R4 \
+ and R3 to R5 respectively."
+ )
+
+ topo1 = deepcopy(topo)
+
+ for rtr in ["r4", "r5"]:
+ topo1["routers"][rtr]["links"]["lo"]["ipv4"] = "192.168.1.1/32"
+
+ topo1["routers"]["r3"]["links"]["lo"]["ipv4"] = "192.168.1.3/32"
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ step(
+ "change the neighborship to loopback neighbours between R3 to R4 and R3 to R5 respectively."
+ )
+ for rtr in ["r4", "r5"]:
+ configure_bgp_on_rtr = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"neighbor": {rtr: {"dest_link": {"lo": {}}}}}
+ }
+ }
+ },
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_rtr)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Change the IP address on the R4 loopback.")
+ topo1["routers"]["r4"]["links"]["lo"]["ipv4"] = "192.168.1.4/32"
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ step("Verify neighbours should be again in ESTAB state. (show ip bgp neighbours)")
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Clear ip bgp process with *")
+ result = clear_bgp_and_verify(tgen, topo, router="r3")
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_chaos1_p2():
+ """
+ TC: 4
+ 4. Chaos - Verify bgp unique rid functionality in chaos scenarios.
+
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R3, R4 and R5 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10"}},
+ "r4": {"bgp": {"router_id": "10.10.10.10"}},
+ "r5": {"bgp": {"router_id": "10.10.10.10"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify eBGP session when same router ID is configured and bgpd process is restarted"
+ )
+
+ # restart bgpd router and verify
+ kill_router_daemons(tgen, "r3", ["bgpd"])
+ start_router_daemons(tgen, "r3", ["bgpd"])
+
+ step(
+ "The session should be established between R3 & R4. "
+ "Once after restart bgp, neighbor should come back up ."
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(
+ "Verify eBGP session when same router ID is configured and neighbor shutdown is issued and again no shutdown."
+ )
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3-link1": {"shutdown": True},
+ "r3-link2": {"shutdown": True},
+ "r3-link3": {"shutdown": True},
+ "r3-link4": {"shutdown": True},
+ "r3-link5": {"shutdown": True},
+ "r3-link6": {"shutdown": True},
+ "r3-link7": {"shutdown": True},
+ }
+ },
+ "r5": {"dest_link": {"r3": {"shutdown": True}}},
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3-link1": {"shutdown": True},
+ "r3-link2": {"shutdown": True},
+ "r3-link3": {"shutdown": True},
+ "r3-link4": {"shutdown": True},
+ "r3-link5": {"shutdown": True},
+ "r3-link6": {"shutdown": True},
+ "r3-link7": {"shutdown": True},
+ }
+ },
+ "r5": {"dest_link": {"r3": {"shutdown": True}}},
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3-link1": {"shutdown": False},
+ "r3-link2": {"shutdown": False},
+ "r3-link3": {"shutdown": False},
+ "r3-link4": {"shutdown": False},
+ "r3-link5": {"shutdown": False},
+ "r3-link6": {"shutdown": False},
+ "r3-link7": {"shutdown": False},
+ }
+ },
+ "r5": {"dest_link": {"r3": {"shutdown": False}}},
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3-link1": {"shutdown": False},
+ "r3-link2": {"shutdown": False},
+ "r3-link3": {"shutdown": False},
+ "r3-link4": {"shutdown": False},
+ "r3-link5": {"shutdown": False},
+ "r3-link6": {"shutdown": False},
+ "r3-link7": {"shutdown": False},
+ }
+ },
+ "r5": {"dest_link": {"r3": {"shutdown": False}}},
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "The session should be established between R3 & R4. "
+ "Once after restart bgp, neighbor should come back up ."
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(
+ "Verify eBGP session when same router ID is configured and neighbor config is deleted & reconfigured."
+ )
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ }
+ },
+ "r5": {"dest_link": {"r3": {}}},
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ }
+ },
+ "r5": {"dest_link": {"r3": {}}},
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "The session should be established between R3 & R4. "
+ "Once after restart bgp, neighbor should come back up ."
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(
+ "Verify eBGP session when same router ID is configured and FRR router is restarted."
+ )
+ stop_router(tgen, "r3")
+ start_router(tgen, "r3")
+
+ step(
+ "The session should be established between R3 & R4. "
+ "Once after restart bgp, neighbor should come back up ."
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(
+ "Verify eBGP session when same router ID is configured and zebra process is restarted"
+ )
+
+ kill_router_daemons(tgen, "r3", ["zebra"])
+ start_router_daemons(tgen, "r3", ["zebra"])
+
+ step(
+ "The session should be established between R3 & R4. "
+ "Once after restart bgp, neighbor should come back up ."
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_chaos3_p2():
+ """
+ TC: 5
+ 5. Chaos - Verify bgp unique rid functionality when router reboots with same loopback id.
+
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ global topo
+ topo1 = deepcopy(topo)
+
+ for rtr in topo["routers"].keys():
+ topo1["routers"][rtr]["links"]["lo"]["ipv4"] = "192.168.1.1/32"
+
+ topo1["routers"]["r3"]["links"]["lo"]["ipv4"] = "192.168.1.3/32"
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ step("verify bgp convergence before starting test case")
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo1)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(
+ "Configure loopback on R1 to R5 with IP address 1.1.1.1 on all the routers. Change neighborship on all the routers using loopback neighborship ids."
+ )
+ for rtr in ["r1", "r2", "r4", "r5"]:
+ configure_bgp_on_rtr = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"neighbor": {rtr: {"dest_link": {"lo": {}}}}}
+ }
+ }
+ },
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_rtr)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Reboot the router (restart frr) or using watch frr.")
+ stop_router(tgen, "r3")
+ start_router(tgen, "r3")
+
+ step("Neighbors between R3, R4 and R3 to R5 should be in ESTB state.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Clear bgp process.")
+ clear_bgp_and_verify(tgen, topo, "r3")
+
+ step("Neighbors between R3, R4 and R3 to R5 should be in ESTB state.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_chaos4_p2():
+ """
+ TC: 6
+ 6. Chaos - Verify bgp unique rid functionality when router reboots without any ip addresses.
+
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ reset_config_on_routers(tgen)
+
+ global topo
+ topo1 = deepcopy(topo)
+ topo2 = deepcopy(topo)
+
+ step(
+ "Configure base config as per the topology without loopback as well as Ip address on any of the interface."
+ )
+ for rtr in topo["routers"].keys():
+ for intf in topo["routers"][rtr]["links"].keys():
+ topo1["routers"][rtr]["links"][intf].pop("ipv4")
+ topo1["routers"][rtr]["links"][intf].pop("ipv6")
+ if intf is "lo":
+ topo1["routers"][rtr]["links"][intf].pop("ipv4")
+
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Configure the ip addresses on the physical interfaces")
+ build_config_from_json(tgen, topo2, save_bkup=False)
+
+ step("All the neighbors should be in ESTAB state.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Configure loopback addresses with higher IP address ")
+ build_config_from_json(tgen, topo, save_bkup=False)
+
+ step("All the neighbors should be in ESTAB state.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Reboot the router (restart frr) or using watch frr.")
+ stop_router(tgen, "r3")
+ start_router(tgen, "r3")
+
+ step("Neighbors between R3, R4 and R3 to R5 should be in ESTB state.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py
new file mode 100644
index 0000000000..e009efee87
--- /dev/null
+++ b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py
@@ -0,0 +1,479 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import sys
+import time
+import pytest
+import inspect
+import os
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+"""Following tests are covered to test bgp unique rid functionality.
+1. Verify iBGP session when same and different router ID is configured in user VRF(GREEN).
+2. Verify eBGP session when same and different router ID is configured in user vrf (VRF RED)
+3. Verify two different eBGP sessions initiated with same router ID in user VRf (RED and GREEN)
+"""
+
+#################################
+# TOPOLOGY
+#################################
+"""
+
+ +-------+
+ +--------- | R2 |
+ | +-------+
+ |iBGP |
+ +-------+ |
+ | R1 | |iBGP
+ +-------+ |
+ | |
+ | iBGP +-------+ eBGP +-------+
+ +---------- | R3 |========= | R4 |
+ +-------+ +-------+
+ |
+ |eBGP
+ |
+ +-------+
+ | R5 |
+ +-------+
+
+
+"""
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ step,
+ write_test_footer,
+ check_address_types,
+ reset_config_on_routers,
+ check_router_status,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp_and_verify,
+)
+
+# Global variables
+topo = None
+bgp_convergence = False
+NETWORK = {
+ "ipv4": [
+ "192.168.20.1/32",
+ "192.168.20.2/32",
+ "192.168.21.1/32",
+ "192.168.21.2/32",
+ "192.168.22.1/32",
+ "192.168.22.2/32",
+ ],
+ "ipv6": [
+ "fc07:50::1/128",
+ "fc07:50::2/128",
+ "fc07:150::1/128",
+ "fc07:150::2/128",
+ "fc07:1::1/128",
+ "fc07:1::2/128",
+ ],
+}
+
+bgp_convergence = False
+ADDR_TYPES = check_address_types()
+
+
+def setup_module(mod):
+ """setup_module.
+
+ Set up the pytest environment
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_unique_rid_vrf.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global bgp_convergence
+ global ADDR_TYPES
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    # API call to verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+# Tests starting
+#####################################################
+
+
+def test_bgp_unique_rid_ebgp_vrf_p0():
+ """
+ TC: 1
+    Verify eBGP sessions when the same and different router IDs are configured in user VRF (RED).
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Configure the same router id between R4 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ "r4": {"bgp": {"router_id": "10.10.10.10", "local_as": 200, "vrf": "RED"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R5 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ "r5": {"bgp": {"router_id": "10.10.10.10", "local_as": 300, "vrf": "RED"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Modify the router id on R3 to a different router id (11.11.11.11)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "11.11.11.11", "local_as": 100, "vrf": "RED"}}
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Reset bgp process")
+ step("Verify neighbours are in ESTAB state.")
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, router="r3")
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear ip bgp process with *")
+ step("Verify neighbours are in ESTAB state.")
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure neighbours between R3 and R4 in EVPN address family.")
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "vrf": "RED",
+ "address_family": {
+ "l2vpn": {
+ "evpn": {
+ "advertise": {
+ "ipv4": {"unicast": {}},
+ "ipv6": {"unicast": {}},
+ }
+ }
+ }
+ },
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 200,
+ "vrf": "RED",
+ "address_family": {
+ "l2vpn": {
+ "evpn": {
+ "advertise": {
+ "ipv4": {"unicast": {}},
+ "ipv6": {"unicast": {}},
+ }
+ }
+ }
+ },
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_ibgp_vrf_p0():
+ """
+ TC: 2
+    Verify iBGP sessions when the same and different router IDs are configured in user VRF (RED).
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R1 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ "r1": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R2 and R3 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ "r2": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Modify the router id on R3 to a different router id (11.11.11.11)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "11.11.11.11", "local_as": 100, "vrf": "RED"}}
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo, dut="r3")
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Reset bgp process")
+ step("Verify neighbours are in ESTAB state.")
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear ip bgp process with *")
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_unique_rid_multi_bgp_nbrs_vrf_p0():
+ """
+ TC: 3
+    Verify two different eBGP sessions initiated with the same router ID in user VRFs (RED and GREEN)
+
+ """
+ tgen = get_topogen()
+ global bgp_convergence, topo
+
+ if bgp_convergence is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Base config should be up, verify using BGP convergence on all \
+ the routers for IPv4 and IPv6 nbrs"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure the same router id between R3, R4 and R5 (10.10.10.10)")
+ input_dict = {
+ "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}},
+ "r4": {"bgp": {"router_id": "10.10.10.10", "local_as": 200, "vrf": "RED"}},
+ "r5": {"bgp": {"router_id": "10.10.10.10", "local_as": 300, "vrf": "RED"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify neighbours are in ESTAB state.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+        "Configure the same IP address on the R4 and R5 loopback interfaces and \
+        change the neighborship to loopback neighbours between R3 and R4 \
+        and between R3 and R5 respectively."
+ )
+
+ topo1 = deepcopy(topo)
+
+ for rtr in ["r4", "r5"]:
+ topo1["routers"][rtr]["links"]["lo"]["ipv4"] = "192.168.1.1/32"
+
+ topo1["routers"]["r3"]["links"]["lo"]["ipv4"] = "192.168.1.3/32"
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ step(
+        "Change the neighborship to loopback neighbours between R3 and R4 and between R3 and R5 respectively."
+ )
+ for rtr in ["r4", "r5"]:
+ configure_bgp_on_rtr = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {"neighbor": {rtr: {"dest_link": {"lo": {}}}}}
+ }
+ },
+ },
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_rtr)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Change the IP address on the R4 loopback.")
+ topo1["routers"]["r4"]["links"]["lo"]["ipv4"] = "192.168.1.4/32"
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+    step("Verify neighbours are again in ESTAB state (show ip bgp neighbours).")
+ bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3")
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Clear ip bgp process with *")
+ result = clear_bgp_and_verify(tgen, topo, router="r3")
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf b/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf
index 03dfbf9322..0540a62096 100644
--- a/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf
+++ b/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf
@@ -1,5 +1,11 @@
hostname r1
+
+#debug bgp vpn leak-to-vrf
+#debug bgp vpn leak-from-vrf
+#debug bgp nht
+
+
router bgp 99 vrf DONNA
no bgp ebgp-requires-policy
address-family ipv4 unicast
diff --git a/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf b/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf
index 35038557df..731a00829d 100644
--- a/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf
+++ b/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf
@@ -16,3 +16,9 @@ int dummy4
ip address 10.0.3.1/24
no shut
!
+int EVA
+ no shut
+!
+int DONNA
+ no shut
+!
diff --git a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
index 191a0b53ec..be07c85997 100644
--- a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
+++ b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
@@ -29,6 +29,7 @@ import os
import sys
from functools import partial
import pytest
+import time
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
@@ -77,7 +78,117 @@ def teardown_module(mod):
tgen.stop_topology()
-def test_vrf_route_leak():
+def check_bgp_rib(router, vrf, in_fib):
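+    # Build the expected "show ip route vrf <vrf> json" output for the leaked
+    # prefixes and poll zebra until it matches; in_fib selects whether the BGP
+    # routes are expected to be installed in the FIB.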
+ if in_fib:
+ attr = [{"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}]
+ else:
+ attr = [{"protocol": "bgp", "nexthops": []}]
+
+ if vrf == "DONNA":
+ expect = {
+ "10.0.0.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ "10.0.1.0/24": attr,
+ "10.0.2.0/24": [{"protocol": "connected"}],
+ "10.0.3.0/24": attr,
+ }
+ else:
+ expect = {
+ "10.0.0.0/24": attr,
+ "10.0.1.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ "10.0.2.0/24": attr,
+ "10.0.3.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route vrf %s json" % vrf, expect
+ )
+ return topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+
+
+def check_bgp_fib(router, vrf, in_rib):
+ # Check FIB
+ # DONNA
+ # 10.0.1.0/24 dev EVA proto bgp metric 20
+ # 10.0.3.0/24 dev EVA proto bgp metric 20
+ # EVA
+ # 10.0.0.0/24 dev DONNA proto bgp metric 20
+ # 10.0.2.0/24 dev DONNA proto bgp metric 20
+
+ if vrf == "DONNA":
+ table = 1001
+ nh_vrf = "EVA"
+ else:
+ table = 1002
+ nh_vrf = "DONNA"
+
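+    # When the routes are expected to be absent, prefix the command with "!"
+    # so the shell inverts the pipeline's exit status and cmd_raises succeeds
+    # only when grep finds nothing.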
+ negate = "" if in_rib else "! "
+
+ cmd = "%sip route show table %s | grep %s" % (negate, table, nh_vrf)
+ result = False
+ retry = 5
+ output = ""
+ while retry:
+ retry -= 1
+ try:
+ output = router.cmd_raises(cmd)
+ result = True
+ break
+ except:
+ time.sleep(0.1)
+
+ logger.info("VRF %s leaked FIB content %s: %s", vrf, cmd, output)
+
+ return result, output
+
+
+def check_bgp_ping(router, vrf):
+ if vrf == "DONNA":
+ cmd = "ip vrf exec DONNA ping -c1 10.0.1.1 -I 10.0.0.1"
+ else:
+ cmd = "ip vrf exec EVA ping -c1 10.0.0.1 -I 10.0.1.1"
+
+ result = False
+ retry = 5
+ output = ""
+ while retry:
+ retry -= 1
+ try:
+ output = router.cmd_raises(cmd)
+ result = True
+ break
+ except:
+ time.sleep(0.1)
+
+ return result, output
+
+
+def check_bgp_ping_own_ip(router):
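+    # Ping the VRF's own address from inside VRF DONNA; a failure here points
+    # at the kernel issue tracked at
+    # https://bugzilla.kernel.org/show_bug.cgi?id=203483 rather than at BGP
+    # route leaking.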
+ cmd = "ip vrf exec DONNA ping -c1 10.0.0.1 -I 10.0.0.1"
+
+ output = ""
+ try:
+ output = router.cmd_raises(cmd)
+ result = True
+ except:
+ result = False
+ pass
+
+ return result, output
+
+
+def test_vrf_route_leak_test1():
logger.info("Ensure that routes are leaked back and forth")
tgen = get_topogen()
# Don't run this test if we have any failure.
@@ -86,53 +197,86 @@ def test_vrf_route_leak():
r1 = tgen.gears["r1"]
- # Test DONNA VRF.
- expect = {
- "10.0.0.0/24": [
- {
- "protocol": "connected",
- }
- ],
- "10.0.1.0/24": [
- {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
- ],
- "10.0.2.0/24": [{"protocol": "connected"}],
- "10.0.3.0/24": [
- {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
- ],
- }
-
- test_func = partial(
- topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ result, output = check_bgp_ping_own_ip(r1)
+ assert (
+ result
+ ), "Ping from VRF fails - check https://bugzilla.kernel.org/show_bug.cgi?id=203483\n:{}".format(
+ output
)
- result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
- assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
-
- # Test EVA VRF.
- expect = {
- "10.0.0.0/24": [
- {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
- ],
- "10.0.1.0/24": [
- {
- "protocol": "connected",
- }
- ],
- "10.0.2.0/24": [
- {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
- ],
- "10.0.3.0/24": [
- {
- "protocol": "connected",
- }
- ],
- }
- test_func = partial(
- topotest.router_json_cmp, r1, "show ip route vrf EVA json", expect
+ for vrf in ["EVA", "DONNA"]:
+ result, diff = check_bgp_rib(r1, vrf, True)
+ assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff)
+ result, output = check_bgp_fib(r1, vrf, True)
+ assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output)
+ result, output = check_bgp_ping(r1, vrf)
+ assert result, "Ping from VRF {} failed:\n{}".format(vrf, output)
+
+
+def test_vrf_route_leak_test2():
+ logger.info(
+        "Ensure that leaked routes are still present after VRF iface IP address deletion"
)
- result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
- assert result, "BGP VRF EVA check failed:\n{}".format(diff)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ logger.info("Adding and removing an IPv4 address to EVA and DONNA VRF ifaces")
+ r1.cmd("ip address add 1.1.1.1/32 dev EVA && ip address del 1.1.1.1/32 dev EVA")
+ r1.cmd("ip address add 2.2.2.2/32 dev DONNA && ip address del 2.2.2.2/32 dev DONNA")
+
+ for vrf in ["EVA", "DONNA"]:
+ result, diff = check_bgp_rib(r1, vrf, True)
+ assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff)
+ result, output = check_bgp_fib(r1, vrf, True)
+ assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output)
+ result, output = check_bgp_ping(r1, vrf)
+ assert result, "Ping from VRF {} failed:\n{}".format(vrf, output)
+
+
+def test_vrf_route_leak_test3():
+ logger.info("Ensure that setting down the VRF ifaces invalidates leaked routes")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ logger.info("Setting down EVA and DONNA VRF ifaces")
+ r1.cmd("ip link set EVA down")
+ r1.cmd("ip link set DONNA down")
+
+ for vrf in ["EVA", "DONNA"]:
+ result, diff = check_bgp_rib(r1, vrf, False)
+ assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff)
+ result, output = check_bgp_fib(r1, vrf, False)
+ assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output)
+
+
+def test_vrf_route_leak_test4():
+ logger.info("Ensure that setting up the VRF ifaces validates leaked routes")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ logger.info("Setting up EVA and DONNA VRF ifaces")
+ r1.cmd("ip link set EVA up")
+ r1.cmd("ip link set DONNA up")
+
+ for vrf in ["EVA", "DONNA"]:
+ result, diff = check_bgp_rib(r1, vrf, True)
+ assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff)
+ result, output = check_bgp_fib(r1, vrf, True)
+ assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output)
+ result, output = check_bgp_ping(r1, vrf)
+ assert result, "Ping from VRF {} failed:\n{}".format(vrf, output)
def test_memory_leak():
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index a09b0b8b2e..2be0f5773b 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -1909,7 +1909,7 @@ def clear_bgp(tgen, addr_type, router, vrf=None, neighbor=None):
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
-def clear_bgp_and_verify(tgen, topo, router):
+def clear_bgp_and_verify(tgen, topo, router, rid=None):
"""
This API is to clear bgp neighborship and verify bgp neighborship
is coming up(BGP is converged) usinf "show bgp summary json" command
@@ -1959,7 +1959,11 @@ def clear_bgp_and_verify(tgen, topo, router):
return errormsg
# To find neighbor ip type
- bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ try:
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ except TypeError:
+ bgp_addr_type = topo["routers"][router]["bgp"][0]["address_family"]
+
total_peer = 0
for addr_type in bgp_addr_type.keys():
@@ -2019,10 +2023,15 @@ def clear_bgp_and_verify(tgen, topo, router):
logger.info("Clearing BGP neighborship for router %s..", router)
for addr_type in bgp_addr_type.keys():
if addr_type == "ipv4":
- run_frr_cmd(rnode, "clear ip bgp *")
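+            # When a specific peer is passed via rid, clear only that
+            # neighbour; otherwise fall back to clearing every IPv4 peer.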
+ if rid:
+ run_frr_cmd(rnode, "clear bgp ipv4 {}".format(rid))
+ else:
+ run_frr_cmd(rnode, "clear bgp ipv4 *")
elif addr_type == "ipv6":
- run_frr_cmd(rnode, "clear bgp ipv6 *")
-
+ if rid:
+ run_frr_cmd(rnode, "clear bgp ipv6 {}".format(rid))
+ else:
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
peer_uptime_after_clear_bgp = {}
# Verifying BGP convergence after bgp clear command
for retry in range(50):
@@ -2042,7 +2051,11 @@ def clear_bgp_and_verify(tgen, topo, router):
return errormsg
# To find neighbor ip type
- bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ try:
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ except TypeError:
+ bgp_addr_type = topo["routers"][router]["bgp"][0]["address_family"]
+
total_peer = 0
for addr_type in bgp_addr_type.keys():
if not check_address_types(addr_type):
@@ -2797,7 +2810,11 @@ def verify_best_path_as_per_admin_distance(
if route in rib_routes_json:
st_found = True
# Verify next_hop in rib_routes_json
- if [nh for nh in rib_routes_json[route][0]["nexthops"] if nh['ip'] == _next_hop]:
+ if [
+ nh
+ for nh in rib_routes_json[route][0]["nexthops"]
+ if nh["ip"] == _next_hop
+ ]:
nh_found = True
else:
errormsg = (
diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py
index 35a57d0a99..01439373c5 100644
--- a/tests/topotests/lib/bgprib.py
+++ b/tests/topotests/lib/bgprib.py
@@ -37,6 +37,7 @@
from lib.lutil import luCommand, luResult, LUtil
import json
import re
+import time
# gpz: get rib in json form and compare against desired routes
class BgpRib:
@@ -48,7 +49,15 @@ class BgpRib:
for pfx in pfxtbl.keys():
if debug:
self.log("trying pfx %s" % pfx)
- if pfx != want["p"]:
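+            # A want entry carrying "exist": False is a negative match: the
+            # prefix must not be present in the table at all.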
+ if "exist" in want and want["exist"] == False:
+ if pfx == want["p"]:
+ if debug:
+ self.log("unexpected route: pfx=" + want["p"])
+ return 0
+ if debug:
+ self.log("unwant pfx=" + want["p"] + ", not " + pfx)
+ continue
+ elif pfx != want["p"]:
if debug:
self.log("want pfx=" + want["p"] + ", not " + pfx)
continue
@@ -75,53 +84,67 @@ class BgpRib:
if debug:
self.log("missing route: pfx=" + want["p"] + ", nh=" + want["n"])
return 0
+ if "exist" in want and want["exist"] == False:
+ return 1
+ return 0
- def RequireVpnRoutes(self, target, title, wantroutes, debug=0):
+ def RequireVpnRoutes(self, target, title, wantroutes, retry=0, wait=1, debug=0):
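+        # retry/wait turn the check into a poll: the VPN RIB is re-read up to
+        # 'retry' additional times, sleeping 'wait' seconds between
+        # unsuccessful attempts.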
import json
logstr = "RequireVpnRoutes " + str(wantroutes)
- # non json form for humans
- luCommand(
- target,
- 'vtysh -c "show bgp ipv4 vpn"',
- ".",
- "None",
- "Get VPN RIB (non-json)",
- )
- ret = luCommand(
- target,
- 'vtysh -c "show bgp ipv4 vpn json"',
- ".*",
- "None",
- "Get VPN RIB (json)",
- )
- if re.search(r"^\s*$", ret):
- # degenerate case: empty json means no routes
- if len(wantroutes) > 0:
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
- rib = json.loads(ret)
- rds = rib["routes"]["routeDistinguishers"]
- for want in wantroutes:
- found = 0
- if debug:
- self.log("want rd %s" % want["rd"])
- for rd in rds.keys():
- if rd != want["rd"]:
- continue
+ retry += 1
+ while retry:
+ retry -= 1
+ # non json form for humans
+ luCommand(
+ target,
+ 'vtysh -c "show bgp ipv4 vpn"',
+ ".",
+ "None",
+ "Get VPN RIB (non-json)",
+ )
+ ret = luCommand(
+ target,
+ 'vtysh -c "show bgp ipv4 vpn json"',
+ ".*",
+ "None",
+ "Get VPN RIB (json)",
+ )
+ if re.search(r"^\s*$", ret):
+ # degenerate case: empty json means no routes
+ if len(wantroutes) > 0:
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
+ rib = json.loads(ret)
+ rds = rib["routes"]["routeDistinguishers"]
+ for want in wantroutes:
+ found = 0
if debug:
- self.log("found rd %s" % rd)
- table = rds[rd]
- if self.routes_include_wanted(table, want, debug):
- found = 1
- break
- if not found:
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ self.log("want rd %s" % want["rd"])
+ for rd in rds.keys():
+ if rd != want["rd"]:
+ continue
+ if debug:
+ self.log("found rd %s" % rd)
+ table = rds[rd]
+ if self.routes_include_wanted(table, want, debug):
+ found = 1
+ break
+ if not found:
+ if retry:
+ break
+ luResult(target, False, title, logstr)
+ return
+ if not found and retry:
+ time.sleep(wait)
+ continue
+ luResult(target, True, title, logstr)
+ break
- def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0):
+ def RequireUnicastRoutes(
+ self, target, afi, vrf, title, wantroutes, retry=0, wait=1, debug=0
+ ):
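+        # Same retry/wait polling behaviour as RequireVpnRoutes, applied to
+        # the per-VRF unicast RIB.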
logstr = "RequireUnicastRoutes %s" % str(wantroutes)
vrfstr = ""
if vrf != "":
@@ -130,48 +153,62 @@ class BgpRib:
if (afi != "ipv4") and (afi != "ipv6"):
self.log("ERROR invalid afi")
- cmdstr = "show bgp %s %s unicast" % (vrfstr, afi)
- # non json form for humans
- cmd = 'vtysh -c "%s"' % cmdstr
- luCommand(target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi))
- cmd = 'vtysh -c "%s json"' % cmdstr
- ret = luCommand(
- target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi)
- )
- if re.search(r"^\s*$", ret):
- # degenerate case: empty json means no routes
- if len(wantroutes) > 0:
- luResult(target, False, title, logstr)
+ retry += 1
+ while retry:
+ retry -= 1
+ cmdstr = "show bgp %s %s unicast" % (vrfstr, afi)
+ # non json form for humans
+ cmd = 'vtysh -c "%s"' % cmdstr
+ luCommand(
+ target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi)
+ )
+ cmd = 'vtysh -c "%s json"' % cmdstr
+ ret = luCommand(
+ target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi)
+ )
+ if re.search(r"^\s*$", ret):
+ # degenerate case: empty json means no routes
+ if len(wantroutes) > 0:
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
+ rib = json.loads(ret)
+ try:
+ table = rib["routes"]
+ # KeyError: 'routes' probably means missing/bad VRF
+ except KeyError as err:
+ if vrf != "":
+ errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf)
+ else:
+ errstr = "-script ERROR: check if vrf missing"
+ if retry:
+ time.sleep(wait)
+ continue
+ luResult(target, False, title + errstr, logstr)
return
+ # if debug:
+ # self.log("table=%s" % table)
+ for want in wantroutes:
+ if debug:
+ self.log("want=%s" % want)
+ if not self.routes_include_wanted(table, want, debug):
+ if retry:
+ time.sleep(wait)
+ continue
+ luResult(target, False, title, logstr)
+ return
luResult(target, True, title, logstr)
- rib = json.loads(ret)
- try:
- table = rib["routes"]
- # KeyError: 'routes' probably means missing/bad VRF
- except KeyError as err:
- if vrf != "":
- errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf)
- else:
- errstr = "-script ERROR: check if vrf missing"
- luResult(target, False, title + errstr, logstr)
- return
- # if debug:
- # self.log("table=%s" % table)
- for want in wantroutes:
- if debug:
- self.log("want=%s" % want)
- if not self.routes_include_wanted(table, want, debug):
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ break
BgpRib = BgpRib()
-def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0):
- BgpRib.RequireVpnRoutes(target, title, wantroutes, debug)
+def bgpribRequireVpnRoutes(target, title, wantroutes, retry=0, wait=1, debug=0):
+ BgpRib.RequireVpnRoutes(target, title, wantroutes, retry, wait, debug)
-def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0):
- BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug)
+def bgpribRequireUnicastRoutes(
+ target, afi, vrf, title, wantroutes, retry=0, wait=1, debug=0
+):
+ BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, retry, wait, debug)
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
index 86c089ab3b..6bafbbb556 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
@@ -5,5 +5,5 @@ B>* 10.0.3.0/24 [20/20] via 10.0.30.3, r1-eth2 (vrf neno), weight 1, XX:XX:XX
O>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX
O 10.0.20.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.20.0/24 is directly connected, r1-eth1, XX:XX:XX
-B>* 10.0.30.0/24 [20/0] is directly connected, r1-eth2 (vrf neno), weight 1, XX:XX:XX
+B>* 10.0.30.0/24 [20/0] is directly connected, neno (vrf neno), weight 1, XX:XX:XX
O>* 10.0.40.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
index 9681d8a04e..3ed6b1b3a1 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
@@ -7,4 +7,4 @@ B>* 10.0.4.0/24 [20/20] via 10.0.40.4, r2-eth2 (vrf ray), weight 1, XX:XX:XX
O 10.0.20.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.20.0/24 is directly connected, r2-eth1, XX:XX:XX
O>* 10.0.30.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
-B>* 10.0.40.0/24 [20/0] is directly connected, r2-eth2 (vrf ray), weight 1, XX:XX:XX
+B>* 10.0.40.0/24 [20/0] is directly connected, ray (vrf ray), weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
index ce9903ae71..4ad8441d85 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
@@ -1,9 +1,9 @@
VRF ray:
B 10.0.1.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX
-B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX
+B 10.0.2.0/24 [20/0] is directly connected, lo (vrf default) inactive, weight 1, XX:XX:XX
B>* 10.0.3.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX
O>* 10.0.4.0/24 [110/20] via 10.0.40.4, r2-eth2, weight 1, XX:XX:XX
-B 10.0.20.0/24 [20/0] is directly connected, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX
+B 10.0.20.0/24 [20/0] is directly connected, lo (vrf default) inactive, weight 1, XX:XX:XX
B>* 10.0.30.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX
O 10.0.40.0/24 [110/10] is directly connected, r2-eth2, weight 1, XX:XX:XX
C>* 10.0.40.0/24 is directly connected, r2-eth2, XX:XX:XX
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 7c5d91d4dc..bf402e1bef 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -1514,6 +1514,7 @@ def ignore_unconfigurable_lines(lines_to_add, lines_to_del):
[
ctx_keys[0].startswith(x)
for x in [
+ "agentx",
"frr version",
"frr defaults",
"username",
diff --git a/zebra/connected.c b/zebra/connected.c
index c01be58e82..57c7f1925b 100644
--- a/zebra/connected.c
+++ b/zebra/connected.c
@@ -387,10 +387,14 @@ void connected_down(struct interface *ifp, struct connected *ifc)
.ifindex = ifp->ifindex,
.vrf_id = ifp->vrf->vrf_id,
};
- struct zebra_vrf *zvrf;
- uint32_t count = 0;
+ struct zebra_vrf *zvrf, *zvrf_iter;
+ uint32_t count_ipv4 = 0;
struct listnode *cnode;
struct connected *c;
+ struct route_table *table;
+ struct route_node *rn;
+ struct route_entry *re, *next;
+ struct vrf *vrf;
zvrf = ifp->vrf->info;
if (!zvrf) {
@@ -456,12 +460,14 @@ void connected_down(struct interface *ifp, struct connected *ifc)
prefix_copy(&cp, CONNECTED_PREFIX(c));
apply_mask(&cp);
- if (prefix_same(&p, &cp) &&
- !CHECK_FLAG(c->conf, ZEBRA_IFC_DOWN))
- count++;
+ if (CHECK_FLAG(c->conf, ZEBRA_IFC_DOWN))
+ continue;
- if (count >= 1)
+ if (prefix_same(&p, &cp))
return;
+
+ if (cp.family == AF_INET)
+ count_ipv4++;
}
/*
@@ -474,6 +480,60 @@ void connected_down(struct interface *ifp, struct connected *ifc)
rib_delete(afi, SAFI_MULTICAST, zvrf->vrf->vrf_id, ZEBRA_ROUTE_CONNECT,
0, 0, &p, NULL, &nh, 0, zvrf->table_id, 0, 0, false);
+ /* When the last IPv4 address of an interface is deleted, Linux removes
+ * all routes using this interface without any Netlink advertisement.
+ * The removed routes include those that only have this particular
+	 * interface as a nexthop. Among those, remove the kernel ones from the
+	 * FRR RIB and reinstall the others that were added by FRR.
+ */
+ if (afi == AFI_IP && count_ipv4 == 0 && if_is_operative(ifp)) {
+ RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
+ zvrf_iter = vrf->info;
+
+ if (!zvrf_iter)
+ continue;
+
+ table = zvrf_iter->table[AFI_IP][SAFI_UNICAST];
+ if (!table)
+ continue;
+
+ for (rn = route_top(table); rn;
+ rn = srcdest_route_next(rn)) {
+ RNODE_FOREACH_RE_SAFE (rn, re, next) {
+ if (CHECK_FLAG(re->status,
+ ROUTE_ENTRY_REMOVED))
+ continue;
+ if (re->nhe->ifp != ifp)
+ continue;
+ if (re->type == ZEBRA_ROUTE_KERNEL)
+ rib_delete(
+ afi, SAFI_UNICAST,
+ zvrf_iter->vrf->vrf_id,
+ re->type, 0, re->flags,
+ &rn->p, NULL, &nh, 0,
+ zvrf_iter->table_id,
+ re->metric,
+ re->distance, false);
+ else if (re->type !=
+ ZEBRA_ROUTE_CONNECT) {
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_CHANGED);
+ UNSET_FLAG(
+ re->status,
+ ROUTE_ENTRY_INSTALLED);
+ rib_add(afi, SAFI_UNICAST,
+ zvrf_iter->vrf->vrf_id,
+ re->type, 0, 0, &rn->p,
+ NULL, &nh, re->nhe_id,
+ zvrf_iter->table_id,
+ re->metric, 0,
+ re->distance, 0, false);
+ }
+ }
+ }
+ }
+ }
+
/* Schedule LSP forwarding entries for processing, if appropriate. */
if (zvrf->vrf->vrf_id == VRF_DEFAULT) {
if (IS_ZEBRA_DEBUG_MPLS)
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 8ed8abe304..aac1c9b471 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -2208,7 +2208,7 @@ DEFPY (show_route_detail,
[json$json] [nexthop-group$ng]",
SHOW_STR
IP_STR
- "IPv6 forwarding table\n"
+ "IP forwarding table\n"
"IP routing table\n"
VRF_FULL_CMD_HELP_STR
"Network in the IP routing table to display\n"
diff --git a/zebra/zserv.c b/zebra/zserv.c
index d788811d3d..d4fa6dadae 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -1037,7 +1037,8 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
} else
vty_out(vty, "Not registered for Nexthop Updates\n");
- vty_out(vty, "Client will %sbe notified about it's routes status\n",
+ vty_out(vty,
+ "Client will %sbe notified about the status of its routes.\n",
client->notify_owner ? "" : "Not ");
last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,