summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/bgp_labelpool.c24
-rw-r--r--bgpd/bgp_mplsvpn.h20
-rw-r--r--bgpd/bgp_nexthop.c33
-rw-r--r--bgpd/bgp_nexthop.h8
-rw-r--r--bgpd/bgp_nht.c38
-rw-r--r--bgpd/bgp_vty.c121
-rw-r--r--bgpd/bgp_zebra.c14
-rw-r--r--bgpd/bgp_zebra.h3
-rw-r--r--bgpd/bgpd.c3
-rw-r--r--bgpd/bgpd.h6
-rw-r--r--doc/developer/topotests.rst9
-rw-r--r--doc/user/bgp.rst26
-rw-r--r--doc/user/pbr.rst2
-rw-r--r--doc/user/zebra.rst25
-rw-r--r--lib/mgmt_msg.c2
-rw-r--r--lib/vty.c9
-rw-r--r--lib/vty.h2
-rw-r--r--ospf6d/ospf6_interface.c12
-rw-r--r--ospf6d/ospf6_top.c2
-rw-r--r--ospfd/ospf_vty.c10
-rw-r--r--pbrd/pbr_vty.c2
-rw-r--r--pimd/pim_cmd.c2
-rw-r--r--tests/bgpd/test_peer_attr.c5
-rw-r--r--tests/bgpd/test_peer_attr.py1
-rw-r--r--tests/topotests/bgp_accept_own/pe1/bgpd.conf4
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/__init__.py0
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf22
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf26
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf1
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf7
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf23
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf24
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf1
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf7
-rw-r--r--tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py587
-rw-r--r--tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json2
-rw-r--r--tests/topotests/bgp_route_server_client/r1/bgpd.conf1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json1
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json1
-rw-r--r--tests/topotests/bgp_vpn_5549_route_map/test_bgp_vpn_5549_route_map.py2
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf1
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf7
-rw-r--r--tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf1
-rw-r--r--tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py16
-rw-r--r--tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py22
-rw-r--r--tests/topotests/lib/topotest.py106
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt1/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt2/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt3/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt4/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt5/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt6/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_gr_topo1/rt7/show_ipv6_route.json14
-rw-r--r--tests/topotests/ospf6_loopback_cost/__init__.py0
-rw-r--r--tests/topotests/ospf6_loopback_cost/r1/frr.conf16
-rw-r--r--tests/topotests/ospf6_loopback_cost/r2/frr.conf16
-rw-r--r--tests/topotests/ospf6_loopback_cost/test_ospf6_loopback_cost.py89
-rw-r--r--tests/zebra/test_lm_plugin.c2
-rw-r--r--yang/frr-bgp-neighbor.yang2
-rw-r--r--zebra/interface.c2
-rw-r--r--zebra/label_manager.c186
-rw-r--r--zebra/label_manager.h13
-rw-r--r--zebra/rt_netlink.c22
-rw-r--r--zebra/zebra_mpls_vty.c3
-rw-r--r--zebra/zebra_vxlan.c137
77 files changed, 1564 insertions, 275 deletions
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 883338610c..b2bb49b943 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -448,7 +448,7 @@ void bgp_lp_get(
if (lp_fifo_count(&lp->requests) > lp->pending_count) {
if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
- lp->next_chunksize))
+ lp->next_chunksize, true))
return;
lp->pending_count += lp->next_chunksize;
@@ -494,8 +494,18 @@ void bgp_lp_release(
bf_release_index(chunk->allocated_map, index);
chunk->nfree += 1;
deallocated = true;
+ break;
}
assert(deallocated);
+ if (deallocated &&
+ chunk->nfree == chunk->last - chunk->first + 1 &&
+ lp_fifo_count(&lp->requests) == 0) {
+ bgp_zebra_release_label_range(chunk->first,
+ chunk->last);
+ list_delete_node(lp->chunks, node);
+ lp_chunk_free(chunk);
+ lp->next_chunksize = LP_CHUNK_SIZE_MIN;
+ }
}
}
}
@@ -642,7 +652,11 @@ void bgp_lp_event_zebra_up(void)
}
/* round up */
- chunks_needed = (labels_needed / lp->next_chunksize) + 1;
+ if (((float)labels_needed / (float)lp->next_chunksize) >
+ (labels_needed / lp->next_chunksize))
+ chunks_needed = (labels_needed / lp->next_chunksize) + 1;
+ else
+ chunks_needed = (labels_needed / lp->next_chunksize);
labels_needed = chunks_needed * lp->next_chunksize;
/*
@@ -650,10 +664,10 @@ void bgp_lp_event_zebra_up(void)
*/
list_delete_all_node(lp->chunks);
- if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, labels_needed))
+ if (labels_needed && !bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
+ labels_needed, true))
return;
-
- lp->pending_count = labels_needed;
+ lp->pending_count += labels_needed;
/*
* Invalidate any existing labels and requeue them as requests
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 19b6f4eb77..b2bdfcec00 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -13,6 +13,7 @@
#include "bgpd/bgp_rd.h"
#include "bgpd/bgp_zebra.h"
#include "bgpd/bgp_vty.h"
+#include "bgpd/bgp_label.h"
#define MPLS_LABEL_IS_SPECIAL(label) ((label) <= MPLS_LABEL_EXTENSION)
#define MPLS_LABEL_IS_NULL(label) \
@@ -165,6 +166,25 @@ static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi,
return 0;
}
+ /* Is there a "manual" export label that isn't allocated yet? */
+ if (!CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO) &&
+ bgp_vrf->vpn_policy[afi].tovpn_label != BGP_PREVENT_VRF_2_VRF_LEAK &&
+ bgp_vrf->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE &&
+ (bgp_vrf->vpn_policy[afi].tovpn_label >= MPLS_LABEL_UNRESERVED_MIN &&
+ !CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG))) {
+ if (!bgp_zebra_request_label_range(bgp_vrf->vpn_policy[afi]
+ .tovpn_label,
+ 1, false)) {
+ if (pmsg)
+ *pmsg = "manual label could not be allocated";
+ return 0;
+ }
+ SET_FLAG(bgp_vrf->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG);
+ }
+
return 1;
}
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index d12dc22330..44241b8582 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -58,7 +58,8 @@ void bnc_nexthop_free(struct bgp_nexthop_cache *bnc)
struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
struct prefix *prefix, uint32_t srte_color,
- ifindex_t ifindex)
+ ifindex_t ifindex, bool import_check_table,
+ bool nexthop_check_table)
{
struct bgp_nexthop_cache *bnc;
@@ -68,6 +69,9 @@ struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
bnc->ifindex_ipv6_ll = ifindex;
bnc->srte_color = srte_color;
bnc->tree = tree;
+ bnc->import_check_table = import_check_table;
+ bnc->nexthop_check_table = nexthop_check_table;
+
LIST_INIT(&(bnc->paths));
bgp_nexthop_cache_add(tree, bnc);
@@ -968,7 +972,7 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp,
static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
struct bgp_nexthop_cache *bnc, bool specific,
- json_object *json)
+ bool import_check_table, json_object *json)
{
char buf[PREFIX2STR_BUFFER];
time_t tbuf;
@@ -977,6 +981,12 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
json_object *json_last_update = NULL;
json_object *json_nexthop = NULL;
+ if (bnc->import_check_table && !import_check_table)
+ return;
+
+ if (bnc->nexthop_check_table && import_check_table)
+ return;
+
peer = (struct peer *)bnc->nht_info;
if (json)
@@ -1103,16 +1113,14 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp,
else
vty_out(vty, "Current BGP nexthop cache:\n");
}
- if (import_table)
- tree = &bgp->import_check_table;
- else
- tree = &bgp->nexthop_cache_table;
+ tree = &bgp->nexthop_cache_table;
if (afi == AFI_IP || afi == AFI_IP6) {
if (json)
json_afi = json_object_new_object();
frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc) {
- bgp_show_nexthop(vty, bgp, bnc, detail, json_afi);
+ bgp_show_nexthop(vty, bgp, bnc, detail, import_table,
+ json_afi);
found = true;
}
if (found && json)
@@ -1126,7 +1134,8 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp,
if (json && (afi == AFI_IP || afi == AFI_IP6))
json_afi = json_object_new_object();
frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc)
- bgp_show_nexthop(vty, bgp, bnc, detail, json_afi);
+ bgp_show_nexthop(vty, bgp, bnc, detail, import_table,
+ json_afi);
if (json && (afi == AFI_IP || afi == AFI_IP6))
json_object_object_add(
json, (afi == AFI_IP) ? "ipv4" : "ipv6",
@@ -1162,15 +1171,15 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
vty_out(vty, "nexthop address is malformed\n");
return CMD_WARNING;
}
- tree = import_table ? &bgp->import_check_table
- : &bgp->nexthop_cache_table;
+ tree = &bgp->nexthop_cache_table;
if (json)
json_afi = json_object_new_object();
frr_each (bgp_nexthop_cache, &(*tree)[family2afi(nhop.family)],
bnc) {
if (prefix_cmp(&bnc->prefix, &nhop))
continue;
- bgp_show_nexthop(vty, bgp, bnc, true, json_afi);
+ bgp_show_nexthop(vty, bgp, bnc, true, import_table,
+ json_afi);
found = true;
}
if (json)
@@ -1313,7 +1322,6 @@ void bgp_scan_init(struct bgp *bgp)
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
bgp_nexthop_cache_init(&bgp->nexthop_cache_table[afi]);
- bgp_nexthop_cache_init(&bgp->import_check_table[afi]);
bgp->connected_table[afi] = bgp_table_init(bgp, afi,
SAFI_UNICAST);
}
@@ -1333,7 +1341,6 @@ void bgp_scan_finish(struct bgp *bgp)
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
/* Only the current one needs to be reset. */
bgp_nexthop_cache_reset(&bgp->nexthop_cache_table[afi]);
- bgp_nexthop_cache_reset(&bgp->import_check_table[afi]);
bgp->connected_table[afi]->route_table->cleanup =
bgp_connected_cleanup;
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 49cbbaf885..c1d4d088a3 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -91,6 +91,9 @@ struct bgp_nexthop_cache {
* nexthop.
*/
bool is_evpn_gwip_nexthop;
+
+ bool import_check_table;
+ bool nexthop_check_table;
};
extern int bgp_nexthop_cache_compare(const struct bgp_nexthop_cache *a,
@@ -132,8 +135,9 @@ extern bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type,
struct bgp_dest *dest);
extern struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
struct prefix *prefix,
- uint32_t srte_color,
- ifindex_t ifindex);
+ uint32_t srte_color, ifindex_t ifindex,
+ bool import_check_table,
+ bool nexthop_check_table);
extern bool bnc_existing_for_prefix(struct bgp_nexthop_cache *bnc);
extern void bnc_free(struct bgp_nexthop_cache *bnc);
extern struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 60d6f74e14..aa37303fca 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -378,14 +378,12 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
} else
return 0;
- if (is_bgp_static_route)
- tree = &bgp_nexthop->import_check_table[afi];
- else
- tree = &bgp_nexthop->nexthop_cache_table[afi];
+ tree = &bgp_nexthop->nexthop_cache_table[afi];
bnc = bnc_find(tree, &p, srte_color, ifindex);
if (!bnc) {
- bnc = bnc_new(tree, &p, srte_color, ifindex);
+ bnc = bnc_new(tree, &p, srte_color, ifindex,
+ is_bgp_static_route, !is_bgp_static_route);
bnc->bgp = bgp_nexthop;
if (BGP_DEBUG(nht, NHT))
zlog_debug("Allocated bnc %pFX(%d)(%u)(%s) peer %p",
@@ -393,6 +391,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
bnc->srte_color, bnc->bgp->name_pretty,
peer);
} else {
+ if (is_bgp_static_route)
+ bnc->import_check_table = true;
+ else
+ bnc->nexthop_check_table = true;
+
if (BGP_DEBUG(nht, NHT))
zlog_debug(
"Found existing bnc %pFX(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
@@ -819,12 +822,8 @@ static void bgp_nht_ifp_handle(struct interface *ifp, bool up)
bgp_nht_ifp_table_handle(bgp, &bgp->nexthop_cache_table[AFI_IP], ifp,
up);
- bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP], ifp,
- up);
bgp_nht_ifp_table_handle(bgp, &bgp->nexthop_cache_table[AFI_IP6], ifp,
up);
- bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP6], ifp,
- up);
}
void bgp_nht_ifp_up(struct interface *ifp)
@@ -900,7 +899,7 @@ void bgp_nht_interface_events(struct peer *peer)
void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
{
struct bgp_nexthop_cache_head *tree = NULL;
- struct bgp_nexthop_cache *bnc_nhc, *bnc_import;
+ struct bgp_nexthop_cache *bnc_nhc;
struct bgp *bgp;
struct prefix match;
struct zapi_route nhr;
@@ -930,19 +929,12 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
zlog_debug(
"parse nexthop update %pFX(%u)(%s): bnc info not found for nexthop cache",
&nhr.prefix, nhr.srte_color, bgp->name_pretty);
- } else
- bgp_process_nexthop_update(bnc_nhc, &nhr, false);
-
- tree = &bgp->import_check_table[afi];
-
- bnc_import = bnc_find(tree, &match, nhr.srte_color, 0);
- if (!bnc_import) {
- if (BGP_DEBUG(nht, NHT))
- zlog_debug(
- "parse nexthop update %pFX(%u)(%s): bnc info not found for import check",
- &nhr.prefix, nhr.srte_color, bgp->name_pretty);
- } else
- bgp_process_nexthop_update(bnc_import, &nhr, true);
+ } else {
+ if (bnc_nhc->nexthop_check_table)
+ bgp_process_nexthop_update(bnc_nhc, &nhr, false);
+ if (bnc_nhc->import_check_table)
+ bgp_process_nexthop_update(bnc_nhc, &nhr, true);
+ }
/*
* HACK: if any BGP route is dependant on an SR-policy that doesn't
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 5d6ae589fa..bcf1b81a1a 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -122,6 +122,10 @@ FRR_CFG_DEFAULT_BOOL(BGP_SOFT_VERSION_CAPABILITY,
{ .val_bool = true, .match_profile = "datacenter", },
{ .val_bool = false },
);
+FRR_CFG_DEFAULT_BOOL(BGP_ENFORCE_FIRST_AS,
+ { .val_bool = false, .match_version = "< 9.1", },
+ { .val_bool = true },
+);
DEFINE_HOOK(bgp_inst_config_write,
(struct bgp *bgp, struct vty *vty),
@@ -615,6 +619,8 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name,
if (DFLT_BGP_SOFT_VERSION_CAPABILITY)
SET_FLAG((*bgp)->flags,
BGP_FLAG_SOFT_VERSION_CAPABILITY);
+ if (DFLT_BGP_ENFORCE_FIRST_AS)
+ SET_FLAG((*bgp)->flags, BGP_FLAG_ENFORCE_FIRST_AS);
ret = BGP_SUCCESS;
}
@@ -2828,6 +2834,23 @@ DEFUN(no_bgp_ebgp_requires_policy, no_bgp_ebgp_requires_policy_cmd,
return CMD_SUCCESS;
}
+DEFPY(bgp_enforce_first_as,
+ bgp_enforce_first_as_cmd,
+ "[no] bgp enforce-first-as",
+ NO_STR
+ BGP_STR
+ "Enforce the first AS for EBGP routes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+ if (no)
+ UNSET_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS);
+ else
+ SET_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS);
+
+ return CMD_SUCCESS;
+}
+
DEFPY(bgp_lu_uses_explicit_null, bgp_lu_uses_explicit_null_cmd,
"[no] bgp labeled-unicast <explicit-null|ipv4-explicit-null|ipv6-explicit-null>$value",
NO_STR BGP_STR
@@ -9461,7 +9484,7 @@ DEFPY (af_label_vpn_export,
"Automatically assign a label\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
- mpls_label_t label = MPLS_LABEL_NONE;
+ mpls_label_t label = (mpls_label_t)label_val;
afi_t afi;
int idx = 0;
bool yes = true;
@@ -9469,24 +9492,28 @@ DEFPY (af_label_vpn_export,
if (argv_find(argv, argc, "no", &idx))
yes = false;
- /* If "no ...", squash trailing parameter */
- if (!yes)
- label_auto = NULL;
-
- if (yes) {
- if (!label_auto)
- label = label_val; /* parser should force unsigned */
- }
-
afi = vpn_policy_getafi(vty, bgp, false);
if (afi == AFI_MAX)
return CMD_WARNING_CONFIG_FAILED;
-
- if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
- /* no change */
- return CMD_SUCCESS;
+ if (yes) {
+ if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
+ /* no change */
+ return CMD_SUCCESS;
+ if (!label_auto && label == bgp->vpn_policy[afi].tovpn_label)
+ /* no change */
+ return CMD_SUCCESS;
+ } else {
+ if (label_auto && !CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
+ /* no match */
+ return CMD_WARNING_CONFIG_FAILED;
+ if (!label_auto && label_val &&
+ label != bgp->vpn_policy[afi].tovpn_label)
+ /* no change */
+ return CMD_WARNING_CONFIG_FAILED;
+ }
/*
* pre-change: un-export vpn routes (vpn->vrf routes unaffected)
@@ -9494,9 +9521,16 @@ DEFPY (af_label_vpn_export,
vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
bgp_get_default(), bgp);
- if (!label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
+ if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG)) {
+ bgp_zebra_release_label_range(bgp->vpn_policy[afi].tovpn_label,
+ bgp->vpn_policy[afi].tovpn_label);
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG);
+ } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
+ /* release any previous auto label */
if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) {
/*
@@ -9513,16 +9547,32 @@ DEFPY (af_label_vpn_export,
&bgp->vpn_policy[afi],
bgp->vpn_policy[afi].tovpn_label);
}
- UNSET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
}
- bgp->vpn_policy[afi].tovpn_label = label;
- if (label_auto) {
- SET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
- bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi],
- vpn_leak_label_callback);
+ if (yes) {
+ if (label_auto) {
+ SET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+ /* fetch a label */
+ bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
+ bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi],
+ vpn_leak_label_callback);
+ } else {
+ bgp->vpn_policy[afi].tovpn_label = label;
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+ if (bgp->vpn_policy[afi].tovpn_label >=
+ MPLS_LABEL_UNRESERVED_MIN &&
+ bgp_zebra_request_label_range(bgp->vpn_policy[afi]
+ .tovpn_label,
+ 1, false))
+ SET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG);
+ }
+ } else {
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+ bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
}
/* post-change: re-export vpn routes */
@@ -17973,8 +18023,13 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
addr);
/* enforce-first-as */
- if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
- vty_out(vty, " neighbor %s enforce-first-as\n", addr);
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS)) {
+ if (!peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
+ vty_out(vty, " no neighbor %s enforce-first-as\n", addr);
+ } else {
+ if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
+ vty_out(vty, " neighbor %s enforce-first-as\n", addr);
+ }
/* update-source */
if (peergroup_flag_check(peer, PEER_FLAG_UPDATE_SOURCE)) {
@@ -18599,6 +18654,15 @@ int bgp_config_write(struct vty *vty)
? ""
: "no ");
+ /* bgp enforce-first-as */
+ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS) !=
+ SAVE_BGP_ENFORCE_FIRST_AS)
+ vty_out(vty, " %sbgp enforce-first-as\n",
+ CHECK_FLAG(bgp->flags,
+ BGP_FLAG_ENFORCE_FIRST_AS)
+ ? ""
+ : "no ");
+
if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_LU_IPV4_EXPLICIT_NULL) &&
!!CHECK_FLAG(bgp->flags, BGP_FLAG_LU_IPV6_EXPLICIT_NULL))
vty_out(vty, " bgp labeled-unicast explicit-null\n");
@@ -19594,6 +19658,9 @@ void bgp_vty_init(void)
install_element(BGP_NODE, &bgp_ebgp_requires_policy_cmd);
install_element(BGP_NODE, &no_bgp_ebgp_requires_policy_cmd);
+ /* bgp enforce-first-as */
+ install_element(BGP_NODE, &bgp_enforce_first_as_cmd);
+
/* bgp labeled-unicast explicit-null */
install_element(BGP_NODE, &bgp_lu_uses_explicit_null_cmd);
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 212b7f398b..e53416044e 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -3423,6 +3423,9 @@ static bool bgp_zebra_label_manager_connect(void)
/* tell label pool that zebra is connected */
bgp_lp_event_zebra_up();
+ /* tell BGP L3VPN that label manager is available */
+ if (bgp_get_default())
+ vpn_leak_postchange_all();
return true;
}
@@ -3921,7 +3924,8 @@ void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
zebra_send_mpls_labels(zclient, cmd, &zl);
}
-bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size)
+bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
+ bool label_auto)
{
int ret;
uint32_t start, end;
@@ -3943,7 +3947,13 @@ bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size)
return false;
}
- bgp_lp_event_chunk(start, end);
+ if (label_auto)
+ /* label automatic is serviced by the bgp label pool
+ * manager, which allocates label chunks in
+ * pre-pools, and which needs to be notified about
+ * new chunks availability
+ */
+ bgp_lp_event_chunk(start, end);
return true;
}
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 0edae041d2..4696e4dc44 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -124,6 +124,7 @@ extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
enum lsp_types_t ltype,
struct prefix *p, uint32_t num_labels,
mpls_label_t out_labels[]);
-extern bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size);
+extern bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
+ bool label_auto);
extern void bgp_zebra_release_label_range(uint32_t start, uint32_t end);
#endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 0a01d71968..6ca0b06450 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1919,6 +1919,9 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
}
}
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS))
+ SET_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS);
+
/* auto shutdown if configured */
if (bgp->autoshutdown)
peer_flag_set(peer, PEER_FLAG_SHUTDOWN);
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 42e4c167f6..db3991cd07 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -219,6 +219,8 @@ struct vpn_policy {
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
#define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
+/* Manual label is registered with zebra label manager */
+#define BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG (1 << 5)
/*
* If we are importing another vrf into us keep a list of
@@ -516,6 +518,7 @@ struct bgp {
/* For BGP-LU, force IPv6 local prefixes to use ipv6-explicit-null label */
#define BGP_FLAG_LU_IPV6_EXPLICIT_NULL (1ULL << 34)
#define BGP_FLAG_SOFT_VERSION_CAPABILITY (1ULL << 35)
+#define BGP_FLAG_ENFORCE_FIRST_AS (1ULL << 36)
/* BGP default address-families.
* New peers inherit enabled afi/safis from bgp instance.
@@ -556,9 +559,6 @@ struct bgp {
/* Tree for next-hop lookup cache. */
struct bgp_nexthop_cache_head nexthop_cache_table[AFI_MAX];
- /* Tree for import-check */
- struct bgp_nexthop_cache_head import_check_table[AFI_MAX];
-
struct bgp_table *connected_table[AFI_MAX];
struct hash *address_hash;
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 7cd9858d31..35c2bd4202 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -559,6 +559,8 @@ Here's an example of launching ``vtysh`` on routers ``rt1`` and ``rt2``.
sudo -E pytest --vtysh=rt1,rt2 all-protocol-startup
+.. _debug_with_gdb:
+
Debugging with GDB
""""""""""""""""""
@@ -647,6 +649,13 @@ memleak detection is enabled.
sudo -E pytest --valgrind-memleaks all-protocol-startup
+.. note:: GDB can be used in conjunction with valgrind.
+
+ When you enable ``--valgrind-memleaks`` and you also launch various daemons
+ under GDB (debug_with_gdb_) topotest will connect the two utilities using
+ ``--vgdb-error=0`` and attaching to a ``vgdb`` process. This is very
+ useful for debugging bugs such as use-of-uninitialized-memory errors.
+
Collecting Performance Data using perf(1)
"""""""""""""""""""""""""""""""""""""""""
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 43572be07e..f7203a599a 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -527,6 +527,27 @@ Reject routes with AS_SET or AS_CONFED_SET types
This command enables rejection of incoming and outgoing routes having AS_SET or AS_CONFED_SET type.
+Enforce first AS
+----------------
+
+.. clicmd:: bgp enforce-first-as
+
+ To configure a router to deny an update received from an external BGP (eBGP)
+ peer that does not list its autonomous system number at the beginning of
+ the `AS_PATH` in the incoming update, use the ``bgp enforce-first-as`` command
+ in router configuration mode.
+
+ In order to exclude an arbitrary neighbor from this enforcement, use the
+ command ``no neighbor NAME enforce-first-as``. Conversely, if global
+ enforcement is disabled, you can override this behavior per neighbor too.
+
+ Default: enabled.
+
+.. note::
+
+ If you have a peering to RS (Route-Server), most likely you MUST disable the
+ first AS enforcement.
+
Suppress duplicate updates
--------------------------
@@ -1526,7 +1547,10 @@ Configuring Peers
Discard updates received from the specified (eBGP) peer if the AS_PATH
attribute does not contain the PEER's ASN as the first AS_PATH segment.
- Default: disabled.
+ You can enable or disable this enforcement globally too using
+ ``bgp enforce-first-as`` command.
+
+ Default: enabled.
.. clicmd:: neighbor PEER extended-optional-parameters
diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst
index 83abfa220e..7a4effd3fc 100644
--- a/doc/user/pbr.rst
+++ b/doc/user/pbr.rst
@@ -267,7 +267,7 @@ specified in the rule are also applied to the packet.
this action,
so this field will be ignored unless another dataplane provider is used.
-.. clicmd:: show pbr map [NAME] [detail|json]
+.. clicmd:: show pbr map [NAME] [detail] [json]
Display pbr maps either all or by ``NAME``. If ``detail`` is set, it will
give information about each rule's unique internal ID and some extra
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index ba6e3bf37f..7c50212d48 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -783,6 +783,22 @@ presence of the entry.
21 Static 10.125.0.2 IPv4 Explicit Null
+MPLS label chunks
+-----------------
+
+MPLS label chunks are handled in the zebra label manager service,
+which ensures the same label value or label chunk cannot be used by
+multiple CP routing daemons at the same time.
+
+Label requests originate from CP routing daemons, and are resolved
+over the default MPLS range (16-1048575). There are two kinds of
+requests:
+- Static label requests request an exact label value or range. For
+instance, segment routing label block requests originating from
+IS-IS fall into this category.
+- Dynamic label requests only need a range of label values. The
+'bgp l3vpn export auto' command uses such requests.
+
Allocated label chunks table can be dumped using the command
.. clicmd:: show debugging label-table
@@ -796,6 +812,15 @@ Allocated label chunks table can be dumped using the command
Proto ospf: [20000/21000]
Proto isis: [22000/23000]
+.. clicmd:: mpls label dynamic-block (16-1048575) (16-1048575)
+
+ Define a range of labels where dynamic label requests will
+ allocate label chunks from. This command guarantees that
+ static label values outside that range will not conflict
+ with the dynamic label requests. When the dynamic-block
+ range is configured, static label requests that match that
+ range are not accepted.
+
.. _zebra-srv6:
Segment-Routing IPv6
diff --git a/lib/mgmt_msg.c b/lib/mgmt_msg.c
index 70332bd5f3..12432a06e2 100644
--- a/lib/mgmt_msg.c
+++ b/lib/mgmt_msg.c
@@ -427,6 +427,8 @@ void mgmt_msg_destroy(struct mgmt_msg_state *ms)
mgmt_msg_reset_writes(ms);
if (ms->ins)
stream_free(ms->ins);
+ if (ms->outs)
+ stream_free(ms->outs);
free(ms->idtag);
}
diff --git a/lib/vty.c b/lib/vty.c
index ffb7274b90..f395fd3ea1 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -383,11 +383,14 @@ int vty_json_no_pretty(struct vty *vty, struct json_object *json)
return vty_json_helper(vty, json, JSON_C_TO_STRING_NOSLASHESCAPE);
}
-void vty_json_empty(struct vty *vty)
+void vty_json_empty(struct vty *vty, struct json_object *json)
{
- json_object *json = json_object_new_object();
+ json_object *jsonobj = json;
- vty_json(vty, json);
+ if (!json)
+ jsonobj = json_object_new_object();
+
+ vty_json(vty, jsonobj);
}
/* Output current time to the vty. */
diff --git a/lib/vty.h b/lib/vty.h
index a8654f8b69..346e44910a 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -377,7 +377,7 @@ extern bool vty_set_include(struct vty *vty, const char *regexp);
*/
extern int vty_json(struct vty *vty, struct json_object *json);
extern int vty_json_no_pretty(struct vty *vty, struct json_object *json);
-extern void vty_json_empty(struct vty *vty);
+extern void vty_json_empty(struct vty *vty, struct json_object *json);
/* post fd to be passed to the vtysh client
* fd is owned by the VTY code after this and will be closed when done
*/
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index a20ddf6c10..2a6b816844 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -128,9 +128,19 @@ static uint8_t ospf6_default_iftype(struct interface *ifp)
static uint32_t ospf6_interface_get_cost(struct ospf6_interface *oi)
{
/* If all else fails, use default OSPF cost */
- uint32_t cost;
+ uint32_t cost = 0;
uint32_t bw, refbw;
struct ospf6 *ospf6;
+
+ /* If the interface type is point-to-multipoint or the interface
+ * is in the state Loopback, the global scope IPv6 addresses
+ * associated with the interface (if any) are copied into the
+ * intra-area-prefix-LSA with the PrefixOptions LA-bit set, the
+ * PrefixLength set to 128, and the metric set to 0.
+ */
+ if (if_is_loopback(oi->interface))
+ return cost;
+
/* interface speed and bw can be 0 in some platforms,
* use ospf default bw. If bw is configured then it would
* be used.
diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c
index 4c40298799..216837d8a5 100644
--- a/ospf6d/ospf6_top.c
+++ b/ospf6d/ospf6_top.c
@@ -2015,7 +2015,7 @@ ospf6_show_summary_address(struct vty *vty, struct ospf6 *ospf6,
if (!uj) {
ospf6_show_vrf_name(vty, ospf6, json_vrf);
- vty_out(vty, "aggregation delay interval :%u(in seconds)\n\n",
+ vty_out(vty, "aggregation delay interval: %u(in seconds)\n\n",
ospf6->aggr_delay_interval);
vty_out(vty, "%s\n", header);
} else {
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 8c3ad7f372..ab412e3606 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -5709,7 +5709,7 @@ DEFPY(show_ip_ospf_neighbor_id,
"%% OSPF is not enabled in vrf %s\n",
vrf_name);
else
- vty_json_empty(vty);
+ vty_json_empty(vty, NULL);
return CMD_SUCCESS;
}
ret = show_ip_ospf_neighbor_id_common(
@@ -6210,7 +6210,7 @@ DEFPY(show_ip_ospf_neighbor_int,
if (!ospf || !ospf->oi_running) {
if (json)
- vty_json_empty(vty);
+ vty_json_empty(vty, NULL);
return ret;
}
@@ -6220,7 +6220,7 @@ DEFPY(show_ip_ospf_neighbor_int,
ifp = if_lookup_by_name(ifname, vrf_id);
if (!ifp) {
if (json)
- vty_json_empty(vty);
+ vty_json_empty(vty, NULL);
else
vty_out(vty, "No such interface.\n");
return ret;
@@ -6257,7 +6257,7 @@ DEFPY(show_ip_ospf_neighbor_int_detail,
"%% OSPF is not enabled in vrf %s\n",
vrf_name);
else
- vty_json_empty(vty);
+ vty_json_empty(vty, NULL);
return CMD_SUCCESS;
}
return show_ip_ospf_neighbor_int_detail_common(
@@ -11994,7 +11994,7 @@ static int ospf_show_summary_address(struct vty *vty, struct ospf *ospf,
ospf_show_vrf_name(ospf, vty, json_vrf, use_vrf);
if (!uj) {
- vty_out(vty, "aggregation delay interval :%u(in seconds)\n\n",
+ vty_out(vty, "aggregation delay interval: %u(in seconds)\n\n",
ospf->aggr_delay_interval);
} else {
json_object_int_add(json_vrf, "aggregationDelayInterval",
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 582ffac9b2..9589e5be23 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -1818,7 +1818,7 @@ static void vty_json_pbr_map(json_object *j, struct vty *vty,
DEFPY (show_pbr_map,
show_pbr_map_cmd,
- "show pbr map [NAME$name] [detail$detail|json$json]",
+ "show pbr map [NAME$name] [detail$detail] [json$json]",
SHOW_STR
PBR_STR
"PBR Map\n"
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 2e90cf9053..628a445945 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -78,7 +78,7 @@ static struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[],
if (!vrf) {
if (uj)
- vty_json_empty(vty);
+ vty_json_empty(vty, NULL);
else
vty_out(vty, "Specified VRF: %s does not exist\n",
argv[*idx]->arg);
diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c
index af0b984ffd..231ecd2066 100644
--- a/tests/bgpd/test_peer_attr.c
+++ b/tests/bgpd/test_peer_attr.c
@@ -283,11 +283,6 @@ static struct test_peer_attr test_peer_attrs[] = {
.type = PEER_AT_GLOBAL_FLAG,
},
{
- .cmd = "enforce-first-as",
- .u.flag = PEER_FLAG_ENFORCE_FIRST_AS,
- .type = PEER_AT_GLOBAL_FLAG,
- },
- {
.cmd = "local-as",
.peer_cmd = "local-as 1",
.group_cmd = "local-as 2",
diff --git a/tests/bgpd/test_peer_attr.py b/tests/bgpd/test_peer_attr.py
index eb57618434..bd8b06e2f0 100644
--- a/tests/bgpd/test_peer_attr.py
+++ b/tests/bgpd/test_peer_attr.py
@@ -15,7 +15,6 @@ TestFlag.okfail("peer\\capability extended-nexthop")
TestFlag.okfail("peer\\description")
TestFlag.okfail("peer\\disable-connected-check")
TestFlag.okfail("peer\\dont-capability-negotiate")
-TestFlag.okfail("peer\\enforce-first-as")
TestFlag.okfail("peer\\local-as")
TestFlag.okfail("peer\\local-as 1 no-prepend")
TestFlag.okfail("peer\\local-as 1 no-prepend replace-as")
diff --git a/tests/topotests/bgp_accept_own/pe1/bgpd.conf b/tests/topotests/bgp_accept_own/pe1/bgpd.conf
index 15466b4259..1f7abac98f 100644
--- a/tests/topotests/bgp_accept_own/pe1/bgpd.conf
+++ b/tests/topotests/bgp_accept_own/pe1/bgpd.conf
@@ -25,7 +25,7 @@ router bgp 65001 vrf Customer
neighbor 192.168.1.1 timers connect 1
address-family ipv4 unicast
redistribute connected
- label vpn export 10
+ label vpn export 250
rd vpn export 192.168.1.2:2
rt vpn import 192.168.1.2:2
rt vpn export 192.168.1.2:2
@@ -40,7 +40,7 @@ router bgp 65001 vrf Service
neighbor 192.168.2.1 timers 1 3
neighbor 192.168.2.1 timers connect 1
address-family ipv4 unicast
- label vpn export 20
+ label vpn export 350
rd vpn export 192.168.2.2:2
rt vpn import 192.168.2.2:2
rt vpn export 192.168.2.2:2
diff --git a/tests/topotests/bgp_l3vpn_label_export/__init__.py b/tests/topotests/bgp_l3vpn_label_export/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/__init__.py
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf
new file mode 100644
index 0000000000..bb1ed4c1ea
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf
@@ -0,0 +1,22 @@
+router bgp 65001
+ bgp router-id 192.0.2.1
+ no bgp default ipv4-unicast
+ no bgp ebgp-requires-policy
+ neighbor 192.0.2.2 remote-as 65002
+ neighbor 192.0.2.2 timers 1 3
+ neighbor 192.0.2.2 timers connect 1
+ neighbor 192.0.2.2 ebgp-multihop 2
+ address-family ipv4 vpn
+ neighbor 192.0.2.2 activate
+ exit-address-family
+!
+router bgp 65001 vrf vrf1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 1111
+ rd vpn export 101:1
+ rt vpn both 52:100
+ import vpn
+ export vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf
new file mode 100644
index 0000000000..04ae06877a
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf
@@ -0,0 +1,26 @@
+hostname r1
+log file ldpd.log
+password zebra
+!
+! debug mpls ldp zebra
+! debug mpls ldp event
+! debug mpls ldp errors
+! debug mpls ldp messages recv
+! debug mpls ldp messages sent
+! debug mpls ldp discovery hello recv
+! debug mpls ldp discovery hello sent
+!
+mpls ldp
+ router-id 192.0.2.1
+ !
+ address-family ipv4
+ discovery transport-address 192.0.2.1
+ !
+ interface r1-eth0
+ !
+ interface r1-eth1
+ !
+ !
+!
+line vty
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf
new file mode 100644
index 0000000000..7f2f057bfe
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf
@@ -0,0 +1 @@
+ip route 192.0.2.2/32 192.168.1.2
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf b/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf
new file mode 100644
index 0000000000..7bdacb1ca3
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf
@@ -0,0 +1,7 @@
+!
+interface lo
+ ip address 192.0.2.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf
new file mode 100644
index 0000000000..18a11cfb40
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf
@@ -0,0 +1,23 @@
+router bgp 65002
+ bgp router-id 192.0.2.2
+ no bgp default ipv4-unicast
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as 65001
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+ neighbor 192.168.1.1 ebgp-multihop 2
+ neighbor 192.168.1.1 update-source 192.0.2.2
+ address-family ipv4 vpn
+ neighbor 192.168.1.1 activate
+ exit-address-family
+!
+router bgp 65002 vrf vrf1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 2222
+ rd vpn export 102:1
+ rt vpn both 52:100
+ import vpn
+ export vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf
new file mode 100644
index 0000000000..f4307f1ab0
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf
@@ -0,0 +1,24 @@
+hostname r2
+log file ldpd.log
+password zebra
+!
+! debug mpls ldp zebra
+! debug mpls ldp event
+! debug mpls ldp errors
+! debug mpls ldp messages recv
+! debug mpls ldp messages sent
+! debug mpls ldp discovery hello recv
+! debug mpls ldp discovery hello sent
+!
+mpls ldp
+ router-id 192.0.2.2
+ !
+ address-family ipv4
+ discovery transport-address 192.0.2.2
+ !
+ interface r2-eth0
+ !
+ !
+!
+line vty
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf
new file mode 100644
index 0000000000..e3f5d7dba0
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf
@@ -0,0 +1 @@
+ip route 192.0.2.1/32 192.168.1.1
diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf b/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf
new file mode 100644
index 0000000000..40dfa9854c
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf
@@ -0,0 +1,7 @@
+!
+int lo
+ ip address 192.0.2.2/32
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py b/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py
new file mode 100644
index 0000000000..7c23a3e899
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by Louis Scalbert <louis.scalbert@6wind.com>
+# Copyright 2023 6WIND S.A.
+#
+
+"""
+Test BGP L3VPN label export: manual, auto, and dynamic-block label allocation.
+"""
+
+import os
+import re
+import sys
+import json
+import pytest
+import functools
+
+from copy import deepcopy
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import kill_router_daemons, start_router_daemons, step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ for rtr in [1, 2]:
+ tgen.add_router("r{}".format(rtr))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for rtr in [1, 2]:
+ tgen.gears["r{}".format(rtr)].cmd("ip link add vrf1 type vrf table 10")
+ tgen.gears["r{}".format(rtr)].cmd("ip link set vrf1 up")
+ tgen.gears["r{}".format(rtr)].cmd(
+ "ip address add dev vrf1 192.0.3.{}/32".format(rtr)
+ )
+ tgen.gears["r{}".format(rtr)].run(
+ "sysctl -w net.mpls.conf.r{}-eth0.input=1".format(rtr)
+ )
+ tgen.gears["r{}".format(rtr)].run("sysctl -w net.mpls.conf.vrf1.input=1")
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def check_bgp_vpn_prefix(label, rname="r1", rd=None):
+ tgen = get_topogen()
+
+ if rd:
+ output = json.loads(
+ tgen.gears[rname].vtysh_cmd(
+ "show bgp ipv4 vpn rd {} 192.0.3.2/32 json".format(rd)
+ )
+ )
+ else:
+ output = json.loads(
+ tgen.gears[rname].vtysh_cmd(
+ "show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json"
+ )
+ )
+
+ if label == "auto":
+ expected = {
+ "paths": [
+ {
+ "valid": True,
+ "aspath": {"string": "65002"},
+ "nexthops": [{"ip": "192.0.2.2"}],
+ },
+ ]
+ }
+ elif label and not rd:
+ expected = {
+ "paths": [
+ {
+ "valid": True,
+ "remoteLabel": label,
+ "aspath": {"string": "65002"},
+ "nexthops": [{"ip": "192.0.2.2"}],
+ },
+ ]
+ }
+ elif label and rd:
+ expected = {
+ "102:1": {
+ "prefix": "192.0.3.2/32",
+ "paths": [
+ {
+ "valid": True,
+ "remoteLabel": label,
+ "nexthops": [{"ip": "0.0.0.0"}],
+ }
+ ],
+ }
+ }
+ else:
+ expected = {}
+
+ return topotest.json_cmp(output, expected, exact=(label is None))
+
+
+def check_mpls_table(label, protocol):
+ tgen = get_topogen()
+
+ if label == "auto":
+ cmd = "show mpls table json"
+ else:
+ cmd = "show mpls table {} json".format(label)
+
+ output = json.loads(tgen.gears["r2"].vtysh_cmd(cmd))
+
+ if label == "auto" and protocol:
+ output_copy = deepcopy(output)
+ for key, data in output_copy.items():
+ for nexthop in data.get("nexthops", []):
+ if nexthop.get("type", None) != protocol:
+ continue
+ output = data
+ break
+
+ if protocol:
+ expected = {
+ "nexthops": [
+ {
+ "type": protocol,
+ },
+ ]
+ }
+ else:
+ expected = {}
+
+ return topotest.json_cmp(output, expected, exact=(protocol is None))
+
+
+def check_mpls_ldp_binding():
+ tgen = get_topogen()
+
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd("show mpls ldp binding 192.0.2.2/32 json")
+ )
+ expected = {
+ "bindings": [
+ {
+ "prefix": "192.0.2.2/32",
+ "localLabel": "16", # first available label
+ "inUse": 1,
+ },
+ ]
+ }
+
+ return topotest.json_cmp(output, expected)
+
+
+def test_convergence():
+ "Test protocol convergence"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Check BGP and LDP convergence")
+ test_func = functools.partial(check_bgp_vpn_prefix, 2222)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see BGP prefix on R1"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, 2222, "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: [2222/2222]" in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_export_16():
+    "Test that assigning the label value of 16 is not possible because it is used by LDP"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export 16"
+ )
+
+ step("Check that label vpn export 16 fails")
+ test_func = functools.partial(check_bgp_vpn_prefix, None)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Unexpected BGP prefix on R1"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, 2222, None)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Unexpected BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp" not in output, "Unexpected BGP label chunk"
+
+
+def test_vpn_label_export_2222():
+ "Test that setting back the label value of 2222 works"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export 2222"
+ )
+
+ step("Check that label vpn export 2222 is OK")
+ test_func = functools.partial(check_bgp_vpn_prefix, 2222)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see BGP prefix on R1"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, "auto", "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: [2222/2222]" in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_export_auto():
+ "Test that setting label vpn export auto works"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export auto"
+ )
+
+ step("Check that label vpn export auto is OK")
+ test_func = functools.partial(check_bgp_vpn_prefix, "auto")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see BGP prefix on R1"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, "auto", "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_export_no_auto():
+ "Test that UNsetting label vpn export auto removes the prefix from R1 table and R2 LDP table"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json")
+ )
+
+ auto_label = output.get("paths")[0].get("remoteLabel", None)
+ assert auto_label is not None, "Failed to fetch prefix label on R1"
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "no label vpn export auto"
+ )
+
+ step("Check that no label vpn export auto is OK")
+ test_func = functools.partial(check_bgp_vpn_prefix, 3, rname="r2", rd="102:1")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefix with label 3 on R2"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, auto_label, None)
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert result is None, "Unexpected BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " not in output, "Unexpected BGP label chunk"
+
+
+def test_vpn_label_export_auto_back():
+ "Test that setting back label vpn export auto works"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ output = json.loads(
+ tgen.gears["r2"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json")
+ )
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export auto"
+ )
+
+ step("Check that label vpn export auto is OK")
+ test_func = functools.partial(check_bgp_vpn_prefix, "auto")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see BGP prefix on R1"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, "auto", "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_export_manual_from_auto():
+ "Test that setting a manual label value from the BGP chunk range works"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json")
+ )
+
+ auto_label = output.get("paths")[0].get("remoteLabel", None)
+ assert auto_label is not None, "Failed to fetch prefix label on R1"
+
+ auto_label = auto_label + 1
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export {}".format(auto_label)
+ )
+
+ step("Check that label vpn export {} is OK".format(auto_label))
+ test_func = functools.partial(
+ check_bgp_vpn_prefix, auto_label, rname="r2", rd="102:1"
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see BGP prefix on R2"
+
+ test_func = functools.partial(check_mpls_ldp_binding)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP binding on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ test_func = functools.partial(check_mpls_table, auto_label, "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+ assert re.match(
+ r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+ ), "Failed to see LDP label chunk"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_configure_dynamic_range():
+ "Test that if a dynamic range is configured, then the next dynamic allocations will be done in that block"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ tgen.gears["r2"].vtysh_cmd("conf\n" "mpls label dynamic-block 500 1000\n")
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export auto"
+ )
+ step("Check that label vpn export auto starting at 500 is OK")
+ test_func = functools.partial(check_bgp_vpn_prefix, 500, rname="r2", rd="102:1")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefix on R2"
+
+ test_func = functools.partial(check_mpls_table, 500, "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " in output, "Failed to see BGP label chunk"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
+
+
+def test_vpn_label_restart_ldp():
+ "Test that if a dynamic range is configured, then when LDP restarts, it follows the new dynamic range"
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router_list = tgen.routers()
+
+ step("Kill LDP on R2")
+ kill_router_daemons(tgen, "r2", ["ldpd"])
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto ldp: " not in output, "Unexpected LDP label chunk"
+
+ step("Bring up LDP on R2")
+
+ start_router_daemons(tgen, "r2", ["ldpd"])
+
+ test_func = functools.partial(check_mpls_table, 628, "LDP")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see LDP label on R2"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto ldp: [628/691]" in output, "Failed to see LDP label chunk [628/691]"
+ assert "Proto ldp: [692/755]" in output, "Failed to see LDP label chunk [692/755]"
+
+
+def test_vpn_label_unconfigure_dynamic_range():
+ "Test that if the dynamic range is unconfigured, then the next dynamic allocations will be done at the first free place."
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ tgen.gears["r2"].vtysh_cmd("conf\n" "no mpls label dynamic-block 500 1000\n")
+ step("Check that unconfiguring label vpn export auto will remove BGP label chunk")
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "no label vpn export auto"
+ )
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " not in output, "Unexpected BGP label chunk"
+
+ tgen.gears["r2"].vtysh_cmd(
+ "conf\n"
+ "router bgp 65002 vrf vrf1\n"
+ "address-family ipv4 unicast\n"
+ "label vpn export auto"
+ )
+ step("Check that label vpn export auto starting at 16 is OK")
+ test_func = functools.partial(check_bgp_vpn_prefix, 16, rname="r2", rd="102:1")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefix on R2"
+
+ test_func = functools.partial(check_mpls_table, 16, "BGP")
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert result is None, "Failed to see BGP label on R2"
+
+ output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+ assert "Proto bgp: " in output, "Failed to see BGP label chunk"
diff --git a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
index d35e4ef463..17b9accb4a 100644
--- a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
+++ b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
@@ -2,5 +2,5 @@
"ledger":0,
"inUse":0,
"requests":0,
- "labelChunks":1
+ "labelChunks":0
}
diff --git a/tests/topotests/bgp_route_server_client/r1/bgpd.conf b/tests/topotests/bgp_route_server_client/r1/bgpd.conf
index 9826b671f9..e464e6c50b 100644
--- a/tests/topotests/bgp_route_server_client/r1/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r1/bgpd.conf
@@ -2,6 +2,7 @@
router bgp 65001
bgp router-id 10.10.10.1
no bgp ebgp-requires-policy
+ no bgp enforce-first-as
neighbor 2001:db8:1::1 remote-as external
neighbor 2001:db8:1::1 timers 3 10
neighbor 2001:db8:1::1 timers connect 5
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json
index 3cc2fddcfa..7a4e0d7452 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 2,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json
index 95570541c8..0dcdec678f 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 2,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json
index 3cc2fddcfa..7a4e0d7452 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 2,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json
index eb3433301b..205079574c 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 4,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json
index 5517fc738a..7a4e0d7452 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 6,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json
index 25b7a8616f..0fdd3d6dc0 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 2,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json
index a1f21585d7..e289df1d44 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 4,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json
index 7eeccd1496..0fdd3d6dc0 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 6,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json
index 95570541c8..0dcdec678f 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 2,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json
index d801671fdc..a440ab4248 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 4,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json
index 25da05b0d4..0dcdec678f 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 6,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json
index 2cd47b9ce5..03bbcc008d 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 2,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json
index f390ef69b1..5c70cf6450 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 4,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json
index 3353d75eda..03bbcc008d 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json
@@ -1,7 +1,6 @@
{
"vrfId": 0,
"vrfName": "default",
- "tableVersion": 6,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
diff --git a/tests/topotests/bgp_vpn_5549_route_map/test_bgp_vpn_5549_route_map.py b/tests/topotests/bgp_vpn_5549_route_map/test_bgp_vpn_5549_route_map.py
index 84ef603d6e..eb29875d50 100644
--- a/tests/topotests/bgp_vpn_5549_route_map/test_bgp_vpn_5549_route_map.py
+++ b/tests/topotests/bgp_vpn_5549_route_map/test_bgp_vpn_5549_route_map.py
@@ -144,7 +144,7 @@ def test_bgp_vpn_5549():
"2001:db8:1::1": {
"valid": True,
"complete": True,
- "igpMetric": 20,
+ "igpMetric": 10,
"pathCount": 2,
"peer": "2001:db8:1::1",
"nexthops": [{"interfaceName": "pe2-eth0"}],
diff --git a/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf
index 3bbcc20e9e..473e56b32a 100644
--- a/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf
+++ b/tests/topotests/bgp_vpnv4_asbr/r1/bgpd.conf
@@ -1,6 +1,7 @@
router bgp 65500
bgp router-id 192.0.2.1
no bgp ebgp-requires-policy
+ no bgp enforce-first-as
neighbor 192.0.2.100 remote-as 65500
neighbor 192.0.2.100 update-source lo
neighbor 192.168.0.100 remote-as 65500
diff --git a/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf
index 4c84d52bd9..c7244c0e1f 100644
--- a/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf
+++ b/tests/topotests/bgp_vpnv4_asbr/r2/bgpd.conf
@@ -1,9 +1,10 @@
-debug bgp nht
-debug bgp zebra
-debug bgp labelpool
+!debug bgp nht
+!debug bgp zebra
+!debug bgp labelpool
router bgp 65500
bgp router-id 192.0.2.2
no bgp ebgp-requires-policy
+ no bgp enforce-first-as
neighbor 192.0.2.100 remote-as 65500
neighbor 192.0.2.100 update-source lo
neighbor 192.168.0.100 remote-as 65500
diff --git a/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf b/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf
index c5d5727fba..b7592e444d 100644
--- a/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf
+++ b/tests/topotests/bgp_vpnv4_asbr/r3/bgpd.conf
@@ -1,6 +1,7 @@
router bgp 65501
bgp router-id 192.0.2.3
no bgp ebgp-requires-policy
+ no bgp enforce-first-as
neighbor 192.168.1.200 remote-as 65502
address-family ipv4 unicast
no neighbor 192.168.1.200 activate
diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
index ce278ed7a7..d4c355a44a 100644
--- a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
+++ b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py
@@ -151,6 +151,16 @@ def teardown_module(_mod):
tgen.stop_topology()
+def check_bgp_vpnv4_prefix_presence(router, prefix):
+ "Check the presence of a prefix"
+ tgen = get_topogen()
+
+ dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
+ if not dump:
+ return "{}, prefix ipv4 vpn {} is not installed yet".format(router.name, prefix)
+ return None
+
+
def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None):
"""
Dump and check that vpnv4 entries have the same MPLS label value
@@ -163,6 +173,12 @@ def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=N
stored_label_inited = False
for prefix in group:
+ test_func = functools.partial(check_bgp_vpnv4_prefix_presence, router, prefix)
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, prefix ipv4 vpn {} is not installed yet".format(
+ router.name, prefix
+ )
+
dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
assert dump, "{0}, {1}, route distinguisher not present".format(
router.name, prefix
diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py
index e936ccc1e4..3d5f8f643b 100644
--- a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py
+++ b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py
@@ -54,7 +54,7 @@ pytestmark = [pytest.mark.bgpd]
PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"]
PREFIXES_R12 = ["172:31::12/128", "172:31::15/128"]
PREFIXES_REDIST_R14 = ["172:31::14/128"]
-PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"]
+PREFIXES_CONNECTED = ["192:168::255:0/112", "192:2::/64"]
def build_topo(tgen):
@@ -150,6 +150,16 @@ def teardown_module(_mod):
tgen.stop_topology()
+def check_bgp_vpnv6_prefix_presence(router, prefix):
+ "Check the presence of a prefix"
+ tgen = get_topogen()
+
+ dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True)
+ if not dump:
+ return "{}, prefix ipv6 vpn {} is not installed yet".format(router.name, prefix)
+ return None
+
+
def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None):
"""
Dump and check that vpnv6 entries have the same MPLS label value
@@ -162,6 +172,12 @@ def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=N
stored_label_inited = False
for prefix in group:
+ test_func = functools.partial(check_bgp_vpnv6_prefix_presence, router, prefix)
+ success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assert success, "{}, prefix ipv6 vpn {} is not installed yet".format(
+ router.name, prefix
+ )
+
dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True)
for rd, pathes in dump.items():
for path in pathes["paths"]:
@@ -237,7 +253,9 @@ def check_show_mpls_table(router, blacklist=None, label_list=None, whitelist=Non
label_list.add(in_label)
for nh in label_info["nexthops"]:
if "installed" not in nh.keys():
- return "{} {} is not installed yet on {}".format(in_label, label_info, router.name)
+ return "{} {} is not installed yet on {}".format(
+ in_label, label_info, router.name
+ )
if nh["installed"] != True or nh["type"] != "BGP":
return "{}, show mpls table, nexthop is not installed".format(
router.name
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 8491314e16..3eb808ac4f 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1383,6 +1383,7 @@ class Router(Node):
)
self.perf_daemons = {}
+ self.valgrind_gdb_daemons = {}
# If this topology is using old API and doesn't have logdir
# specified, then attempt to generate an unique logdir.
@@ -1880,6 +1881,19 @@ class Router(Node):
# do not since apparently presence of the pidfile impacts BGP GR
self.cmd_status("rm -f {0}.pid {0}.vty".format(runbase))
+ def do_gdb():
+ return (
+ (gdb_routers or gdb_daemons)
+ and (
+ not gdb_routers
+ or self.name in gdb_routers
+ or "all" in gdb_routers
+ )
+ and (
+ not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons
+ )
+ )
+
rediropt = " > {0}.out 2> {0}.err".format(daemon)
if daemon == "snmpd":
binary = "/usr/sbin/snmpd"
@@ -1915,13 +1929,21 @@ class Router(Node):
supp_file = os.path.abspath(
os.path.join(this_dir, "../../../tools/valgrind.supp")
)
- cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
- daemon, self.logdir, self.name, supp_file
+
+ valgrind_logbase = f"{self.logdir}/{self.name}.valgrind.{daemon}"
+ if do_gdb():
+ cmdenv += " exec"
+ cmdenv += (
+ " /usr/bin/valgrind --num-callers=50"
+ f" --log-file={valgrind_logbase}.%p"
+ f" --leak-check=full --suppressions={supp_file}"
)
if valgrind_extra:
cmdenv += (
" --gen-suppressions=all --expensive-definedness-checks=yes"
)
+ if do_gdb():
+ cmdenv += " --vgdb-error=0"
elif daemon in strace_daemons or "all" in strace_daemons:
cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
daemon, self.logdir, self.name
@@ -1941,13 +1963,8 @@ class Router(Node):
cmdopt += " " + extra_opts
if (
- (not gdb_use_emacs or Router.gdb_emacs_router)
- and (gdb_routers or gdb_daemons)
- and (
- not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
- )
- and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
- ):
+ not gdb_use_emacs or Router.gdb_emacs_router or valgrind_memleaks
+ ) and do_gdb():
if Router.gdb_emacs_router is not None:
logger.warning(
"--gdb-use-emacs can only run a single router and daemon, using"
@@ -1963,20 +1980,69 @@ class Router(Node):
gdbcmd += " -ex 'set breakpoint pending on'"
for bp in gdb_breakpoints:
gdbcmd += " -ex 'b {}'".format(bp)
- gdbcmd += " -ex 'run {}'".format(cmdopt)
- self.run_in_window(gdbcmd, daemon)
- logger.info(
- "%s: %s %s launched in gdb window", self, self.routertype, daemon
- )
- elif (
- gdb_use_emacs
- and (daemon in gdb_daemons)
- and (not gdb_routers or self.name in gdb_routers)
- ):
+ if not valgrind_memleaks:
+ gdbcmd += " -ex 'run {}'".format(cmdopt)
+ self.run_in_window(gdbcmd, daemon)
+
+ logger.info(
+ "%s: %s %s launched in gdb window",
+ self,
+ self.routertype,
+ daemon,
+ )
+
+ else:
+ cmd = " ".join([cmdenv, binary, cmdopt])
+ p = self.popen(cmd)
+ self.valgrind_gdb_daemons[daemon] = p
+ if p.poll() and p.returncode:
+ self.logger.error(
+ '%s: Failed to launch "%s" (%s) with valgrind using: %s',
+ self,
+ daemon,
+ p.returncode,
+ cmd,
+ )
+ assert False, "Failed to launch valgrind with gdb"
+ logger.debug(
+ "%s: %s %s started with valgrind", self, self.routertype, daemon
+ )
+ # Now read the error log file until we are given launch priority
+ timeout = Timeout(30)
+ vpid = None
+ for remaining in timeout:
+ try:
+ fname = f"{valgrind_logbase}.{p.pid}"
+ logging.info("Checking %s for valgrind launch info", fname)
+ o = open(fname, encoding="ascii").read()
+ except FileNotFoundError:
+ logging.info("%s not present yet", fname)
+ else:
+ m = re.search(r"target remote \| (.*vgdb) --pid=(\d+)", o)
+ if m:
+ vgdb_cmd = m.group(0)
+ break
+ time.sleep(1)
+ else:
+ assert False, "Failed to get launch info for valgrind with gdb"
+
+ gdbcmd += f" -ex '{vgdb_cmd}'"
+ gdbcmd += " -ex 'c'"
+ self.run_in_window(gdbcmd, daemon)
+
+ logger.info(
+ "%s: %s %s launched in gdb window",
+ self,
+ self.routertype,
+ daemon,
+ )
+ elif gdb_use_emacs and do_gdb():
assert Router.gdb_emacs_router is None
Router.gdb_emacs_router = self
+ assert not valgrind_memleaks, "valgrind gdb in emacs not supported yet"
+
if daemon == "snmpd":
cmdopt += " -f "
cmdopt += rediropt
@@ -2033,7 +2099,7 @@ class Router(Node):
f'(gud-gdb-run-command-fetch-lines "br {bp}" "*gud-gdb*")',
]
)
- # gdb run cmd
+
self.cmd_raises(
ecbin
+ [
diff --git a/tests/topotests/ospf6_gr_topo1/rt1/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt1/show_ipv6_route.json
index 66ee57ce84..181d376774 100644
--- a/tests/topotests/ospf6_gr_topo1/rt1/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt1/show_ipv6_route.json
@@ -6,7 +6,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
@@ -25,7 +25,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -45,7 +45,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -65,7 +65,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -85,7 +85,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":50,
+ "metric":40,
"installed":true,
"nexthops":[
{
@@ -105,7 +105,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -125,7 +125,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":50,
+ "metric":40,
"installed":true,
"nexthops":[
{
diff --git a/tests/topotests/ospf6_gr_topo1/rt2/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt2/show_ipv6_route.json
index 624ff709e3..13b5cd4468 100644
--- a/tests/topotests/ospf6_gr_topo1/rt2/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt2/show_ipv6_route.json
@@ -8,7 +8,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -26,7 +26,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
@@ -45,7 +45,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -65,7 +65,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -85,7 +85,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -105,7 +105,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -125,7 +125,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
diff --git a/tests/topotests/ospf6_gr_topo1/rt3/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt3/show_ipv6_route.json
index f9b43dcdb9..db6ec3e3a8 100644
--- a/tests/topotests/ospf6_gr_topo1/rt3/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt3/show_ipv6_route.json
@@ -8,7 +8,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -28,7 +28,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -46,7 +46,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
@@ -65,7 +65,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -85,7 +85,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -105,7 +105,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -125,7 +125,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
diff --git a/tests/topotests/ospf6_gr_topo1/rt4/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt4/show_ipv6_route.json
index f5212da4f6..08ccff2fc5 100644
--- a/tests/topotests/ospf6_gr_topo1/rt4/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt4/show_ipv6_route.json
@@ -8,7 +8,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -28,7 +28,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -48,7 +48,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -66,7 +66,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
@@ -85,7 +85,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -105,7 +105,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -125,7 +125,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
diff --git a/tests/topotests/ospf6_gr_topo1/rt5/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt5/show_ipv6_route.json
index 5ea4f699fe..8ddd55b132 100644
--- a/tests/topotests/ospf6_gr_topo1/rt5/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt5/show_ipv6_route.json
@@ -8,7 +8,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":50,
+ "metric":40,
"installed":true,
"nexthops":[
{
@@ -28,7 +28,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -48,7 +48,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -68,7 +68,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -86,7 +86,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
@@ -105,7 +105,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -125,7 +125,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":50,
+ "metric":40,
"installed":true,
"nexthops":[
{
diff --git a/tests/topotests/ospf6_gr_topo1/rt6/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt6/show_ipv6_route.json
index 862f1baffb..9d45b09be8 100644
--- a/tests/topotests/ospf6_gr_topo1/rt6/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt6/show_ipv6_route.json
@@ -8,7 +8,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -28,7 +28,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -48,7 +48,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -68,7 +68,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -88,7 +88,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -106,7 +106,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
@@ -125,7 +125,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
diff --git a/tests/topotests/ospf6_gr_topo1/rt7/show_ipv6_route.json b/tests/topotests/ospf6_gr_topo1/rt7/show_ipv6_route.json
index f5f8f710e5..c4f841468d 100644
--- a/tests/topotests/ospf6_gr_topo1/rt7/show_ipv6_route.json
+++ b/tests/topotests/ospf6_gr_topo1/rt7/show_ipv6_route.json
@@ -8,7 +8,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":50,
+ "metric":40,
"installed":true,
"nexthops":[
{
@@ -28,7 +28,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -48,7 +48,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":30,
+ "metric":20,
"installed":true,
"nexthops":[
{
@@ -68,7 +68,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":40,
+ "metric":30,
"installed":true,
"nexthops":[
{
@@ -88,7 +88,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":50,
+ "metric":40,
"installed":true,
"nexthops":[
{
@@ -108,7 +108,7 @@
"selected":true,
"destSelected":true,
"distance":110,
- "metric":20,
+ "metric":10,
"installed":true,
"nexthops":[
{
@@ -126,7 +126,7 @@
"vrfId":0,
"vrfName":"default",
"distance":110,
- "metric":10,
+ "metric":0,
"nexthops":[
{
"directlyConnected":true,
diff --git a/tests/topotests/ospf6_loopback_cost/__init__.py b/tests/topotests/ospf6_loopback_cost/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/ospf6_loopback_cost/__init__.py
diff --git a/tests/topotests/ospf6_loopback_cost/r1/frr.conf b/tests/topotests/ospf6_loopback_cost/r1/frr.conf
new file mode 100644
index 0000000000..d85166bc6c
--- /dev/null
+++ b/tests/topotests/ospf6_loopback_cost/r1/frr.conf
@@ -0,0 +1,16 @@
+!
+int lo
+ ipv6 address 2001:db8::1/128
+ ipv6 ospf6 area 0.0.0.0
+ ipv6 ospf6 passive
+!
+int r1-eth0
+ ipv6 address 2001:db8:1::1/64
+ ipv6 ospf6 area 0.0.0.0
+ ipv6 ospf6 hello-interval 1
+ ipv6 ospf6 dead-interval 4
+!
+router ospf6
+ ospf6 router-id 0.0.0.1
+exit
+!
diff --git a/tests/topotests/ospf6_loopback_cost/r2/frr.conf b/tests/topotests/ospf6_loopback_cost/r2/frr.conf
new file mode 100644
index 0000000000..8f3e2caab6
--- /dev/null
+++ b/tests/topotests/ospf6_loopback_cost/r2/frr.conf
@@ -0,0 +1,16 @@
+!
+int lo
+ ipv6 address 2001:db8::2/128
+ ipv6 ospf6 area 0.0.0.0
+ ipv6 ospf6 passive
+!
+int r2-eth0
+ ipv6 address 2001:db8:1::2/64
+ ipv6 ospf6 area 0.0.0.0
+ ipv6 ospf6 hello-interval 1
+ ipv6 ospf6 dead-interval 4
+!
+router ospf6
+ ospf6 router-id 0.0.0.2
+exit
+!
diff --git a/tests/topotests/ospf6_loopback_cost/test_ospf6_loopback_cost.py b/tests/topotests/ospf6_loopback_cost/test_ospf6_loopback_cost.py
new file mode 100644
index 0000000000..8e7a7ea40a
--- /dev/null
+++ b/tests/topotests/ospf6_loopback_cost/test_ospf6_loopback_cost.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if OSPFv3 loopback interfaces get a cost of 0.
+
+https://www.rfc-editor.org/rfc/rfc5340.html#page-37:
+
+If the interface type is point-to-multipoint or the interface is
+in the state Loopback, the global scope IPv6 addresses associated
+with the interface (if any) are copied into the intra-area-prefix-LSA
+with the PrefixOptions LA-bit set, the PrefixLength set to 128, and
+the metric set to 0.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = pytest.mark.ospf6d
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_ospf6_loopback_cost():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ def _show_ipv6_route():
+ output = json.loads(r1.vtysh_cmd("show ipv6 route json"))
+ expected = {
+ "2001:db8::1/128": [
+ {
+ "metric": 0,
+ "distance": 110,
+ }
+ ],
+ "2001:db8::2/128": [
+ {
+ "metric": 10,
+ "distance": 110,
+ }
+ ],
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _show_ipv6_route,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Loopback cost isn't 0"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/zebra/test_lm_plugin.c b/tests/zebra/test_lm_plugin.c
index 9ad0bc4e17..9895c025f0 100644
--- a/tests/zebra/test_lm_plugin.c
+++ b/tests/zebra/test_lm_plugin.c
@@ -48,7 +48,7 @@ static int lm_get_chunk_pi(struct label_manager_chunk **lmc,
uint32_t base, vrf_id_t vrf_id)
{
if (base == 0)
- *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size);
+ *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size, true);
else
*lmc = assign_label_chunk(10, 55, 0, 1, size, base);
diff --git a/yang/frr-bgp-neighbor.yang b/yang/frr-bgp-neighbor.yang
index 5a4c37974f..b199ab9469 100644
--- a/yang/frr-bgp-neighbor.yang
+++ b/yang/frr-bgp-neighbor.yang
@@ -76,7 +76,7 @@ submodule frr-bgp-neighbor {
leaf enforce-first-as {
type boolean;
- default "false";
+ default "true";
description
"When set to 'true' it will enforce the first AS for EBGP routes.";
}
diff --git a/zebra/interface.c b/zebra/interface.c
index 919cd11bc8..1afd9d5a7d 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -3133,7 +3133,7 @@ static void if_dump_vty_json(struct vty *vty, struct interface *ifp,
json_object_string_add(json_if, "lastLinkDown",
zebra_if->down_last);
- zebra_ptm_show_status(vty, json, ifp);
+ zebra_ptm_show_status(vty, json_if, ifp);
json_object_string_add(json_if, "vrfName", ifp->vrf->name);
diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index fa7dbb0a25..8ae6e0cc3f 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -51,10 +51,14 @@ DEFINE_HOOK(lm_get_chunk,
DEFINE_HOOK(lm_release_chunk,
(struct zserv *client, uint32_t start, uint32_t end),
(client, start, end));
+/* show running-config needs an API for dynamic-block */
+DEFINE_HOOK(lm_write_label_block_config,
+ (struct vty *vty, struct zebra_vrf *zvrf),
+ (vty, zvrf));
DEFINE_HOOK(lm_cbs_inited, (), ());
-/* define wrappers to be called in zapi_msg.c (as hooks must be called in
- * source file where they were defined)
+/* define wrappers to be called in zapi_msg.c or zebra_mpls_vty.c (as hooks
+ * must be called in source file where they were defined)
*/
void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id)
{
@@ -71,6 +75,11 @@ void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end)
hook_call(lm_release_chunk, client, start, end);
}
+int lm_write_label_block_config_call(struct vty *vty, struct zebra_vrf *zvrf)
+{
+ return hook_call(lm_write_label_block_config, vty, zvrf);
+}
+
/* forward declarations of the static functions to be used for some hooks */
static int label_manager_connect(struct zserv *client, vrf_id_t vrf_id);
static int label_manager_disconnect(struct zserv *client);
@@ -80,6 +89,8 @@ static int label_manager_get_chunk(struct label_manager_chunk **lmc,
vrf_id_t vrf_id);
static int label_manager_release_label_chunk(struct zserv *client,
uint32_t start, uint32_t end);
+static int label_manager_write_label_block_config(struct vty *vty,
+ struct zebra_vrf *zvrf);
void delete_label_chunk(void *val)
{
@@ -138,6 +149,8 @@ void lm_hooks_register(void)
hook_register(lm_client_disconnect, label_manager_disconnect);
hook_register(lm_get_chunk, label_manager_get_chunk);
hook_register(lm_release_chunk, label_manager_release_label_chunk);
+ hook_register(lm_write_label_block_config,
+ label_manager_write_label_block_config);
}
void lm_hooks_unregister(void)
{
@@ -145,24 +158,129 @@ void lm_hooks_unregister(void)
hook_unregister(lm_client_disconnect, label_manager_disconnect);
hook_unregister(lm_get_chunk, label_manager_get_chunk);
hook_unregister(lm_release_chunk, label_manager_release_label_chunk);
+ hook_unregister(lm_write_label_block_config,
+ label_manager_write_label_block_config);
}
-DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table",
+static json_object *lmc_json(struct label_manager_chunk *lmc)
+{
+ json_object *json = json_object_new_object();
+
+ json_object_string_add(json, "protocol", zebra_route_string(lmc->proto));
+ json_object_int_add(json, "instance", lmc->instance);
+ json_object_int_add(json, "sessionId", lmc->session_id);
+ json_object_int_add(json, "start", lmc->start);
+ json_object_int_add(json, "end", lmc->end);
+ json_object_boolean_add(json, "dynamic", lmc->is_dynamic);
+ return json;
+}
+
+DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table [json$uj]",
SHOW_STR
DEBUG_STR
- "Display allocated label chunks\n")
+ "Display allocated label chunks\n"
+ JSON_STR)
{
struct label_manager_chunk *lmc;
struct listnode *node;
+ json_object *json_array = NULL, *json_global = NULL, *json_dyn_block;
+
+ if (uj) {
+ json_array = json_object_new_array();
+ json_global = json_object_new_object();
+ json_dyn_block = json_object_new_object();
+ json_object_int_add(json_dyn_block, "lowerBound",
+ lbl_mgr.dynamic_block_start);
+ json_object_int_add(json_dyn_block, "upperBound",
+ lbl_mgr.dynamic_block_end);
+ json_object_object_add(json_global, "dynamicBlock",
+ json_dyn_block);
+ } else
+ vty_out(vty, "Dynamic block: lower-bound %u, upper-bound %u\n",
+ lbl_mgr.dynamic_block_start, lbl_mgr.dynamic_block_end);
for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
+ if (uj) {
+ json_object_array_add(json_array, lmc_json(lmc));
+ continue;
+ }
vty_out(vty, "Proto %s: [%u/%u]\n",
zebra_route_string(lmc->proto), lmc->start, lmc->end);
}
+ if (uj) {
+ json_object_object_add(json_global, "chunks", json_array);
+ vty_json(vty, json_global);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFPY(mpls_label_dynamic_block, mpls_label_dynamic_block_cmd,
+ "[no$no] mpls label dynamic-block [(16-1048575)$start (16-1048575)$end]",
+ NO_STR
+ MPLS_STR
+ "Label configuration\n"
+ "Configure dynamic label block\n"
+ "Start label\n"
+ "End label\n")
+{
+ struct listnode *node;
+ struct label_manager_chunk *lmc;
+ /* unset dynamic range */
+ if (no ||
+ (start == MPLS_LABEL_UNRESERVED_MIN && end == MPLS_LABEL_MAX)) {
+ lbl_mgr.dynamic_block_start = MPLS_LABEL_UNRESERVED_MIN;
+ lbl_mgr.dynamic_block_end = MPLS_LABEL_MAX;
+ return CMD_SUCCESS;
+ }
+ if (!start || !end) {
+ vty_out(vty,
+ "%% label dynamic-block, range missing, aborting\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ if (start > end) {
+ vty_out(vty,
+ "%% label dynamic-block, wrong range (%ld > %ld), aborting\n",
+ start, end);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
+ if (lmc->proto == NO_PROTO)
+ continue;
+ if (!lmc->is_dynamic && lmc->start >= (uint32_t)start &&
+ lmc->end <= (uint32_t)end) {
+ vty_out(vty,
+ "%% Found a static label chunk [%u-%u] for %s in conflict with the dynamic label block\n",
+ lmc->start, lmc->end,
+ zebra_route_string(lmc->proto));
+ return CMD_WARNING_CONFIG_FAILED;
+ } else if (lmc->is_dynamic && (lmc->end > (uint32_t)end ||
+ lmc->start < (uint32_t)start)) {
+ vty_out(vty,
+ "%% Found a dynamic label chunk [%u-%u] for %s outside the new dynamic label block, consider restarting the service\n",
+ lmc->start, lmc->end,
+ zebra_route_string(lmc->proto));
+ }
+ }
+ lbl_mgr.dynamic_block_start = start;
+ lbl_mgr.dynamic_block_end = end;
return CMD_SUCCESS;
}
+static int label_manager_write_label_block_config(struct vty *vty,
+ struct zebra_vrf *zvrf)
+{
+ if (zvrf_id(zvrf) != VRF_DEFAULT)
+ return 0;
+ if (lbl_mgr.dynamic_block_start == MPLS_LABEL_UNRESERVED_MIN &&
+ lbl_mgr.dynamic_block_end == MPLS_LABEL_MAX)
+ return 0;
+ vty_out(vty, "mpls label dynamic-block %u %u\n",
+ lbl_mgr.dynamic_block_start, lbl_mgr.dynamic_block_end);
+ return 1;
+}
+
/**
* Init label manager (or proxy to an external one)
*/
@@ -170,6 +288,8 @@ void label_manager_init(void)
{
lbl_mgr.lc_list = list_new();
lbl_mgr.lc_list->del = delete_label_chunk;
+ lbl_mgr.dynamic_block_start = MPLS_LABEL_UNRESERVED_MIN;
+ lbl_mgr.dynamic_block_end = MPLS_LABEL_MAX;
hook_register(zserv_client_close, lm_client_disconnect_cb);
/* register default hooks for the label manager actions */
@@ -179,12 +299,13 @@ void label_manager_init(void)
hook_call(lm_cbs_inited);
install_element(VIEW_NODE, &show_label_table_cmd);
+ install_element(CONFIG_NODE, &mpls_label_dynamic_block_cmd);
}
/* alloc and fill a label chunk */
struct label_manager_chunk *
create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
- uint8_t keep, uint32_t start, uint32_t end)
+ uint8_t keep, uint32_t start, uint32_t end, bool is_dynamic)
{
/* alloc chunk, fill it and return it */
struct label_manager_chunk *lmc =
@@ -196,6 +317,7 @@ create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
lmc->instance = instance;
lmc->session_id = session_id;
lmc->keep = keep;
+ lmc->is_dynamic = is_dynamic;
return lmc;
}
@@ -223,6 +345,15 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance,
return NULL;
}
+ if ((lbl_mgr.dynamic_block_start != MPLS_LABEL_UNRESERVED_MIN ||
+ lbl_mgr.dynamic_block_end != MPLS_LABEL_MAX) &&
+ base >= lbl_mgr.dynamic_block_start &&
+ end <= lbl_mgr.dynamic_block_end) {
+ zlog_warn("Invalid LM request arguments: base: %u, size: %u for %s in conflict with the dynamic label block",
+ base, size, zebra_route_string(proto));
+ return NULL;
+ }
+
/* Scan the existing chunks to see if the requested range of labels
* falls inside any of such chunks */
for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
@@ -254,7 +385,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance,
/* insert chunk between existing chunks */
if (insert_node) {
lmc = create_label_chunk(proto, instance, session_id, keep,
- base, end);
+ base, end, false);
listnode_add_before(lbl_mgr.lc_list, insert_node, lmc);
return lmc;
}
@@ -277,7 +408,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance,
}
lmc = create_label_chunk(proto, instance, session_id, keep,
- base, end);
+ base, end, false);
if (last_node)
listnode_add_before(lbl_mgr.lc_list, last_node, lmc);
else
@@ -288,7 +419,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance,
/* create a new chunk past all the existing ones and link at
* tail */
lmc = create_label_chunk(proto, instance, session_id, keep,
- base, end);
+ base, end, false);
listnode_add(lbl_mgr.lc_list, lmc);
return lmc;
}
@@ -313,9 +444,13 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
{
struct label_manager_chunk *lmc;
struct listnode *node;
- uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN;
+ uint32_t prev_end = lbl_mgr.dynamic_block_start - 1;
+ struct label_manager_chunk *lmc_block_last = NULL;
- /* handle chunks request with a specific base label */
+	/* handle chunk requests with a specific base label
+	 * - static label requests: BGP hard-set value, Pathd
+	 * - segment routing label requests
+	 */
if (base != MPLS_LABEL_BASE_ANY)
return assign_specific_label_chunk(proto, instance, session_id,
keep, size, base);
@@ -325,37 +460,44 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
/* first check if there's one available */
for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
- if (lmc->proto == NO_PROTO
- && lmc->end - lmc->start + 1 == size) {
+ if (lmc->start <= prev_end)
+ continue;
+ if (lmc->proto == NO_PROTO &&
+ lmc->end - lmc->start + 1 == size &&
+ lmc->end <= lbl_mgr.dynamic_block_end) {
lmc->proto = proto;
lmc->instance = instance;
lmc->session_id = session_id;
lmc->keep = keep;
+ lmc->is_dynamic = true;
return lmc;
}
/* check if we have a "hole" behind us that we can squeeze into
 */
- if ((lmc->start > prev_end) && (lmc->start - prev_end > size)) {
+ if (lmc->start - prev_end > size &&
+ prev_end + 1 + size <= lbl_mgr.dynamic_block_end) {
lmc = create_label_chunk(proto, instance, session_id,
keep, prev_end + 1,
- prev_end + size);
+ prev_end + size, true);
listnode_add_before(lbl_mgr.lc_list, node, lmc);
return lmc;
}
prev_end = lmc->end;
+
+ /* check if we have a chunk that goes over the end block */
+ if (lmc->end > lbl_mgr.dynamic_block_end)
+ continue;
+ lmc_block_last = lmc;
}
/* otherwise create a new one */
uint32_t start_free;
- if (list_isempty(lbl_mgr.lc_list))
- start_free = MPLS_LABEL_UNRESERVED_MIN;
+ if (lmc_block_last == NULL)
+ start_free = lbl_mgr.dynamic_block_start;
else
- start_free = ((struct label_manager_chunk *)listgetdata(
- listtail(lbl_mgr.lc_list)))
- ->end
- + 1;
+ start_free = lmc_block_last->end + 1;
- if (start_free > MPLS_LABEL_UNRESERVED_MAX - size + 1) {
+ if (start_free > lbl_mgr.dynamic_block_end - size + 1) {
flog_err(EC_ZEBRA_LM_EXHAUSTED_LABELS,
"Reached max labels. Start: %u, size: %u", start_free,
size);
@@ -364,7 +506,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
/* create chunk and link at tail */
lmc = create_label_chunk(proto, instance, session_id, keep, start_free,
- start_free + size - 1);
+ start_free + size - 1, true);
listnode_add(lbl_mgr.lc_list, lmc);
return lmc;
}
diff --git a/zebra/label_manager.h b/zebra/label_manager.h
index 74f40fab23..ab6ad7f639 100644
--- a/zebra/label_manager.h
+++ b/zebra/label_manager.h
@@ -42,6 +42,7 @@ struct label_manager_chunk {
unsigned short instance;
uint32_t session_id;
uint8_t keep;
+ uint8_t is_dynamic; /* Tell if chunk is dynamic or static */
uint32_t start; /* First label of the chunk */
uint32_t end; /* Last label of the chunk */
};
@@ -61,11 +62,14 @@ DECLARE_HOOK(lm_get_chunk,
DECLARE_HOOK(lm_release_chunk,
(struct zserv *client, uint32_t start, uint32_t end),
(client, start, end));
+DECLARE_HOOK(lm_write_label_block_config,
+ (struct vty *vty, struct zebra_vrf *zvrf),
+ (vty, zvrf));
DECLARE_HOOK(lm_cbs_inited, (), ());
-/* declare wrappers to be called in zapi_msg.c (as hooks must be called in
- * source file where they were defined)
+/* declare wrappers to be called in zapi_msg.c or zebra_mpls_vty.c (as hooks
+ * must be called in source file where they were defined)
*/
void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id);
void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client,
@@ -73,6 +77,7 @@ void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client,
vrf_id_t vrf_id);
void lm_release_chunk_call(struct zserv *client, uint32_t start,
uint32_t end);
+int lm_write_label_block_config_call(struct vty *vty, struct zebra_vrf *zvrf);
/* API for an external LM to return responses for requests */
int lm_client_connect_response(uint8_t proto, uint16_t instance,
@@ -82,7 +87,7 @@ int lm_client_connect_response(uint8_t proto, uint16_t instance,
/* convenience function to allocate an lmc to be consumed by the above API */
struct label_manager_chunk *
create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
- uint8_t keep, uint32_t start, uint32_t end);
+ uint8_t keep, uint32_t start, uint32_t end, bool is_dynamic);
void delete_label_chunk(void *val);
/* register/unregister callbacks for hooks */
@@ -95,6 +100,8 @@ void lm_hooks_unregister(void);
*/
struct label_manager {
struct list *lc_list;
+ uint32_t dynamic_block_start;
+ uint32_t dynamic_block_end;
};
void label_manager_init(void);
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index a5dec0458c..ec35842b0a 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -2413,19 +2413,21 @@ ssize_t netlink_route_multipath_msg_encode(int cmd, struct zebra_dplane_ctx *ctx
p, routedesc, bytelen, nexthop,
&req->n, &req->r, datalen, cmd))
return 0;
+
+ /*
+ * Add encapsulation information when
+ * installing via FPM.
+ */
+ if (fpm) {
+ if (!netlink_route_nexthop_encap(&req->n,
+ datalen,
+ nexthop))
+ return 0;
+ }
+
nexthop_num++;
break;
}
-
- /*
- * Add encapsulation information when installing via
- * FPM.
- */
- if (fpm) {
- if (!netlink_route_nexthop_encap(
- &req->n, datalen, nexthop))
- return 0;
- }
}
if (setsrc) {
diff --git a/zebra/zebra_mpls_vty.c b/zebra/zebra_mpls_vty.c
index e64e7009b4..fd09e6b444 100644
--- a/zebra/zebra_mpls_vty.c
+++ b/zebra/zebra_mpls_vty.c
@@ -22,6 +22,7 @@
#include "zebra/zebra_rnh.h"
#include "zebra/redistribute.h"
#include "zebra/zebra_routemap.h"
+#include "zebra/label_manager.h"
static int zebra_mpls_transit_lsp(struct vty *vty, int add_cmd,
const char *inlabel_str, const char *gate_str,
@@ -270,6 +271,8 @@ static int zebra_mpls_config(struct vty *vty)
write += zebra_mpls_write_lsp_config(vty, zvrf);
write += zebra_mpls_write_fec_config(vty, zvrf);
write += zebra_mpls_write_label_block_config(vty, zvrf);
+ write += lm_write_label_block_config_call(vty, zvrf);
+
return write;
}
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 50a7462d89..5480a94d27 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -308,7 +308,7 @@ static void zevpn_print_neigh_hash_all_evpn_detail(struct hash_bucket *bucket,
zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn) {
if (json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, json);
return;
}
num_neigh = hashcount(zevpn->neigh_table);
@@ -515,7 +515,7 @@ static void zevpn_print_mac_hash_all_evpn_detail(struct hash_bucket *bucket,
zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn) {
if (json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, json);
return;
}
wctx->zevpn = zevpn;
@@ -2590,14 +2590,15 @@ void zebra_vxlan_print_specific_rmac_l3vni(struct vty *vty, vni_t l3vni,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zl3vni = zl3vni_lookup(l3vni);
if (!zl3vni) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% L3-VNI %u doesn't exist\n", l3vni);
return;
@@ -2631,14 +2632,15 @@ void zebra_vxlan_print_rmacs_l3vni(struct vty *vty, vni_t l3vni, bool use_json)
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zl3vni = zl3vni_lookup(l3vni);
if (!zl3vni) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% L3-VNI %u does not exist\n", l3vni);
return;
@@ -2672,7 +2674,8 @@ void zebra_vxlan_print_rmacs_all_l3vni(struct vty *vty, bool use_json)
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -2698,7 +2701,8 @@ void zebra_vxlan_print_specific_nh_l3vni(struct vty *vty, vni_t l3vni,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -2709,7 +2713,7 @@ void zebra_vxlan_print_specific_nh_l3vni(struct vty *vty, vni_t l3vni,
zl3vni = zl3vni_lookup(l3vni);
if (!zl3vni) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json(vty, json);
else
vty_out(vty, "%% L3-VNI %u does not exist\n",
l3vni);
@@ -2721,7 +2725,7 @@ void zebra_vxlan_print_specific_nh_l3vni(struct vty *vty, vni_t l3vni,
if (!n) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, json);
else
vty_out(vty,
"%% Requested next-hop not present for L3-VNI %u\n",
@@ -2742,13 +2746,16 @@ static void l3vni_print_nh_table(struct hash *nh_table, struct vty *vty,
struct nh_walk_ctx wctx;
json_object *json = NULL;
- num_nh = hashcount(nh_table);
- if (!num_nh)
- return;
-
if (use_json)
json = json_object_new_object();
+ num_nh = hashcount(nh_table);
+ if (!num_nh) {
+ if (use_json)
+ vty_json_empty(vty, json);
+ return;
+ }
+
wctx.vty = vty;
wctx.json = json;
if (!use_json) {
@@ -2770,14 +2777,14 @@ void zebra_vxlan_print_nh_l3vni(struct vty *vty, vni_t l3vni, bool use_json)
if (!is_evpn_enabled()) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
return;
}
zl3vni = zl3vni_lookup(l3vni);
if (!zl3vni) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
else
vty_out(vty, "%% L3-VNI %u does not exist\n", l3vni);
return;
@@ -2790,7 +2797,7 @@ void zebra_vxlan_print_nh_svd(struct vty *vty, bool use_json)
{
if (!is_evpn_enabled()) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
return;
}
@@ -2806,7 +2813,8 @@ void zebra_vxlan_print_nh_all_l3vni(struct vty *vty, bool use_json)
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -2834,14 +2842,15 @@ void zebra_vxlan_print_l3vni(struct vty *vty, vni_t vni, bool use_json)
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zl3vni = zl3vni_lookup(vni);
if (!zl3vni) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
@@ -2905,14 +2914,15 @@ void zebra_vxlan_print_neigh_vni(struct vty *vty, struct zebra_vrf *zvrf,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
@@ -2959,7 +2969,8 @@ void zebra_vxlan_print_neigh_all_vni(struct vty *vty, struct zebra_vrf *zvrf,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -2989,7 +3000,8 @@ void zebra_vxlan_print_neigh_all_vni_detail(struct vty *vty,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3020,14 +3032,15 @@ void zebra_vxlan_print_specific_neigh_vni(struct vty *vty,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
@@ -3064,14 +3077,15 @@ void zebra_vxlan_print_neigh_vni_vtep(struct vty *vty, struct zebra_vrf *zvrf,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
@@ -3113,14 +3127,15 @@ void zebra_vxlan_print_neigh_vni_dad(struct vty *vty,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
if (use_json)
- vty_json(vty, json);
+ vty_json_empty(vty, json);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
@@ -3177,21 +3192,24 @@ void zebra_vxlan_print_macs_vni(struct vty *vty, struct zebra_vrf *zvrf,
if (!is_evpn_enabled()) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
return;
}
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
}
num_macs = num_valid_macs(zevpn);
- if (!num_macs)
+ if (!num_macs) {
+ if (use_json)
+ vty_json_empty(vty, NULL);
return;
+ }
if (use_json) {
json = json_object_new_object();
@@ -3250,7 +3268,8 @@ void zebra_vxlan_print_macs_all_vni(struct vty *vty, struct zebra_vrf *zvrf,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3278,7 +3297,8 @@ void zebra_vxlan_print_macs_all_vni_detail(struct vty *vty,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3307,7 +3327,8 @@ void zebra_vxlan_print_macs_all_vni_vtep(struct vty *vty,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3337,7 +3358,8 @@ void zebra_vxlan_print_specific_mac_vni(struct vty *vty, struct zebra_vrf *zvrf,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3377,22 +3399,34 @@ void zebra_vxlan_print_macs_vni_dad(struct vty *vty,
json_object *json = NULL;
json_object *json_mac = NULL;
- if (!is_evpn_enabled())
+ if (!is_evpn_enabled()) {
+ if (use_json)
+ vty_json_empty(vty, NULL);
return;
+ }
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
- vty_out(vty, "%% VNI %u does not exist\n", vni);
+ if (use_json)
+ vty_json_empty(vty, NULL);
+ else
+ vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
}
num_macs = num_valid_macs(zevpn);
- if (!num_macs)
+ if (!num_macs) {
+ if (use_json)
+ vty_json_empty(vty, NULL);
return;
+ }
num_macs = num_dup_detected_macs(zevpn);
- if (!num_macs)
+ if (!num_macs) {
+ if (use_json)
+ vty_json_empty(vty, NULL);
return;
+ }
if (use_json) {
json = json_object_new_object();
@@ -3727,21 +3761,25 @@ void zebra_vxlan_print_macs_vni_vtep(struct vty *vty, struct zebra_vrf *zvrf,
json_object *json_mac = NULL;
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, NULL);
return;
}
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
else
vty_out(vty, "%% VNI %u does not exist\n", vni);
return;
}
num_macs = num_valid_macs(zevpn);
- if (!num_macs)
+ if (!num_macs) {
+ if (use_json)
+ vty_json_empty(vty, NULL);
return;
+ }
if (use_json) {
json = json_object_new_object();
@@ -3785,7 +3823,8 @@ void zebra_vxlan_print_vni(struct vty *vty, struct zebra_vrf *zvrf, vni_t vni,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3829,7 +3868,8 @@ void zebra_vxlan_print_evpn(struct vty *vty, bool uj)
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (uj)
+ vty_json(vty, json);
return;
}
@@ -3906,7 +3946,8 @@ void zebra_vxlan_print_vnis(struct vty *vty, struct zebra_vrf *zvrf,
json = json_object_new_object();
if (!is_evpn_enabled()) {
- vty_json(vty, json);
+ if (use_json)
+ vty_json_empty(vty, json);
return;
}
@@ -3990,7 +4031,7 @@ void zebra_vxlan_print_vnis_detail(struct vty *vty, struct zebra_vrf *zvrf,
if (!is_evpn_enabled()) {
if (use_json)
- vty_out(vty, "{}\n");
+ vty_json_empty(vty, NULL);
return;
}