summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/bgp_attr.c7
-rw-r--r--bgpd/bgp_bfd.c4
-rw-r--r--bgpd/bgp_community.c15
-rw-r--r--bgpd/bgp_ecommunity.c12
-rw-r--r--bgpd/bgp_lcommunity.c8
-rw-r--r--bgpd/bgp_route.c152
-rw-r--r--bgpd/bgp_updgrp_packet.c2
-rw-r--r--bgpd/bgpd.c11
-rw-r--r--configure.ac2
-rw-r--r--ospf6d/ospf6_message.c1
-rw-r--r--tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf1
-rw-r--r--tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf1
-rw-r--r--tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf1
-rw-r--r--tests/topotests/bgp_default_route/r2/bgpd.conf3
-rw-r--r--tests/topotests/bgp_default_route/test_bgp_default-originate.py27
-rwxr-xr-xtests/topotests/conftest.py23
-rwxr-xr-xtests/topotests/docker/frr-topotests.sh10
-rw-r--r--tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json191
-rw-r--r--tests/topotests/lib/bgp.py412
-rw-r--r--tests/topotests/lib/common_config.py117
-rw-r--r--tests/topotests/lib/topogen.py4
-rw-r--r--tests/topotests/lib/topotest.py26
-rwxr-xr-xtools/frr-reload.py46
-rw-r--r--zebra/zapi_msg.c14
-rw-r--r--zebra/zebra_nhg.c9
-rw-r--r--zebra/zebra_rnh.c3
-rw-r--r--zebra/zebra_rnh.h3
27 files changed, 688 insertions, 417 deletions
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 24b48178d2..adf408220e 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -2915,11 +2915,8 @@ static bgp_attr_parse_ret_t bgp_attr_unknown(struct bgp_attr_parser_args *args)
if (!transit)
transit = XCALLOC(MTYPE_TRANSIT, sizeof(struct transit));
- if (transit->val)
- transit->val = XREALLOC(MTYPE_TRANSIT_VAL, transit->val,
- transit->length + total);
- else
- transit->val = XMALLOC(MTYPE_TRANSIT_VAL, total);
+ transit->val = XREALLOC(MTYPE_TRANSIT_VAL, transit->val,
+ transit->length + total);
memcpy(transit->val + transit->length, startp, total);
transit->length += total;
diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c
index f6f2f5f6e4..4995f9a1fd 100644
--- a/bgpd/bgp_bfd.c
+++ b/bgpd/bgp_bfd.c
@@ -326,7 +326,9 @@ static void bgp_peer_remove_bfd(struct peer *p)
return;
}
- bfd_sess_free(&p->bfd_config->session);
+ if (p->bfd_config)
+ bfd_sess_free(&p->bfd_config->session);
+
XFREE(MTYPE_BFD_CONFIG, p->bfd_config);
}
diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c
index 2aa6a56a5e..e91166449a 100644
--- a/bgpd/bgp_community.c
+++ b/bgpd/bgp_community.c
@@ -60,11 +60,7 @@ void community_free(struct community **com)
void community_add_val(struct community *com, uint32_t val)
{
com->size++;
- if (com->val)
- com->val = XREALLOC(MTYPE_COMMUNITY_VAL, com->val,
- com_length(com));
- else
- com->val = XMALLOC(MTYPE_COMMUNITY_VAL, com_length(com));
+ com->val = XREALLOC(MTYPE_COMMUNITY_VAL, com->val, com_length(com));
val = htonl(val);
memcpy(com_lastval(com), &val, sizeof(uint32_t));
@@ -618,13 +614,8 @@ bool community_cmp(const struct community *com1, const struct community *com2)
struct community *community_merge(struct community *com1,
struct community *com2)
{
- if (com1->val)
- com1->val =
- XREALLOC(MTYPE_COMMUNITY_VAL, com1->val,
- (com1->size + com2->size) * COMMUNITY_SIZE);
- else
- com1->val = XMALLOC(MTYPE_COMMUNITY_VAL,
- (com1->size + com2->size) * COMMUNITY_SIZE);
+ com1->val = XREALLOC(MTYPE_COMMUNITY_VAL, com1->val,
+ (com1->size + com2->size) * COMMUNITY_SIZE);
memcpy(com1->val + com1->size, com2->val, com2->size * COMMUNITY_SIZE);
com1->size += com2->size;
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index 923c9b0d7e..3a951e6e80 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -158,7 +158,6 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom,
ecom->val = XREALLOC(MTYPE_ECOMMUNITY_VAL, ecom->val,
ecom_length_size(ecom, ecom_size));
-
memmove(ecom->val + ((ins_idx + 1) * ecom_size),
ecom->val + (ins_idx * ecom_size),
(ecom->size - 1 - ins_idx) * ecom_size);
@@ -287,14 +286,9 @@ char *ecommunity_str(struct ecommunity *ecom)
struct ecommunity *ecommunity_merge(struct ecommunity *ecom1,
struct ecommunity *ecom2)
{
- if (ecom1->val)
- ecom1->val = XREALLOC(MTYPE_ECOMMUNITY_VAL, ecom1->val,
- (size_t)(ecom1->size + ecom2->size)
- * (size_t)ecom1->unit_size);
- else
- ecom1->val = XMALLOC(MTYPE_ECOMMUNITY_VAL,
- (size_t)(ecom1->size + ecom2->size)
- * (size_t)ecom1->unit_size);
+ ecom1->val = XREALLOC(MTYPE_ECOMMUNITY_VAL, ecom1->val,
+ (size_t)(ecom1->size + ecom2->size)
+ * (size_t)ecom1->unit_size);
memcpy(ecom1->val + (ecom1->size * ecom1->unit_size), ecom2->val,
(size_t)ecom2->size * (size_t)ecom1->unit_size);
diff --git a/bgpd/bgp_lcommunity.c b/bgpd/bgp_lcommunity.c
index fa4d4aee28..6121c4905f 100644
--- a/bgpd/bgp_lcommunity.c
+++ b/bgpd/bgp_lcommunity.c
@@ -166,12 +166,8 @@ struct lcommunity *lcommunity_dup(struct lcommunity *lcom)
struct lcommunity *lcommunity_merge(struct lcommunity *lcom1,
struct lcommunity *lcom2)
{
- if (lcom1->val)
- lcom1->val = XREALLOC(MTYPE_LCOMMUNITY_VAL, lcom1->val,
- lcom_length(lcom1) + lcom_length(lcom2));
- else
- lcom1->val = XMALLOC(MTYPE_LCOMMUNITY_VAL,
- lcom_length(lcom1) + lcom_length(lcom2));
+ lcom1->val = XREALLOC(MTYPE_LCOMMUNITY_VAL, lcom1->val,
+ lcom_length(lcom1) + lcom_length(lcom2));
memcpy(lcom1->val + lcom_length(lcom1), lcom2->val, lcom_length(lcom2));
lcom1->size += lcom2->size;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index bd45314350..90e0fd5253 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -9519,14 +9519,18 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p,
static void damp_route_vty_out(struct vty *vty, const struct prefix *p,
struct bgp_path_info *path, int display,
afi_t afi, safi_t safi, bool use_json,
- json_object *json)
+ json_object *json_paths)
{
- struct attr *attr;
+ struct attr *attr = path->attr;
int len;
char timebuf[BGP_UPTIME_LEN];
+ json_object *json_path = NULL;
+
+ if (use_json)
+ json_path = json_object_new_object();
/* short status lead text */
- route_vty_short_status_out(vty, path, p, json);
+ route_vty_short_status_out(vty, path, p, json_path);
/* print prefix and mask */
if (!use_json) {
@@ -9534,147 +9538,125 @@ static void damp_route_vty_out(struct vty *vty, const struct prefix *p,
route_vty_out_route(path->net, p, vty, NULL, false);
else
vty_out(vty, "%*s", 17, " ");
- }
- len = vty_out(vty, "%s", path->peer->host);
- len = 17 - len;
- if (len < 1) {
- if (!use_json)
+ len = vty_out(vty, "%s", path->peer->host);
+ len = 17 - len;
+
+ if (len < 1)
vty_out(vty, "\n%*s", 34, " ");
- } else {
- if (use_json)
- json_object_int_add(json, "peerHost", len);
else
vty_out(vty, "%*s", len, " ");
- }
- if (use_json)
- bgp_damp_reuse_time_vty(vty, path, timebuf, BGP_UPTIME_LEN, afi,
- safi, use_json, json);
- else
vty_out(vty, "%s ",
bgp_damp_reuse_time_vty(vty, path, timebuf,
BGP_UPTIME_LEN, afi, safi,
- use_json, json));
-
- /* Print attribute */
- attr = path->attr;
+ use_json, NULL));
- /* Print aspath */
- if (attr->aspath) {
- if (use_json)
- json_object_string_add(json, "asPath",
- attr->aspath->str);
- else
+ if (attr->aspath)
aspath_print_vty(vty, "%s", attr->aspath, " ");
- }
- /* Print origin */
- if (use_json)
- json_object_string_add(json, "origin",
- bgp_origin_str[attr->origin]);
- else
vty_out(vty, "%s", bgp_origin_str[attr->origin]);
- if (!use_json)
vty_out(vty, "\n");
+ } else {
+ bgp_damp_reuse_time_vty(vty, path, timebuf, BGP_UPTIME_LEN, afi,
+ safi, use_json, json_path);
+
+ if (attr->aspath)
+ json_object_string_add(json_path, "asPath",
+ attr->aspath->str);
+
+ json_object_string_add(json_path, "origin",
+ bgp_origin_str[attr->origin]);
+ json_object_string_add(json_path, "peerHost", path->peer->host);
+
+ json_object_array_add(json_paths, json_path);
+ }
}
/* flap route */
static void flap_route_vty_out(struct vty *vty, const struct prefix *p,
struct bgp_path_info *path, int display,
afi_t afi, safi_t safi, bool use_json,
- json_object *json)
+ json_object *json_paths)
{
- struct attr *attr;
+ struct attr *attr = path->attr;
struct bgp_damp_info *bdi;
char timebuf[BGP_UPTIME_LEN];
int len;
+ json_object *json_path = NULL;
if (!path->extra)
return;
+ if (use_json)
+ json_path = json_object_new_object();
+
bdi = path->extra->damp_info;
/* short status lead text */
- route_vty_short_status_out(vty, path, p, json);
+ route_vty_short_status_out(vty, path, p, json_path);
- /* print prefix and mask */
if (!use_json) {
if (!display)
route_vty_out_route(path->net, p, vty, NULL, false);
else
vty_out(vty, "%*s", 17, " ");
- }
- len = vty_out(vty, "%s", path->peer->host);
- len = 16 - len;
- if (len < 1) {
- if (!use_json)
+ len = vty_out(vty, "%s", path->peer->host);
+ len = 16 - len;
+ if (len < 1)
vty_out(vty, "\n%*s", 33, " ");
- } else {
- if (use_json)
- json_object_int_add(json, "peerHost", len);
else
vty_out(vty, "%*s", len, " ");
- }
- len = vty_out(vty, "%d", bdi->flap);
- len = 5 - len;
- if (len < 1) {
- if (!use_json)
+ len = vty_out(vty, "%d", bdi->flap);
+ len = 5 - len;
+ if (len < 1)
vty_out(vty, " ");
- } else {
- if (use_json)
- json_object_int_add(json, "bdiFlap", len);
else
vty_out(vty, "%*s", len, " ");
- }
- if (use_json)
- peer_uptime(bdi->start_time, timebuf, BGP_UPTIME_LEN, use_json,
- json);
- else
vty_out(vty, "%s ", peer_uptime(bdi->start_time, timebuf,
BGP_UPTIME_LEN, 0, NULL));
- if (CHECK_FLAG(path->flags, BGP_PATH_DAMPED)
- && !CHECK_FLAG(path->flags, BGP_PATH_HISTORY)) {
- if (use_json)
- bgp_damp_reuse_time_vty(vty, path, timebuf,
- BGP_UPTIME_LEN, afi, safi,
- use_json, json);
- else
+ if (CHECK_FLAG(path->flags, BGP_PATH_DAMPED)
+ && !CHECK_FLAG(path->flags, BGP_PATH_HISTORY))
vty_out(vty, "%s ",
bgp_damp_reuse_time_vty(vty, path, timebuf,
BGP_UPTIME_LEN, afi,
- safi, use_json, json));
- } else {
- if (!use_json)
+ safi, use_json, NULL));
+ else
vty_out(vty, "%*s ", 8, " ");
- }
-
- /* Print attribute */
- attr = path->attr;
- /* Print aspath */
- if (attr->aspath) {
- if (use_json)
- json_object_string_add(json, "asPath",
- attr->aspath->str);
- else
+ if (attr->aspath)
aspath_print_vty(vty, "%s", attr->aspath, " ");
- }
- /* Print origin */
- if (use_json)
- json_object_string_add(json, "origin",
- bgp_origin_str[attr->origin]);
- else
vty_out(vty, "%s", bgp_origin_str[attr->origin]);
- if (!use_json)
vty_out(vty, "\n");
+ } else {
+ json_object_string_add(json_path, "peerHost", path->peer->host);
+ json_object_int_add(json_path, "bdiFlap", bdi->flap);
+
+ peer_uptime(bdi->start_time, timebuf, BGP_UPTIME_LEN, use_json,
+ json_path);
+
+ if (CHECK_FLAG(path->flags, BGP_PATH_DAMPED)
+ && !CHECK_FLAG(path->flags, BGP_PATH_HISTORY))
+ bgp_damp_reuse_time_vty(vty, path, timebuf,
+ BGP_UPTIME_LEN, afi, safi,
+ use_json, json_path);
+
+ if (attr->aspath)
+ json_object_string_add(json_path, "asPath",
+ attr->aspath->str);
+
+ json_object_string_add(json_path, "origin",
+ bgp_origin_str[attr->origin]);
+
+ json_object_array_add(json_paths, json_path);
+ }
}
static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c
index 038ef4f798..9c32c7ed1e 100644
--- a/bgpd/bgp_updgrp_packet.c
+++ b/bgpd/bgp_updgrp_packet.c
@@ -1158,6 +1158,7 @@ void subgroup_default_update_packet(struct update_subgroup *subgrp,
(void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
subgroup_trigger_write(subgrp);
+ subgrp->scount++;
}
void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
@@ -1250,6 +1251,7 @@ void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
(void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
subgroup_trigger_write(subgrp);
+ subgrp->scount--;
}
static void
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 77c2f618ba..6c9ec0ebaa 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -656,14 +656,9 @@ int bgp_confederation_peers_add(struct bgp *bgp, as_t as)
if (bgp_confederation_peers_check(bgp, as))
return -1;
- if (bgp->confed_peers)
- bgp->confed_peers =
- XREALLOC(MTYPE_BGP_CONFED_LIST, bgp->confed_peers,
- (bgp->confed_peers_cnt + 1) * sizeof(as_t));
- else
- bgp->confed_peers =
- XMALLOC(MTYPE_BGP_CONFED_LIST,
- (bgp->confed_peers_cnt + 1) * sizeof(as_t));
+ bgp->confed_peers =
+ XREALLOC(MTYPE_BGP_CONFED_LIST, bgp->confed_peers,
+ (bgp->confed_peers_cnt + 1) * sizeof(as_t));
bgp->confed_peers[bgp->confed_peers_cnt] = as;
bgp->confed_peers_cnt++;
diff --git a/configure.ac b/configure.ac
index 564588cca7..1ad87d9435 100644
--- a/configure.ac
+++ b/configure.ac
@@ -703,7 +703,7 @@ AC_ARG_ENABLE([thread-sanitizer],
AC_ARG_ENABLE([memory-sanitizer],
AS_HELP_STRING([--enable-memory-sanitizer], [enable MemorySanitizer support for detecting uninitialized memory reads]))
AC_ARG_ENABLE([undefined-sanitizer],
- AS_HELP_STRING([--undefined-sanitizer], [enable UndefinedBehaviorSanitizer support for detecting undefined behavior]))
+ AS_HELP_STRING([--enable-undefined-sanitizer], [enable UndefinedBehaviorSanitizer support for detecting undefined behavior]))
AC_ARG_WITH([crypto],
AS_HELP_STRING([--with-crypto=<internal|openssl>], [choose between different implementations of cryptographic functions(default value is --with-crypto=internal)]))
AC_ARG_WITH([frr-format],
diff --git a/ospf6d/ospf6_message.c b/ospf6d/ospf6_message.c
index 1f2618ec83..549f5668b9 100644
--- a/ospf6d/ospf6_message.c
+++ b/ospf6d/ospf6_message.c
@@ -1977,7 +1977,6 @@ static int ospf6_write(struct thread *thread)
__func__, latency);
oi->last_hello = timestamp;
oi->hello_out++;
- ospf6_hello_print(oh, OSPF6_ACTION_SEND);
break;
case OSPF6_MESSAGE_TYPE_DBDESC:
oi->db_desc_out++;
diff --git a/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf b/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf
index 633d1832fd..293b38c7e8 100644
--- a/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf
+++ b/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf
@@ -17,6 +17,7 @@ route-map DEF permit 10
!
router bgp 1
bgp log-neighbor-changes
+ bgp conditional-advertisement timer 5
no bgp ebgp-requires-policy
neighbor 10.10.10.2 remote-as 2
!
diff --git a/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf b/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf
index c6147fe658..82525fac64 100644
--- a/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf
+++ b/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf
@@ -32,6 +32,7 @@ route-map RMAP-2 deny 10
!
router bgp 2
bgp log-neighbor-changes
+ bgp conditional-advertisement timer 5
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
diff --git a/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf b/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf
index 2f4f5068d8..f389f309a6 100644
--- a/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf
+++ b/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf
@@ -1,6 +1,7 @@
!
router bgp 3
bgp log-neighbor-changes
+ bgp conditional-advertisement timer 5
no bgp ebgp-requires-policy
neighbor 10.10.20.2 remote-as 2
!
diff --git a/tests/topotests/bgp_default_route/r2/bgpd.conf b/tests/topotests/bgp_default_route/r2/bgpd.conf
index 00c96cc58b..6d1080c119 100644
--- a/tests/topotests/bgp_default_route/r2/bgpd.conf
+++ b/tests/topotests/bgp_default_route/r2/bgpd.conf
@@ -2,7 +2,4 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor 192.168.255.1 remote-as 65000
neighbor 192.168.255.1 timers 3 10
- address-family ipv4 unicast
- redistribute connected
- exit-address-family
!
diff --git a/tests/topotests/bgp_default_route/test_bgp_default-originate.py b/tests/topotests/bgp_default_route/test_bgp_default-originate.py
index d8de0f0ac6..19632162b4 100644
--- a/tests/topotests/bgp_default_route/test_bgp_default-originate.py
+++ b/tests/topotests/bgp_default_route/test_bgp_default-originate.py
@@ -79,10 +79,10 @@ def test_bgp_default_originate_route_map():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears["r2"]
-
- def _bgp_converge(router):
- output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
+ def _bgp_check_if_received():
+ output = json.loads(
+ tgen.gears["r2"].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")
+ )
expected = {
"192.168.255.1": {
"bgpState": "Established",
@@ -91,22 +91,27 @@ def test_bgp_default_originate_route_map():
}
return topotest.json_cmp(output, expected)
+ def _bgp_check_if_originated():
+ output = json.loads(tgen.gears["r1"].vtysh_cmd("show ip bgp summary json"))
+ expected = {"ipv4Unicast": {"peers": {"192.168.255.2": {"pfxSnt": 1}}}}
+ return topotest.json_cmp(output, expected)
+
def _bgp_default_route_is_valid(router):
output = json.loads(router.vtysh_cmd("show ip bgp 0.0.0.0/0 json"))
expected = {"paths": [{"valid": True}]}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_bgp_converge, router)
+ test_func = functools.partial(_bgp_check_if_received)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "No 0.0.0.0/0 at r2 from r1"
- assert result is None, 'Failed to see bgp convergence in "{}"'.format(router)
-
- test_func = functools.partial(_bgp_default_route_is_valid, router)
+ test_func = functools.partial(_bgp_check_if_originated)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "No 0.0.0.0/0 from r1 to r2"
- assert (
- result is None
- ), 'Failed to see applied metric for default route in "{}"'.format(router)
+ test_func = functools.partial(_bgp_default_route_is_valid, tgen.gears["r2"])
+ success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed to see 0.0.0.0/0 in r2"
if __name__ == "__main__":
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index e57db7471c..d119b0931b 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -26,6 +26,12 @@ def pytest_addoption(parser):
only run the setup_module() to setup the topology without running any tests.
"""
parser.addoption(
+ "--asan-abort",
+ action="store_true",
+ help="Configure address sanitizer to abort process on error",
+ )
+
+ parser.addoption(
"--gdb-breakpoints",
metavar="SYMBOL[,SYMBOL...]",
help="Comma-separated list of functions to set gdb breakpoints on",
@@ -68,6 +74,12 @@ def pytest_addoption(parser):
)
parser.addoption(
+ "--strace-daemons",
+ metavar="DAEMON[,DAEMON...]",
+ help="Comma-separated list of daemons to strace, or 'all'",
+ )
+
+ parser.addoption(
"--topology-only",
action="store_true",
default=False,
@@ -167,6 +179,9 @@ def pytest_configure(config):
if not diagnose_env():
pytest.exit("environment has errors, please read the logs")
+ asan_abort = config.getoption("--asan-abort")
+ topotest_extra_config["asan_abort"] = asan_abort
+
gdb_routers = config.getoption("--gdb-routers")
gdb_routers = gdb_routers.split(",") if gdb_routers else []
topotest_extra_config["gdb_routers"] = gdb_routers
@@ -185,6 +200,9 @@ def pytest_configure(config):
shell = config.getoption("--shell")
topotest_extra_config["shell"] = shell.split(",") if shell else []
+ strace = config.getoption("--strace-daemons")
+ topotest_extra_config["strace_daemons"] = strace.split(",") if strace else []
+
pause_after = config.getoption("--pause-after")
shell_on_error = config.getoption("--shell-on-error")
@@ -244,6 +262,11 @@ def pytest_runtest_makereport(item, call):
)
)
+ # We want to pause, if requested, on any error not just test cases
+ # (e.g., call.when == "setup")
+ if not pause:
+ pause = topotest_extra_config["pause_after"]
+
# (topogen) Set topology error to avoid advancing in the test.
tgen = get_topogen()
if tgen is not None:
diff --git a/tests/topotests/docker/frr-topotests.sh b/tests/topotests/docker/frr-topotests.sh
index 9ef59b3bbc..1eaaea2971 100755
--- a/tests/topotests/docker/frr-topotests.sh
+++ b/tests/topotests/docker/frr-topotests.sh
@@ -145,7 +145,15 @@ if [ "${TOPOTEST_PULL:-1}" = "1" ]; then
docker pull frrouting/topotests:latest
fi
+if [[ -n "$TMUX" ]]; then
+ TMUX_OPTIONS="-v $(dirname $TMUX):$(dirname $TMUX) -e TMUX=$TMUX -e TMUX_PANE=$TMUX_PANE"
+fi
+
+if [[ -n "$STY" ]]; then
+ SCREEN_OPTIONS="-v /run/screen:/run/screen -e STY=$STY"
+fi
set -- --rm -i \
+ -v "$HOME:$HOME:ro" \
-v "$TOPOTEST_LOGS:/tmp" \
-v "$TOPOTEST_FRR:/root/host-frr:ro" \
-v "$TOPOTEST_BUILDCACHE:/root/persist" \
@@ -154,6 +162,8 @@ set -- --rm -i \
-e "TOPOTEST_DOC=$TOPOTEST_DOC" \
-e "TOPOTEST_SANITIZER=$TOPOTEST_SANITIZER" \
--privileged \
+    $SCREEN_OPTIONS \
+ $TMUX_OPTIONS \
$TOPOTEST_OPTIONS \
frrouting/topotests:latest "$@"
diff --git a/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json b/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json
index 14842da326..dd412708bb 100644
--- a/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json
+++ b/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json
@@ -41,7 +41,10 @@
"neighbor": {
"e1": {
"dest_link": {
- "r1": {}
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -55,7 +58,10 @@
"neighbor": {
"e1": {
"dest_link": {
- "r1": {}
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -106,7 +112,10 @@
"neighbor": {
"e1": {
"dest_link": {
- "r2-link1": {}
+ "r2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -120,7 +129,10 @@
"neighbor": {
"e1": {
"dest_link": {
- "r2-link1": {}
+ "r2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -140,7 +152,10 @@
"neighbor": {
"e1": {
"dest_link": {
- "r2-link2": {}
+ "r2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -154,7 +169,10 @@
"neighbor": {
"e1": {
"dest_link": {
- "r2-link2": {}
+ "r2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -222,7 +240,10 @@
"neighbor": {
"r1": {
"dest_link": {
- "e1": {}
+ "e1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -233,7 +254,10 @@
"neighbor": {
"r1": {
"dest_link": {
- "e1": {}
+ "e1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -262,7 +286,10 @@
"neighbor": {
"r2": {
"dest_link": {
- "e1-link1": {}
+ "e1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -273,7 +300,10 @@
"neighbor": {
"r2": {
"dest_link": {
- "e1-link1": {}
+ "e1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -302,7 +332,10 @@
"neighbor": {
"r2": {
"dest_link": {
- "e1-link2": {}
+ "e1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -313,7 +346,10 @@
"neighbor": {
"r2": {
"dest_link": {
- "e1-link2": {}
+ "e1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -342,6 +378,8 @@
"d1": {
"dest_link": {
"e1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
"deactivate": "ipv4"
}
}
@@ -349,6 +387,8 @@
"d2": {
"dest_link": {
"e1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
"deactivate": "ipv4"
}
}
@@ -412,6 +452,8 @@
"e1": {
"dest_link": {
"d1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
"deactivate": "ipv4"
}
}
@@ -442,7 +484,10 @@
"neighbor": {
"r3": {
"dest_link": {
- "d1": {}
+ "d1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -453,7 +498,10 @@
"neighbor": {
"r3": {
"dest_link": {
- "d1": {}
+ "d1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -482,7 +530,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d1-link1": {}
+ "d1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -493,7 +544,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d1-link1": {}
+ "d1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -522,7 +576,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d1-link2": {}
+ "d1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -533,7 +590,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d1-link2": {}
+ "d1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -590,7 +650,9 @@
"e1": {
"dest_link": {
"d2-link1": {
- "deactivate": "ipv4"
+ "deactivate": "ipv4",
+ "keepalivetimer": 1,
+ "holddowntimer": 3
}
}
}
@@ -620,7 +682,10 @@
"neighbor": {
"r3": {
"dest_link": {
- "d2": {}
+ "d2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -631,7 +696,10 @@
"neighbor": {
"r3": {
"dest_link": {
- "d2": {}
+ "d2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -660,7 +728,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d2-link1": {}
+ "d2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -671,7 +742,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d2-link1": {}
+ "d2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -700,7 +774,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d2-link2": {}
+ "d2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -711,7 +788,10 @@
"neighbor": {
"r4": {
"dest_link": {
- "d2-link2": {}
+ "d2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -755,12 +835,18 @@
"neighbor": {
"d1": {
"dest_link": {
- "r3": {}
+ "r3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
},
"d2": {
"dest_link": {
- "r3": {}
+ "r3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -771,12 +857,18 @@
"neighbor": {
"d1": {
"dest_link": {
- "r3": {}
+ "r3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
},
"d2": {
"dest_link": {
- "r3": {}
+ "r3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -814,12 +906,18 @@
"neighbor": {
"d1": {
"dest_link": {
- "r4-link1": {}
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
},
"d2": {
"dest_link": {
- "r4-link1": {}
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -830,12 +928,18 @@
"neighbor": {
"d1": {
"dest_link": {
- "r4-link1": {}
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
},
"d2": {
"dest_link": {
- "r4-link1": {}
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -852,12 +956,18 @@
"neighbor": {
"d1": {
"dest_link": {
- "r4-link2": {}
+ "r4-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
},
"d2": {
"dest_link": {
- "r4-link2": {}
+ "r4-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -868,12 +978,18 @@
"neighbor": {
"d1": {
"dest_link": {
- "r4-link2": {}
+ "r4-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
},
"d2": {
"dest_link": {
- "r4-link2": {}
+ "r4-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
}
}
}
@@ -885,3 +1001,4 @@
}
}
}
+
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index a236a916b5..2f1f67439f 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -44,6 +44,7 @@ from lib.common_config import (
FRRCFG_FILE,
retry,
get_ipv6_linklocal_address,
+ get_frr_ipv6_linklocal
)
LOGDIR = "/tmp/topotests/"
@@ -265,6 +266,11 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
config_data.append("bgp router-id {}".format(router_id))
config_data.append("no bgp network import-check")
+ bgp_peer_grp_data = bgp_data.setdefault("peer-group", {})
+
+ if "peer-group" in bgp_data and bgp_peer_grp_data:
+ peer_grp_data = __create_bgp_peer_group(tgen, bgp_peer_grp_data, router)
+ config_data.extend(peer_grp_data)
bst_path = bgp_data.setdefault("bestpath", None)
if bst_path:
@@ -380,6 +386,7 @@ def __create_bgp_unicast_neighbor(
addr_data = addr_dict["unicast"]
if addr_data:
config_data.append("address-family {} unicast".format(addr_type))
+
advertise_network = addr_data.setdefault("advertise_networks", [])
for advertise_network_dict in advertise_network:
network = advertise_network_dict["network"]
@@ -404,14 +411,29 @@ def __create_bgp_unicast_neighbor(
config_data.append(cmd)
+ import_cmd = addr_data.setdefault("import", {})
+ if import_cmd:
+ try:
+ if import_cmd["delete"]:
+ config_data.append("no import vrf {}".format(import_cmd["vrf"]))
+ except KeyError:
+ config_data.append("import vrf {}".format(import_cmd["vrf"]))
+
max_paths = addr_data.setdefault("maximum_paths", {})
if max_paths:
ibgp = max_paths.setdefault("ibgp", None)
ebgp = max_paths.setdefault("ebgp", None)
+ del_cmd = max_paths.setdefault("delete", False)
if ibgp:
- config_data.append("maximum-paths ibgp {}".format(ibgp))
+ if del_cmd:
+ config_data.append("no maximum-paths ibgp {}".format(ibgp))
+ else:
+ config_data.append("maximum-paths ibgp {}".format(ibgp))
if ebgp:
- config_data.append("maximum-paths {}".format(ebgp))
+ if del_cmd:
+ config_data.append("no maximum-paths {}".format(ebgp))
+ else:
+ config_data.append("maximum-paths {}".format(ebgp))
aggregate_addresses = addr_data.setdefault("aggregate_address", [])
for aggregate_address in aggregate_addresses:
@@ -649,6 +671,38 @@ def __create_l2vpn_evpn_address_family(
return config_data
+def __create_bgp_peer_group(topo, input_dict, router):
+ """
+ Helper API to create neighbor specific configuration
+
+ Parameters
+ ----------
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured
+ """
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_peer_group()")
+
+ for grp, grp_dict in input_dict.items():
+ config_data.append("neighbor {} peer-group".format(grp))
+ neigh_cxt = "neighbor {} ".format(grp)
+ update_source = grp_dict.setdefault("update-source", None)
+ remote_as = grp_dict.setdefault("remote-as", None)
+ capability = grp_dict.setdefault("capability", None)
+ if update_source:
+ config_data.append("{} update-source {}".format(neigh_cxt, update_source))
+
+ if remote_as:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+
+ if capability:
+ config_data.append("{} capability {}".format(neigh_cxt, capability))
+
+ logger.debug("Exiting lib API: __create_bgp_peer_group()")
+ return config_data
+
+
def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
"""
Helper API to create neighbor specific configuration
@@ -660,10 +714,9 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
* `input_dict` : Input dict data, required when configuring from testcase
* `router` : router id to be configured
"""
-
config_data = []
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
+ tgen = get_topogen()
bgp_data = input_dict["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
@@ -672,35 +725,91 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
nh_details = topo[name]
if "vrfs" in topo[router] or type(nh_details["bgp"]) is list:
- remote_as = nh_details["bgp"][0]["local_as"]
+ for vrf_data in nh_details["bgp"]:
+ if "vrf" in nh_details["links"][dest_link] and "vrf" in vrf_data:
+ if nh_details["links"][dest_link]["vrf"] == vrf_data["vrf"]:
+ remote_as = vrf_data["local_as"]
+ break
+ else:
+ if "vrf" not in vrf_data:
+ remote_as = vrf_data["local_as"]
+ break
+
else:
remote_as = nh_details["bgp"]["local_as"]
update_source = None
- if dest_link in nh_details["links"].keys():
- ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
- # Loopback interface
- if "source_link" in peer and peer["source_link"] == "lo":
- update_source = topo[router]["links"]["lo"][addr_type].split("/")[0]
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ ip_addr = nh_details["links"][dest_link]["peer-interface"]
+ elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ intf = topo[name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, name, intf)
+ elif dest_link in nh_details["links"].keys():
+ try:
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
+ except KeyError:
+ intf = topo[name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, name, intf)
+ if "delete" in peer and peer["delete"]:
+ neigh_cxt = "no neighbor {}".format(ip_addr)
+ config_data.append("{}".format(neigh_cxt))
+ return config_data
+ else:
+ neigh_cxt = "neighbor {}".format(ip_addr)
- neigh_cxt = "neighbor {}".format(ip_addr)
+ if "peer-group" in peer:
+ config_data.append(
+ "neighbor {} interface peer-group {}".format(
+ ip_addr, peer["peer-group"]
+ )
+ )
+
+ # Loopback interface
+ if "source_link" in peer:
+ if peer["source_link"] == "lo":
+ update_source = topo[router]["links"]["lo"][addr_type].split("/")[0]
+ else:
+ update_source = topo[router]["links"][peer["source_link"]][
+ "interface"
+ ]
+ if "peer-group" not in peer:
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ config_data.append(
+ "{} interface remote-as {}".format(neigh_cxt, remote_as)
+ )
+ elif add_neigh:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
- if add_neigh:
- config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
if addr_type == "ipv6":
config_data.append("address-family ipv6 unicast")
config_data.append("{} activate".format(neigh_cxt))
+ if "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ config_data.append(
+ "{} update-source {}".format(
+ neigh_cxt, nh_details["links"][dest_link]["peer-interface"]
+ )
+ )
+ config_data.append(
+ "{} interface {}".format(
+ neigh_cxt, nh_details["links"][dest_link]["peer-interface"]
+ )
+ )
+
disable_connected = peer.setdefault("disable_connected_check", False)
keep_alive = peer.setdefault("keepalivetimer", 3)
hold_down = peer.setdefault("holddowntimer", 10)
password = peer.setdefault("password", None)
no_password = peer.setdefault("no_password", None)
+ capability = peer.setdefault("capability", None)
max_hop_limit = peer.setdefault("ebgp_multihop", 1)
+
graceful_restart = peer.setdefault("graceful-restart", None)
graceful_restart_helper = peer.setdefault("graceful-restart-helper", None)
graceful_restart_disable = peer.setdefault("graceful-restart-disable", None)
+ if capability:
+ config_data.append("{} capability {}".format(neigh_cxt, capability))
if update_source:
config_data.append(
@@ -718,7 +827,6 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
config_data.append(
"{} timers {} {}".format(neigh_cxt, keep_alive, hold_down)
)
-
if graceful_restart:
config_data.append("{} graceful-restart".format(neigh_cxt))
elif graceful_restart == False:
@@ -768,7 +876,7 @@ def __create_bgp_unicast_address_family(
config_data = []
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
+ tgen = get_topogen()
bgp_data = input_dict["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
@@ -784,16 +892,34 @@ def __create_bgp_unicast_address_family(
for destRouterLink, data in sorted(nh_details["links"].items()):
if "type" in data and data["type"] == "loopback":
if dest_link == destRouterLink:
- ip_addr = nh_details["links"][destRouterLink][
- addr_type
- ].split("/")[0]
+ ip_addr = (
+ nh_details["links"][destRouterLink][addr_type]
+ .split("/")[0]
+ .lower()
+ )
# Physical interface
else:
- if dest_link in nh_details["links"].keys():
-
- ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
- if addr_type == "ipv4" and bgp_data["ipv6"]:
+            # check the neighbor type; if unnumbered nbr, use interface.
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ ip_addr = nh_details["links"][dest_link]["peer-interface"]
+ elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ intf = topo[peer_name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf)
+ elif dest_link in nh_details["links"].keys():
+ try:
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[
+ 0
+ ]
+ except KeyError:
+ intf = topo[peer_name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf)
+ if (
+ addr_type == "ipv4"
+ and bgp_data["ipv6"]
+ and check_address_types("ipv6")
+ and "ipv6" in nh_details["links"][dest_link]
+ ):
deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[
0
]
@@ -822,6 +948,7 @@ def __create_bgp_unicast_address_family(
prefix_lists = peer.setdefault("prefix_lists", {})
route_maps = peer.setdefault("route_maps", {})
no_send_community = peer.setdefault("no_send_community", None)
+ capability = peer.setdefault("capability", None)
allowas_in = peer.setdefault("allowas-in", None)
# next-hop-self
@@ -841,6 +968,11 @@ def __create_bgp_unicast_address_family(
"no {} send-community {}".format(neigh_cxt, no_send_community)
)
+ # capability_ext_nh
+ if capability and addr_type == "ipv6":
+ config_data.append("address-family ipv4 unicast")
+ config_data.append("{} activate".format(neigh_cxt))
+
if "allowas_in" in peer:
allow_as_in = peer["allowas_in"]
config_data.append("{} allowas-in {}".format(neigh_cxt, allow_as_in))
@@ -1067,33 +1199,37 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
API will verify if BGP is converged with in the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
state is established,
+
Parameters
----------
* `tgen`: topogen object
* `topo`: input json file data
* `dut`: device under test
- * `expected` : expected results from API, by-default True
Usage
-----
    # To verify if BGP is converged for all the routers used in
topology
results = verify_bgp_convergence(tgen, topo, dut="r1")
+
Returns
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_convergence()")
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
for router, rnode in tgen.routers().items():
- if dut is not None and dut != router:
+ if 'bgp' not in topo['routers'][router]:
continue
- if "bgp" not in topo["routers"][router]:
+ if dut is not None and dut != router:
continue
logger.info("Verifying BGP Convergence on router %s:", router)
- show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json",
+ isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
@@ -1115,100 +1251,6 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
# To find neighbor ip type
bgp_addr_type = bgp_data["address_family"]
- if "ipv4" in bgp_addr_type or "ipv6" in bgp_addr_type:
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
- total_peer = 0
-
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- for bgp_neighbor in bgp_neighbors:
- total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
-
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- no_of_peer = 0
- for bgp_neighbor, peer_data in bgp_neighbors.items():
- for dest_link in peer_data["dest_link"].keys():
- data = topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- peer_details = peer_data["dest_link"][dest_link]
- # for link local neighbors
- if (
- "neighbor_type" in peer_details
- and peer_details["neighbor_type"] == "link-local"
- ):
- neighbor_ip = get_ipv6_linklocal_address(
- topo["routers"], bgp_neighbor, dest_link
- )
- elif "source_link" in peer_details:
- neighbor_ip = topo["routers"][bgp_neighbor][
- "links"
- ][peer_details["source_link"]][addr_type].split(
- "/"
- )[
- 0
- ]
- elif (
- "neighbor_type" in peer_details
- and peer_details["neighbor_type"] == "unnumbered"
- ):
- neighbor_ip = data[dest_link]["peer-interface"]
- else:
- neighbor_ip = data[dest_link][addr_type].split("/")[
- 0
- ]
- nh_state = None
-
- if addr_type == "ipv4":
- if "ipv4Unicast" in show_bgp_json[vrf]:
- ipv4_data = show_bgp_json[vrf]["ipv4Unicast"][
- "peers"
- ]
- nh_state = ipv4_data[neighbor_ip]["state"]
- else:
- if "ipv6Unicast" in show_bgp_json[vrf]:
- ipv6_data = show_bgp_json[vrf]["ipv6Unicast"][
- "peers"
- ]
- nh_state = ipv6_data[neighbor_ip]["state"]
- if nh_state == "Established":
- no_of_peer += 1
-
- if "l2vpn" in bgp_addr_type:
- if "neighbor" not in bgp_addr_type["l2vpn"]["evpn"]:
- if no_of_peer == total_peer:
- logger.info(
- "[DUT: %s] VRF: %s, BGP is Converged for %s address-family",
- router,
- vrf,
- addr_type,
- )
- else:
- errormsg = (
- "[DUT: %s] VRF: %s, BGP is not converged for %s address-family"
- % (router, vrf, addr_type)
- )
- return errormsg
- else:
- if no_of_peer == total_peer:
- logger.info(
- "[DUT: %s] VRF: %s, BGP is Converged for %s address-family",
- router,
- vrf,
- addr_type,
- )
- else:
- errormsg = (
- "[DUT: %s] VRF: %s, BGP is not converged for %s address-family"
- % (router, vrf, addr_type)
- )
- return errormsg
-
if "l2vpn" in bgp_addr_type:
total_evpn_peer = 0
@@ -1224,46 +1266,120 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
data = topo["routers"][bgp_neighbor]["links"]
for dest_link in dest_link_dict.keys():
if dest_link in data:
- peer_details = peer_data[_addr_type][dest_link]
+ peer_details = \
+ peer_data[_addr_type][dest_link]
- neighbor_ip = data[dest_link][_addr_type].split("/")[0]
+ neighbor_ip = \
+ data[dest_link][_addr_type].split(
+ "/")[0]
nh_state = None
- if (
- "ipv4Unicast" in show_bgp_json[vrf]
- or "ipv6Unicast" in show_bgp_json[vrf]
- ):
- errormsg = (
- "[DUT: %s] VRF: %s, "
- "ipv4Unicast/ipv6Unicast"
- " address-family present"
- " under l2vpn" % (router, vrf)
- )
+ if "ipv4Unicast" in show_bgp_json[vrf] or \
+ "ipv6Unicast" in show_bgp_json[vrf]:
+ errormsg = ("[DUT: %s] VRF: %s, "
+ "ipv4Unicast/ipv6Unicast"
+ " address-family present"
+ " under l2vpn" % (router,
+ vrf))
return errormsg
- l2VpnEvpn_data = show_bgp_json[vrf]["l2VpnEvpn"][
- "peers"
- ]
- nh_state = l2VpnEvpn_data[neighbor_ip]["state"]
+ l2VpnEvpn_data = \
+ show_bgp_json[vrf]["l2VpnEvpn"][
+ "peers"]
+ nh_state = \
+ l2VpnEvpn_data[neighbor_ip]["state"]
if nh_state == "Established":
no_of_evpn_peer += 1
if no_of_evpn_peer == total_evpn_peer:
- logger.info(
- "[DUT: %s] VRF: %s, BGP is Converged for " "epvn peers",
- router,
- vrf,
- )
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged for "
+ "epvn peers", router, vrf)
+ result = True
else:
- errormsg = (
- "[DUT: %s] VRF: %s, BGP is not converged "
- "for evpn peers" % (router, vrf)
- )
+ errormsg = ("[DUT: %s] VRF: %s, BGP is not converged "
+ "for evpn peers" % (router, vrf))
return errormsg
+ else:
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
- logger.debug("Exiting API: verify_bgp_convergence()")
- return True
+ bgp_neighbors = \
+ bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += \
+ len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+ bgp_neighbors = \
+ bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link in peer_data["dest_link"].\
+ keys():
+ data = \
+ topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ peer_details = \
+ peer_data['dest_link'][dest_link]
+ # for link local neighbors
+ if "neighbor_type" in peer_details and \
+ peer_details["neighbor_type"] == \
+ 'link-local':
+ intf = topo["routers"][bgp_neighbor][
+ "links"][dest_link]["interface"]
+ neighbor_ip = get_frr_ipv6_linklocal(
+ tgen, bgp_neighbor, intf)
+ elif "source_link" in peer_details:
+ neighbor_ip = \
+ topo["routers"][bgp_neighbor][
+ "links"][peer_details[
+ 'source_link']][
+ addr_type].\
+ split("/")[0]
+ elif "neighbor_type" in peer_details and \
+ peer_details["neighbor_type"] == \
+ 'unnumbered':
+ neighbor_ip = \
+ data[dest_link]["peer-interface"]
+ else:
+ neighbor_ip = \
+ data[dest_link][addr_type].split(
+ "/")[0]
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json[vrf][
+ "ipv4Unicast"]["peers"]
+ nh_state = \
+ ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json[vrf][
+ "ipv6Unicast"]["peers"]
+ if neighbor_ip in ipv6_data:
+ nh_state = \
+ ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer and no_of_peer > 0:
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged",
+ router, vrf)
+ result = True
+ else:
+ errormsg = ("[DUT: %s] VRF: %s, BGP is not converged"
+ % (router, vrf))
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
@retry(retry_timeout=16)
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 9e60e523d3..6a02e50127 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -497,7 +497,7 @@ def reset_config_on_routers(tgen, routerName=None):
f.close()
run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname)
init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
- command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}".format(
+ command = "/usr/lib/frr/frr-reload.py --test --test-reset --input {} {} > {}".format(
run_cfg_file, init_cfg_file, dname
)
result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE)
@@ -527,37 +527,9 @@ def reset_config_on_routers(tgen, routerName=None):
raise InvalidCLIError(out_data)
raise InvalidCLIError("Unknown error in %s", output)
- f = open(dname, "r")
delta = StringIO()
- delta.write("configure terminal\n")
- t_delta = f.read()
-
- # Don't disable debugs
- check_debug = True
-
- for line in t_delta.split("\n"):
- line = line.strip()
- if line == "Lines To Delete" or line == "===============" or not line:
- continue
-
- if line == "Lines To Add":
- check_debug = False
- continue
-
- if line == "============" or not line:
- continue
-
- # Leave debugs and log output alone
- if check_debug:
- if "debug" in line or "log file" in line:
- continue
-
- delta.write(line)
- delta.write("\n")
-
- f.close()
-
- delta.write("end\n")
+ with open(dname, "r") as f:
+ delta.write(f.read())
output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False)
@@ -637,6 +609,7 @@ def load_config_to_router(tgen, routerName, save_bkup=False):
return True
+
def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
"""
API to get the link local ipv6 address of a particular interface using
@@ -669,38 +642,48 @@ def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
else:
cmd = "show interface"
- ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
-
- # Fix newlines (make them all the same)
- ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
-
- interface = None
- ll_per_if_count = 0
- for line in ifaces:
- # Interface name
- m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
- if m:
- interface = m.group(1).split(" ")[0]
- ll_per_if_count = 0
-
- # Interface ip
- m1 = re_search("inet6 (fe80[:a-fA-F0-9]+[/0-9]+)", line)
- if m1:
- local = m1.group(1)
- ll_per_if_count += 1
- if ll_per_if_count > 1:
- linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
- else:
- linklocal += [[interface, local]]
-
- if linklocal:
- if intf:
- return [_linklocal[1] for _linklocal in linklocal if _linklocal[0] == intf][
- 0
- ].split("/")[0]
- return linklocal
- else:
- errormsg = "Link local ip missing on router {}"
+ linklocal = []
+ if vrf:
+ cmd = "show interface vrf {}".format(vrf)
+ else:
+ cmd = "show interface"
+ for chk_ll in range(0, 60):
+ sleep(1/4)
+ ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
+ # Fix newlines (make them all the same)
+ ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re_search('Interface ([a-zA-Z0-9-]+) is', line)
+ if m:
+ interface = m.group(1).split(" ")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m1 = re_search('inet6 (fe80[:a-fA-F0-9]+[\/0-9]+)',
+ line)
+ if m1:
+ local = m1.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" %
+ (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ try:
+ if linklocal:
+ if intf:
+ return [_linklocal[1] for _linklocal in linklocal if _linklocal[0]==intf][0].\
+ split("/")[0]
+ return linklocal
+ except IndexError:
+ continue
+
+ errormsg = "Link local ip missing on router {}".format(router)
return errormsg
@@ -1846,6 +1829,14 @@ def create_interfaces_cfg(tgen, topo, build=False):
else:
interface_data.append("ipv6 address {}".format(intf_addr))
+ # Wait for vrf interfaces to get link local address once they are up
+ if not destRouterLink == 'lo' and 'vrf' in topo[c_router][
+ 'links'][destRouterLink]:
+ vrf = topo[c_router]['links'][destRouterLink]['vrf']
+ intf = topo[c_router]['links'][destRouterLink]['interface']
+ ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf,
+ vrf = vrf)
+
if "ipv6-link-local" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"]
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index ade5933504..b998878118 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -801,8 +801,8 @@ class TopoRouter(TopoGear):
try:
return json.loads(output)
- except ValueError:
- logger.warning("vtysh_cmd: failed to convert json output")
+ except ValueError as error:
+ logger.warning("vtysh_cmd: %s: failed to convert json output: %s: %s", self.name, str(output), str(error))
return {}
def vtysh_multicmd(self, commands, pretty_output=True, daemon=None):
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index d1f60bfe0d..23dcced2bf 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1152,6 +1152,18 @@ class Router(Node):
self.reportCores = True
self.version = None
+ self.ns_cmd = "sudo nsenter -m -n -t {} ".format(self.pid)
+ try:
+ # Allow escaping from running inside docker
+ cgroup = open("/proc/1/cgroup").read()
+ m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
+ if m:
+ self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
+ except IOError:
+ pass
+ else:
+ logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
+
def _config_frr(self, **params):
"Configure FRR binaries"
self.daemondir = params.get("frrdir")
@@ -1350,7 +1362,7 @@ class Router(Node):
term = topo_terminal if topo_terminal else "xterm"
makeTerm(self, title=title if title else cmd, term=term, cmd=cmd)
else:
- nscmd = "sudo nsenter -m -n -t {} {}".format(self.pid, cmd)
+ nscmd = self.ns_cmd + cmd
if "TMUX" in os.environ:
self.cmd("tmux select-layout main-horizontal")
wcmd = "tmux split-window -h"
@@ -1451,11 +1463,13 @@ class Router(Node):
def startRouterDaemons(self, daemons=None):
"Starts all FRR daemons for this router."
+ asan_abort = g_extra_config["asan_abort"]
gdb_breakpoints = g_extra_config["gdb_breakpoints"]
gdb_daemons = g_extra_config["gdb_daemons"]
gdb_routers = g_extra_config["gdb_routers"]
valgrind_extra = g_extra_config["valgrind_extra"]
valgrind_memleaks = g_extra_config["valgrind_memleaks"]
+ strace_daemons = g_extra_config["strace_daemons"]
bundle_data = ""
@@ -1482,7 +1496,6 @@ class Router(Node):
os.path.join(self.daemondir, "bgpd") + " -v"
).split()[2]
logger.info("{}: running version: {}".format(self.name, self.version))
-
# If `daemons` was specified then some upper API called us with
# specific daemons, otherwise just use our own configuration.
daemons_list = []
@@ -1506,13 +1519,20 @@ class Router(Node):
else:
binary = os.path.join(self.daemondir, daemon)
- cmdenv = "ASAN_OPTIONS=log_path={0}.asan".format(daemon)
+ cmdenv = "ASAN_OPTIONS="
+ if asan_abort:
+ cmdenv = "abort_on_error=1:"
+ cmdenv += "log_path={0}/{1}.{2}.asan ".format(self.logdir, self.name, daemon)
+
if valgrind_memleaks:
this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp"))
cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file)
if valgrind_extra:
cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes"
+ elif daemon in strace_daemons or "all" in strace_daemons:
+ cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(daemon, self.logdir, self.name)
+
cmdopt = "{} --log file:{}.log --log-level debug".format(
daemon_opts, daemon
)
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index eb8753fd08..9d41305ec3 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -2012,6 +2012,11 @@ if __name__ == "__main__":
parser.add_argument(
"--daemon", help="daemon for which want to replace the config", default=""
)
+ parser.add_argument(
+ "--test-reset",
+ action="store_true",
+ help="Used by topotest to not delete debug or log file commands",
+ )
args = parser.parse_args()
@@ -2125,7 +2130,7 @@ if __name__ == "__main__":
service_integrated_vtysh_config = False
break
- if not service_integrated_vtysh_config and not args.daemon:
+ if not args.test and not service_integrated_vtysh_config and not args.daemon:
log.error(
"'service integrated-vtysh-config' is not configured, this is required for 'service frr reload'"
)
@@ -2153,35 +2158,56 @@ if __name__ == "__main__":
running.load_from_show_running(args.daemon)
(lines_to_add, lines_to_del) = compare_context_objects(newconf, running)
- lines_to_configure = []
if lines_to_del:
- print("\nLines To Delete")
- print("===============")
+ if not args.test_reset:
+ print("\nLines To Delete")
+ print("===============")
for (ctx_keys, line) in lines_to_del:
if line == "!":
continue
- cmd = "\n".join(lines_to_config(ctx_keys, line, True))
- lines_to_configure.append(cmd)
+ nolines = lines_to_config(ctx_keys, line, True)
+
+ if args.test_reset:
+                # For topotests the original code stripped the lines, and omitted blank lines
+ # after, do that here
+ nolines = [x.strip() for x in nolines]
+ # For topotests leave these lines in (don't delete them)
+ # [chopps: why is "log file" more special than other "log" commands?]
+ nolines = [x for x in nolines if "debug" not in x and "log file" not in x]
+ if not nolines:
+ continue
+
+ cmd = "\n".join(nolines)
print(cmd)
if lines_to_add:
- print("\nLines To Add")
- print("============")
+ if not args.test_reset:
+ print("\nLines To Add")
+ print("============")
for (ctx_keys, line) in lines_to_add:
if line == "!":
continue
- cmd = "\n".join(lines_to_config(ctx_keys, line, False))
- lines_to_configure.append(cmd)
+ lines = lines_to_config(ctx_keys, line, False)
+
+ if args.test_reset:
+                # For topotests the original code stripped the lines, and omitted blank lines
+ # after, do that here
+ lines = [x.strip() for x in lines if x.strip()]
+ if not lines:
+ continue
+
+ cmd = "\n".join(lines)
print(cmd)
elif args.reload:
+ lines_to_configure = []
# We will not be able to do anything, go ahead and exit(1)
if not vtysh.is_config_available():
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 8f2aa2fb09..4ef4bc6722 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -682,6 +682,8 @@ static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,
stream_put_in_addr(s, &addr);
if (re) {
+ struct nexthop_group *nhg;
+
stream_putc(s, re->distance);
stream_putl(s, re->metric);
num = 0;
@@ -689,15 +691,11 @@ static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,
nump = stream_get_endp(s);
/* reserve room for nexthop_num */
stream_putc(s, 0);
- /*
- * Only non-recursive routes are elegible to resolve the
- * nexthop we are looking up. Therefore, we will just iterate
- * over the top chain of nexthops.
- */
- for (nexthop = re->nhe->nhg.nexthop; nexthop;
- nexthop = nexthop->next)
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ nhg = rib_get_fib_nhg(re);
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
+ if (rnh_nexthop_valid(re, nexthop))
num += zserv_encode_nexthop(s, nexthop);
+ }
/* store nexthop_num */
stream_putc_at(s, nump, num);
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index af86263a16..46d5164127 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -453,8 +453,13 @@ static void *zebra_nhg_hash_alloc(void *arg)
/* Mark duplicate nexthops in a group at creation time. */
nexthop_group_mark_duplicates(&(nhe->nhg));
- /* Add the ifp now if it's not a group or recursive and has ifindex */
- if (nhe->nhg.nexthop && nhe->nhg.nexthop->ifindex) {
+ /*
+ * Add the ifp now if it's not a group or recursive and has ifindex.
+ *
+ * A proto-owned ID is always a group.
+ */
+ if (!PROTO_OWNED(nhe) && nhe->nhg.nexthop && !nhe->nhg.nexthop->next
+ && !nhe->nhg.nexthop->resolved && nhe->nhg.nexthop->ifindex) {
struct interface *ifp = NULL;
ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index 41d55c2e6c..017a4aae7f 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -598,8 +598,7 @@ static const int RNH_INVALID_NH_FLAGS = (NEXTHOP_FLAG_RECURSIVE |
NEXTHOP_FLAG_DUPLICATE |
NEXTHOP_FLAG_RNH_FILTERED);
-static bool rnh_nexthop_valid(const struct route_entry *re,
- const struct nexthop *nh)
+bool rnh_nexthop_valid(const struct route_entry *re, const struct nexthop *nh)
{
return (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
&& CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
diff --git a/zebra/zebra_rnh.h b/zebra/zebra_rnh.h
index 4897a6af30..24ee6d0bd9 100644
--- a/zebra/zebra_rnh.h
+++ b/zebra/zebra_rnh.h
@@ -64,6 +64,9 @@ extern void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
extern int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family);
+extern bool rnh_nexthop_valid(const struct route_entry *re,
+ const struct nexthop *nh);
+
/* UI control to avoid notifications if backup nexthop status changes */
void rnh_set_hide_backups(bool hide_p);
bool rnh_get_hide_backups(void);