-rw-r--r--  .gitignore | 5
-rw-r--r--  Makefile.am | 42
-rw-r--r--  bgpd/bgp_attr.c | 24
-rw-r--r--  bgpd/bgp_community.c | 3
-rw-r--r--  bgpd/bgp_ecommunity.c | 3
-rw-r--r--  bgpd/bgp_filter.c | 10
-rw-r--r--  bgpd/bgp_lcommunity.c | 3
-rw-r--r--  bgpd/bgp_nht.c | 3
-rw-r--r--  bgpd/bgp_route.c | 12
-rw-r--r--  bgpd/bgp_vty.c | 101
-rwxr-xr-x  configure.ac | 3
-rw-r--r--  doc/developer/building-frr-for-archlinux.rst | 2
-rw-r--r--  doc/user/bgp.rst | 32
-rw-r--r--  doc/user/overview.rst | 7
-rw-r--r--  doc/user/zebra.rst | 18
-rw-r--r--  grpc/subdir.am | 2
-rw-r--r--  ldpd/l2vpn.c | 12
-rw-r--r--  ldpd/lde.c | 5
-rw-r--r--  ldpd/ldp_vty_exec.c | 4
-rw-r--r--  ldpd/ldpd.h | 9
-rw-r--r--  ldpd/logmsg.c | 22
-rw-r--r--  ldpd/neighbor.c | 10
-rw-r--r--  ldpd/subdir.am | 1
-rw-r--r--  lib/hook.h | 2
-rw-r--r--  lib/northbound_grpc.cpp | 1307
-rw-r--r--  lib/subdir.am | 2
-rw-r--r--  lib/vrf.c | 2
-rw-r--r--  python/callgraph-dot.py | 476
-rw-r--r--  python/makefile.py | 34
-rw-r--r--  python/makevars.py | 60
-rw-r--r--  qpb/subdir.am | 1
-rw-r--r--  tests/topotests/bgp_gr_functionality_topo1/bgp_gr_topojson_topo1.json | 115
-rwxr-xr-x  tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py | 3495
-rw-r--r--  tests/topotests/bgp_gr_functionality_topo2/bgp_gr_topojson_topo2.json | 334
-rwxr-xr-x  tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py | 3085
-rw-r--r--  tests/topotests/ldp-topo1/r3/how_mpls_table.ref | 10
-rw-r--r--  tests/topotests/ldp-topo1/r4/how_mpls_table.ref | 9
-rw-r--r--  tests/topotests/lib/bgp.py | 1354
-rw-r--r--  tests/topotests/lib/common_config.py | 298
-rw-r--r--  tests/topotests/lib/topogen.py | 32
-rw-r--r--  tests/topotests/lib/topotest.py | 114
-rw-r--r--  tools/frr-llvm-cg.c | 649
-rwxr-xr-x  tools/frr-reload.py | 19
-rwxr-xr-x  tools/frr.in | 2
-rw-r--r--  tools/frrcommon.sh.in | 2
-rw-r--r--  tools/subdir.am | 12
-rw-r--r--  yang/LICENSE | 22
-rw-r--r--  yang/frr-bfdd.yang | 27
-rw-r--r--  yang/frr-eigrpd.yang | 27
-rw-r--r--  yang/frr-filter.yang | 28
-rw-r--r--  yang/frr-igmp.yang | 27
-rw-r--r--  yang/frr-interface.yang | 27
-rw-r--r--  yang/frr-isisd.yang | 27
-rw-r--r--  yang/frr-module-translator.yang | 27
-rw-r--r--  yang/frr-nexthop.yang | 27
-rw-r--r--  yang/frr-pim-rp.yang | 27
-rw-r--r--  yang/frr-pim.yang | 27
-rw-r--r--  yang/frr-ripd.yang | 27
-rw-r--r--  yang/frr-ripngd.yang | 27
-rw-r--r--  yang/frr-route-map.yang | 28
-rw-r--r--  yang/frr-route-types.yang | 27
-rw-r--r--  yang/frr-routing.yang | 27
-rw-r--r--  yang/frr-staticd.yang | 27
-rw-r--r--  yang/frr-test-module.yang | 28
-rw-r--r--  yang/frr-vrf.yang | 27
-rw-r--r--  yang/frr-vrrpd.yang | 27
-rw-r--r--  yang/frr-zebra.yang | 64
-rw-r--r--  zebra/connected.c | 2
-rw-r--r--  zebra/if_netlink.c | 23
-rw-r--r--  zebra/rib.h | 42
-rw-r--r--  zebra/rt_netlink.c | 27
-rw-r--r--  zebra/rt_socket.c | 21
-rw-r--r--  zebra/rtadv.c | 4
-rw-r--r--  zebra/rtadv.h | 4
-rw-r--r--  zebra/zebra_dplane.c | 2
-rw-r--r--  zebra/zebra_fpm.c | 80
-rw-r--r--  zebra/zebra_fpm_netlink.c | 33
-rw-r--r--  zebra/zebra_gr.c | 6
-rw-r--r--  zebra/zebra_nb.c | 202
-rw-r--r--  zebra/zebra_nb.h | 179
-rw-r--r--  zebra/zebra_nb_config.c | 50
-rw-r--r--  zebra/zebra_nb_state.c | 584
-rw-r--r--  zebra/zebra_nhg.c | 15
-rw-r--r--  zebra/zebra_pw.c | 152
-rw-r--r--  zebra/zebra_rib.c | 44
-rw-r--r--  zebra/zebra_rnh.c | 31
-rw-r--r--  zebra/zebra_rnh.h | 14
-rw-r--r--  zebra/zebra_router.c | 20
-rw-r--r--  zebra/zebra_router.h | 3
-rw-r--r--  zebra/zebra_vrf.h | 6
-rw-r--r--  zebra/zebra_vty.c | 20
-rw-r--r--  zebra/zserv.h | 2
92 files changed, 12536 insertions, 1379 deletions
diff --git a/.gitignore b/.gitignore
index 8c61aeb9c5..fbbb04b60c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,6 +55,10 @@
*.pb-c.c
*.pb.cc
*_clippy.c
+*.bc
+*.cg.json
+*.cg.dot
+*.cg.svg
### gcov outputs
@@ -103,3 +107,4 @@ compile_commands.json
refix
.vscode
.kitchen
+.emacs.desktop*
diff --git a/Makefile.am b/Makefile.am
index 1e3311fa7b..a959fd9e5a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -89,9 +89,11 @@ clippy-only: Makefile lib/clippy config.h
#AUTODERP# endif
EXTRA_DIST =
+EXTRA_PROGRAMS =
BUILT_SOURCES =
CLEANFILES =
DISTCLEANFILES =
+SUFFIXES =
examplesdir = $(exampledir)
@@ -231,12 +233,48 @@ EXTRA_DIST += \
vrrpd/Makefile \
# end
-clean-local: clean-python
-.PHONY: clean-python
+AM_V_LLVM_BC = $(am__v_LLVM_BC_$(V))
+am__v_LLVM_BC_ = $(am__v_LLVM_BC_$(AM_DEFAULT_VERBOSITY))
+am__v_LLVM_BC_0 = @echo " LLVM.BC " $@;
+am__v_LLVM_BC_1 =
+
+AM_V_LLVM_LD = $(am__v_LLVM_LD_$(V))
+am__v_LLVM_LD_ = $(am__v_LLVM_LD_$(AM_DEFAULT_VERBOSITY))
+am__v_LLVM_LD_0 = @echo " LLVM.LD " $@;
+am__v_LLVM_LD_1 =
+
+SUFFIXES += .lo.bc .o.bc
+
+.o.o.bc:
+ $(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ $(patsubst %.o,%.c,$<)
+.lo.lo.bc:
+ $(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ $(patsubst %.lo,%.c,$<)
+
+%.cg.json: %.bc tools/frr-llvm-cg
+ tools/frr-llvm-cg -o $@ $<
+%.cg.dot: %.cg.json
+ $(PYTHON) $(top_srcdir)/python/callgraph-dot.py $< $@
+%.cg.svg: %.cg.dot
+ @echo if the following command fails, you need to install graphviz.
+ @echo also, the output is nondeterministic. run it multiple times and use the nicest output.
+ @echo tuning parameters may yield nicer looking graphs as well.
+ fdp -GK=0.7 -Gstart=42231337 -Gmaxiter=2000 -Elen=2 -Gnodesep=1.5 -Tsvg -o$@ $<
+# don't delete intermediaries
+.PRECIOUS: %.cg.json %.cg.dot
+
+# <lib>.la.bc, <lib>.a.bc and <daemon>.bc targets are generated by
+# python/makefile.py
+LLVM_LINK = llvm-link-$(llvm_version)
+
+clean-local: clean-python clean-llvm-bitcode
+.PHONY: clean-python clean-llvm-bitcode
clean-python:
find . -name __pycache__ -o -name .pytest_cache | xargs rm -rf
find . -name "*.pyc" -o -name "*_clippy.c" | xargs rm -f
+clean-llvm-bitcode:
+ find . -name "*.bc" -o -name "*.cg.json" -o -name "*.cg.dot" -o -name "*.cg.svg" | xargs rm -f
+
redistclean:
$(MAKE) distclean CONFIG_CLEAN_FILES="$(filter-out $(EXTRA_DIST), $(CONFIG_CLEAN_FILES))"
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index b7e2f45195..d8566fed9f 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -1405,9 +1405,10 @@ static int bgp_attr_aspath(struct bgp_attr_parser_args *args)
/* Codification of AS 0 Processing */
if (aspath_check_as_zero(attr->aspath)) {
- flog_err(EC_BGP_ATTR_MAL_AS_PATH,
- "Malformed AS path, contains BGP_AS_ZERO(0) from %s",
- peer->host);
+ flog_err(
+ EC_BGP_ATTR_MAL_AS_PATH,
+ "Malformed AS path, AS number is 0 in the path from %s",
+ peer->host);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
0);
}
@@ -1485,9 +1486,10 @@ static int bgp_attr_as4_path(struct bgp_attr_parser_args *args,
/* Codification of AS 0 Processing */
if (aspath_check_as_zero(*as4_path)) {
- flog_err(EC_BGP_ATTR_MAL_AS_PATH,
- "Malformed AS4 path, contains BGP_AS_ZERO(0) from %s",
- peer->host);
+ flog_err(
+ EC_BGP_ATTR_MAL_AS_PATH,
+ "Malformed AS path, AS number is 0 in the path from %s",
+ peer->host);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
0);
}
@@ -1667,13 +1669,10 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args)
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR);
/* Codification of AS 0 Processing */
- if (aggregator_as == BGP_AS_ZERO) {
+ if (aggregator_as == BGP_AS_ZERO)
flog_err(EC_BGP_ATTR_LEN,
"AGGREGATOR AS number is 0 for aspath: %s",
aspath_print(attr->aspath));
- return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
- args->total);
- }
return BGP_ATTR_PARSE_PROCEED;
}
@@ -1703,13 +1702,10 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args,
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR);
/* Codification of AS 0 Processing */
- if (aggregator_as == BGP_AS_ZERO) {
+ if (aggregator_as == BGP_AS_ZERO)
flog_err(EC_BGP_ATTR_LEN,
"AS4_AGGREGATOR AS number is 0 for aspath: %s",
aspath_print(attr->aspath));
- return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
- 0);
- }
return BGP_ATTR_PARSE_PROCEED;
}
diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c
index 30de84c878..0d60fbf479 100644
--- a/bgpd/bgp_community.c
+++ b/bgpd/bgp_community.c
@@ -40,6 +40,9 @@ static struct community *community_new(void)
/* Free communities value. */
void community_free(struct community **com)
{
+ if (!(*com))
+ return;
+
XFREE(MTYPE_COMMUNITY_VAL, (*com)->val);
XFREE(MTYPE_COMMUNITY_STR, (*com)->str);
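
Note on the hunk above (and the matching ones in bgpd/bgp_ecommunity.c and bgpd/bgp_lcommunity.c below): the free routines take a double pointer, and the added guard makes them a no-op when the caller never allocated the object, mirroring the behaviour of free(NULL). A minimal sketch of the idiom, using a hypothetical "struct blob" rather than FRR's types:

#include <stdlib.h>

struct blob {
	unsigned int *val;
	char *str;
};

static void blob_free(struct blob **b)
{
	if (!*b)          /* nothing was allocated: behave like free(NULL) */
		return;

	free((*b)->val);  /* free the members first ... */
	free((*b)->str);
	free(*b);         /* ... then the object itself */
	*b = NULL;        /* and clear the caller's pointer */
}
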
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index 062a6477fa..d13da74b04 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -59,6 +59,9 @@ void ecommunity_strfree(char **s)
/* Allocate ecommunities. */
void ecommunity_free(struct ecommunity **ecom)
{
+ if (!(*ecom))
+ return;
+
XFREE(MTYPE_ECOMMUNITY_VAL, (*ecom)->val);
XFREE(MTYPE_ECOMMUNITY_STR, (*ecom)->str);
XFREE(MTYPE_ECOMMUNITY, *ecom);
diff --git a/bgpd/bgp_filter.c b/bgpd/bgp_filter.c
index 3e26263df1..0308a30d54 100644
--- a/bgpd/bgp_filter.c
+++ b/bgpd/bgp_filter.c
@@ -548,15 +548,6 @@ DEFUN (no_as_path_all,
return CMD_SUCCESS;
}
-ALIAS (no_as_path_all,
- no_ip_as_path_all_cmd,
- "no ip as-path access-list WORD",
- NO_STR
- IP_STR
- "BGP autonomous system path filter\n"
- "Specify an access list name\n"
- "Regular expression access list name\n")
-
static void as_list_show(struct vty *vty, struct as_list *aslist)
{
struct as_filter *asfilter;
@@ -683,7 +674,6 @@ void bgp_filter_init(void)
install_element(CONFIG_NODE, &bgp_as_path_cmd);
install_element(CONFIG_NODE, &no_bgp_as_path_cmd);
install_element(CONFIG_NODE, &no_bgp_as_path_all_cmd);
- install_element(CONFIG_NODE, &no_ip_as_path_all_cmd);
install_element(VIEW_NODE, &show_bgp_as_path_access_list_cmd);
install_element(VIEW_NODE, &show_ip_as_path_access_list_cmd);
diff --git a/bgpd/bgp_lcommunity.c b/bgpd/bgp_lcommunity.c
index f47ae91663..5900fcf862 100644
--- a/bgpd/bgp_lcommunity.c
+++ b/bgpd/bgp_lcommunity.c
@@ -44,6 +44,9 @@ static struct lcommunity *lcommunity_new(void)
/* Allocate lcommunities. */
void lcommunity_free(struct lcommunity **lcom)
{
+ if (!(*lcom))
+ return;
+
XFREE(MTYPE_LCOMMUNITY_VAL, (*lcom)->val);
XFREE(MTYPE_LCOMMUNITY_STR, (*lcom)->str);
if ((*lcom)->json)
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index cc208a8190..fced2fbcab 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -962,6 +962,9 @@ void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
for (nhop = bnc->nexthop; nhop; nhop = nhop->next) {
ifp = if_lookup_by_index(nhop->ifindex, nhop->vrf_id);
+ if (!ifp)
+ continue;
+
zclient_send_interface_radv_req(zclient, nhop->vrf_id, ifp, 0,
0);
}
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 78f077eaca..e8151d94ed 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -10567,8 +10567,8 @@ DEFUN(show_ip_bgp_statistics_all, show_ip_bgp_statistics_all_cmd,
{
bool uj = use_json(argc, argv);
struct bgp *bgp = NULL;
- safi_t safi;
- afi_t afi;
+ safi_t safi = SAFI_UNICAST;
+ afi_t afi = AFI_IP6;
int idx = 0;
struct json_object *json_all = NULL;
struct json_object *json_afi_safi = NULL;
@@ -10626,8 +10626,8 @@ DEFUN (show_ip_bgp_l2vpn_evpn_statistics,
"BGP RIB advertisement statistics\n"
JSON_STR)
{
- afi_t afi;
- safi_t safi;
+ afi_t afi = AFI_IP6;
+ safi_t safi = SAFI_UNICAST;
struct bgp *bgp = NULL;
int idx = 0, ret;
bool uj = use_json(argc, argv);
@@ -10666,8 +10666,8 @@ DEFUN(show_ip_bgp_afi_safi_statistics, show_ip_bgp_afi_safi_statistics_cmd,
BGP_SAFI_WITH_LABEL_HELP_STR
"BGP RIB advertisement statistics\n" JSON_STR)
{
- afi_t afi;
- safi_t safi;
+ afi_t afi = AFI_IP6;
+ safi_t safi = SAFI_UNICAST;
struct bgp *bgp = NULL;
int idx = 0, ret;
bool uj = use_json(argc, argv);
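
The three hunks above give afi and safi explicit defaults before the "show ... statistics" handlers pass them to the argument-parsing helpers. A small illustration of why that matters, with a hypothetical parser that only writes its out-parameter when a matching token is present (not the actual FRR helper):

#include <stdio.h>
#include <string.h>

enum afi { AFI_IP = 1, AFI_IP6 = 2 };

/* Writes *afi only if the token names an address family. */
static void parse_afi(const char *tok, enum afi *afi)
{
	if (!strcmp(tok, "ipv4"))
		*afi = AFI_IP;
	else if (!strcmp(tok, "ipv6"))
		*afi = AFI_IP6;
}

int main(void)
{
	enum afi afi = AFI_IP6;         /* default, as in the hunks above */

	parse_afi("l2vpn", &afi);       /* token does not set afi ...          */
	printf("afi = %d\n", afi);      /* ... but afi still holds a defined value */
	return 0;
}
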
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 4f36073e9a..9890a3f071 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -2987,8 +2987,7 @@ DEFUN (no_bgp_bestpath_med,
/* "bgp bestpath bandwidth" configuration. */
DEFPY (bgp_bestpath_bw,
bgp_bestpath_bw_cmd,
- "[no$no] bgp bestpath bandwidth [<ignore|skip-missing|default-weight-for-missing>$bw_cfg]",
- NO_STR
+ "bgp bestpath bandwidth <ignore|skip-missing|default-weight-for-missing>$bw_cfg",
"BGP specific commands\n"
"Change the default bestpath selection\n"
"Link Bandwidth attribute\n"
@@ -3000,22 +2999,18 @@ DEFPY (bgp_bestpath_bw,
afi_t afi;
safi_t safi;
- if (no) {
- bgp->lb_handling = BGP_LINK_BW_ECMP;
- } else {
- if (!bw_cfg) {
- vty_out(vty, "%% Bandwidth configuration must be specified\n");
- return CMD_ERR_INCOMPLETE;
- }
- if (!strcmp(bw_cfg, "ignore"))
- bgp->lb_handling = BGP_LINK_BW_IGNORE_BW;
- else if (!strcmp(bw_cfg, "skip-missing"))
- bgp->lb_handling = BGP_LINK_BW_SKIP_MISSING;
- else if (!strcmp(bw_cfg, "default-weight-for-missing"))
- bgp->lb_handling = BGP_LINK_BW_DEFWT_4_MISSING;
- else
- return CMD_ERR_NO_MATCH;
+ if (!bw_cfg) {
+ vty_out(vty, "%% Bandwidth configuration must be specified\n");
+ return CMD_ERR_INCOMPLETE;
}
+ if (!strcmp(bw_cfg, "ignore"))
+ bgp->lb_handling = BGP_LINK_BW_IGNORE_BW;
+ else if (!strcmp(bw_cfg, "skip-missing"))
+ bgp->lb_handling = BGP_LINK_BW_SKIP_MISSING;
+ else if (!strcmp(bw_cfg, "default-weight-for-missing"))
+ bgp->lb_handling = BGP_LINK_BW_DEFWT_4_MISSING;
+ else
+ return CMD_ERR_NO_MATCH;
/* This config is used in route install, so redo that. */
FOREACH_AFI_SAFI (afi, safi) {
@@ -3027,6 +3022,32 @@ DEFPY (bgp_bestpath_bw,
return CMD_SUCCESS;
}
+DEFPY (no_bgp_bestpath_bw,
+ no_bgp_bestpath_bw_cmd,
+ "no bgp bestpath bandwidth [<ignore|skip-missing|default-weight-for-missing>$bw_cfg]",
+ NO_STR
+ "BGP specific commands\n"
+ "Change the default bestpath selection\n"
+ "Link Bandwidth attribute\n"
+ "Ignore link bandwidth (i.e., do regular ECMP, not weighted)\n"
+ "Ignore paths without link bandwidth for ECMP (if other paths have it)\n"
+ "Assign a low default weight (value 1) to paths not having link bandwidth\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ afi_t afi;
+ safi_t safi;
+
+ bgp->lb_handling = BGP_LINK_BW_ECMP;
+
+ /* This config is used in route install, so redo that. */
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (!bgp_fibupd_safi(safi))
+ continue;
+ bgp_zebra_announce_table(bgp, afi, safi);
+ }
+ return CMD_SUCCESS;
+}
+
/* "no bgp default ipv4-unicast". */
DEFUN (no_bgp_default_ipv4_unicast,
no_bgp_default_ipv4_unicast_cmd,
@@ -8766,6 +8787,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
json_object *json_peer = NULL;
json_object *json_peers = NULL;
struct peer_af *paf;
+ struct bgp_filter *filter;
/* labeled-unicast routes are installed in the unicast table so in order
* to
@@ -9065,7 +9087,8 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
}
}
- paf = peer_af_find(peer, afi, pfx_rcd_safi);
+ paf = peer_af_find(peer, afi, safi);
+ filter = &peer->filter[afi][safi];
count++;
/* Works for both failed & successful cases */
@@ -9208,18 +9231,39 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
BGP_UPTIME_LEN, 0, NULL));
if (peer->status == Established) {
- if (peer->afc_recv[afi][safi])
- vty_out(vty, " %12" PRIu32,
- peer->pcount
- [afi]
- [pfx_rcd_safi]);
- else
+ if (peer->afc_recv[afi][safi]) {
+ if (CHECK_FLAG(
+ bgp->flags,
+ BGP_FLAG_EBGP_REQUIRES_POLICY)
+ && !bgp_inbound_policy_exists(
+ peer, filter))
+ vty_out(vty, " %12s",
+ "(Policy)");
+ else
+ vty_out(vty,
+ " %12" PRIu32,
+ peer->pcount
+ [afi]
+ [pfx_rcd_safi]);
+ } else {
vty_out(vty, " NoNeg");
+ }
- if (paf && PAF_SUBGRP(paf))
- vty_out(vty, " %8" PRIu32,
- (PAF_SUBGRP(paf))
- ->scount);
+ if (paf && PAF_SUBGRP(paf)) {
+ if (CHECK_FLAG(
+ bgp->flags,
+ BGP_FLAG_EBGP_REQUIRES_POLICY)
+ && !bgp_outbound_policy_exists(
+ peer, filter))
+ vty_out(vty, " %8s",
+ "(Policy)");
+ else
+ vty_out(vty,
+ " %8" PRIu32,
+ (PAF_SUBGRP(
+ paf))
+ ->scount);
+ }
} else {
if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
vty_out(vty, " Idle (Admin)");
@@ -15734,6 +15778,7 @@ void bgp_vty_init(void)
/* "bgp bestpath bandwidth" commands */
install_element(BGP_NODE, &bgp_bestpath_bw_cmd);
+ install_element(BGP_NODE, &no_bgp_bestpath_bw_cmd);
/* "no bgp default ipv4-unicast" commands. */
install_element(BGP_NODE, &no_bgp_default_ipv4_unicast_cmd);
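
Besides splitting the "bgp bestpath bandwidth" no-form into its own DEFPY, this file change teaches "show bgp summary" to print "(Policy)" in the PfxRcd/PfxSnt columns whenever "bgp ebgp-requires-policy" is in effect and the corresponding inbound or outbound filter is missing. A condensed sketch of that column decision (an illustrative helper, not an FRR function):

#include <stdbool.h>
#include <stdio.h>

/* Print the prefix counter, or "(Policy)" when eBGP policy is
 * required but the peer has no filter in that direction. */
static void show_pfx_column(bool requires_policy, bool policy_exists,
			    unsigned int count)
{
	if (requires_policy && !policy_exists)
		printf(" %12s", "(Policy)");
	else
		printf(" %12u", count);
}
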
diff --git a/configure.ac b/configure.ac
index e1e23e224d..d4c652c6e5 100755
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ([2.60])
-AC_INIT([frr], [7.4-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [7.5-dev], [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST([PACKAGE_URL])
PACKAGE_FULLNAME="FRRouting"
@@ -813,6 +813,7 @@ AC_SUBST([enable_vty_group])
enable_configfile_mask=${enable_configfile_mask:-0600}
AC_DEFINE_UNQUOTED([CONFIGFILE_MASK], [${enable_configfile_mask}], [Mask for config files])
+AC_SUBST([enable_configfile_mask])
enable_logfile_mask=${enable_logfile_mask:-0600}
AC_DEFINE_UNQUOTED([LOGFILE_MASK], [${enable_logfile_mask}], [Mask for log files])
diff --git a/doc/developer/building-frr-for-archlinux.rst b/doc/developer/building-frr-for-archlinux.rst
index 7ede35ad9c..f62add5963 100644
--- a/doc/developer/building-frr-for-archlinux.rst
+++ b/doc/developer/building-frr-for-archlinux.rst
@@ -67,7 +67,7 @@ Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and
MPLS (if supported by your platform). If your platform does not support MPLS,
skip the MPLS related configuration in this section.
-Edit :file:`/etc/sysctl.conf`[*Create the file if it doesn't exist*] and
+Edit :file:`/etc/sysctl.conf` [*Create the file if it doesn't exist*] and
append the following values (ignore the other settings):
::
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 8020aeeae3..c056b39889 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -420,6 +420,23 @@ Require policy on EBGP
This is enabled by default.
+ When the incoming or outgoing filter is missing you will see
+ "(Policy)" sign under ``show bgp summary``:
+
+ .. code-block:: frr
+
+ exit1# show bgp summary
+
+ IPv4 Unicast Summary:
+ BGP router identifier 10.10.10.1, local AS number 65001 vrf-id 0
+ BGP table version 4
+ RIB entries 7, using 1344 bytes of memory
+ Peers 2, using 43 KiB of memory
+
+ Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt
+ 192.168.0.2 4 65002 8 10 0 0 0 00:03:09 5 (Policy)
+ fe80:1::2222 4 65002 9 11 0 0 0 00:03:09 (Policy) (Policy)
+
Reject routes with AS_SET or AS_CONFED_SET types
------------------------------------------------
@@ -1489,6 +1506,17 @@ AS path access list is user defined AS path.
.. index:: no bgp as-path access-list WORD permit|deny LINE
.. clicmd:: no bgp as-path access-list WORD permit|deny LINE
+.. _bgp-bogon-filter-example:
+
+Bogon ASN filter policy configuration example
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: frr
+
+ bgp as-path access-list 99 permit _0_
+ bgp as-path access-list 99 permit _23456_
+ bgp as-path access-list 99 permit _1310[0-6][0-9]_|_13107[0-1]_
+
.. _bgp-using-as-path-in-route-map:
Using AS Path in Route Map
@@ -3013,8 +3041,8 @@ certainly contains silly mistakes, if not serious flaws.
ip prefix-list pl-peer2-network permit 192.168.2.0/24
ip prefix-list pl-peer2-network permit 172.16.1/24
!
- ip as-path access-list asp-own-as permit ^$
- ip as-path access-list asp-own-as permit _64512_
+ bgp as-path access-list asp-own-as permit ^$
+ bgp as-path access-list asp-own-as permit _64512_
!
! #################################################################
! Match communities we provide actions for, on routes receives from
diff --git a/doc/user/overview.rst b/doc/user/overview.rst
index cf8cc44097..64a54d9a7d 100644
--- a/doc/user/overview.rst
+++ b/doc/user/overview.rst
@@ -395,7 +395,14 @@ MPLS
- :rfc:`7552`
:t:`Updates to LDP for IPv6, R. Asati, C. Pignataro, K. Raza, V. Manral, and R. Papneja. June 2015.`
+VRRP
+----
+- :rfc:`3768`
+ :t:`Virtual Router Redundancy Protocol (VRRP). R. Hinden. April 2004.`
+- :rfc:`5798`
+ :t:`Virtual Router Redundancy Protocol (VRRP) Version 3 for IPv4 and IPv6. S. Nadas. June 2000.`
+
SNMP
----
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 9dfd08f733..f3b4ca7d03 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -708,6 +708,24 @@ that sets the preferred source address, and applies the route-map to all
ip protocol rip route-map RM1
+IPv6 example for OSPFv3.
+
+.. code-block:: frr
+
+ ipv6 prefix-list ANY seq 10 permit any
+ route-map RM6 permit 10
+ match ipv6 address prefix-list ANY
+ set src 2001:db8:425:1000::3
+
+ ipv6 protocol ospf6 route-map RM6
+
+
+.. note::
+
+ For both IPv4 and IPv6, the IP address has to exist at the point the
+ route-map is created. Be wary of race conditions if the interface is
+ not created at startup. On Debian, FRR might start before ifupdown
+ completes. Consider a reboot test.
.. _zebra-fib-push-interface:
diff --git a/grpc/subdir.am b/grpc/subdir.am
index 048e12a024..045848aee7 100644
--- a/grpc/subdir.am
+++ b/grpc/subdir.am
@@ -26,6 +26,8 @@ am__v_PROTOC_ = $(am__v_PROTOC_$(AM_DEFAULT_VERBOSITY))
am__v_PROTOC_0 = @echo " PROTOC" $@;
am__v_PROTOC_1 =
+SUFFIXES += .pb.h .pb.cc .grpc.pb.cc
+
.proto.pb.cc:
$(AM_V_PROTOC)$(PROTOC) -I$(top_srcdir) --cpp_out=$(top_srcdir) $(top_srcdir)/$^
.proto.grpc.pb.cc:
diff --git a/ldpd/l2vpn.c b/ldpd/l2vpn.c
index b234e3ebe3..4a944fa019 100644
--- a/ldpd/l2vpn.c
+++ b/ldpd/l2vpn.c
@@ -303,6 +303,7 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
if (fnh->remote_label == NO_LABEL) {
log_warnx("%s: pseudowire %s: no remote label", __func__,
pw->ifname);
+ pw->reason = F_PW_NO_REMOTE_LABEL;
return (0);
}
@@ -310,6 +311,7 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
if (pw->l2vpn->mtu != pw->remote_mtu) {
log_warnx("%s: pseudowire %s: MTU mismatch detected", __func__,
pw->ifname);
+ pw->reason = F_PW_MTU_MISMATCH;
return (0);
}
@@ -318,9 +320,11 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
pw->remote_status != PW_FORWARDING) {
log_warnx("%s: pseudowire %s: remote end is down", __func__,
pw->ifname);
+ pw->reason = F_PW_REMOTE_NOT_FWD;
return (0);
}
+ pw->reason = F_PW_NO_ERR;
return (1);
}
@@ -517,10 +521,13 @@ l2vpn_pw_status_update(struct zapi_pw_status *zpw)
return (1);
}
- if (zpw->status == PW_STATUS_UP)
+ if (zpw->status == PW_STATUS_UP) {
local_status = PW_FORWARDING;
- else
+ pw->reason = F_PW_NO_ERR;
+ } else {
local_status = PW_NOT_FORWARDING;
+ pw->reason = F_PW_LOCAL_NOT_FWD;
+ }
/* local status didn't change */
if (pw->local_status == local_status)
@@ -604,6 +611,7 @@ l2vpn_binding_ctl(pid_t pid)
pwctl.local_ifmtu = pw->l2vpn->mtu;
pwctl.local_cword = (pw->flags & F_PW_CWORD_CONF) ?
1 : 0;
+ pwctl.reason = pw->reason;
} else
pwctl.local_label = NO_LABEL;
diff --git a/ldpd/lde.c b/ldpd/lde.c
index 7d1df158a5..3220278960 100644
--- a/ldpd/lde.c
+++ b/ldpd/lde.c
@@ -751,7 +751,6 @@ lde_update_label(struct fec_node *fn)
return (MPLS_LABEL_IMPLICIT_NULL);
return MPLS_LABEL_IPV6_EXPLICIT_NULL;
default:
- fatalx("lde_update_label: unexpected fec type");
break;
}
}
@@ -1421,8 +1420,10 @@ lde_nbr_del(struct lde_nbr *ln)
if (f->u.pwid.lsr_id.s_addr != ln->id.s_addr)
continue;
pw = (struct l2vpn_pw *) fn->data;
- if (pw)
+ if (pw) {
+ pw->reason = F_PW_NO_REMOTE_LABEL;
l2vpn_pw_reset(pw);
+ }
break;
default:
break;
diff --git a/ldpd/ldp_vty_exec.c b/ldpd/ldp_vty_exec.c
index d317da7b20..d74017c7e2 100644
--- a/ldpd/ldp_vty_exec.c
+++ b/ldpd/ldp_vty_exec.c
@@ -1256,6 +1256,8 @@ show_l2vpn_binding_msg(struct vty *vty, struct imsg *imsg,
"GroupID: %u\n", "", pw->local_cword,
pw_type_name(pw->type),pw->local_gid);
vty_out (vty, "%-8sMTU: %u\n", "",pw->local_ifmtu);
+ vty_out (vty, "%-8sLast failure: %s\n", "",
+ pw_error_code(pw->reason));
} else
vty_out (vty," Local Label: unassigned\n");
@@ -1309,6 +1311,8 @@ show_l2vpn_binding_msg_json(struct imsg *imsg, struct show_params *params,
pw->local_gid);
json_object_int_add(json_pw, "localIfMtu",
pw->local_ifmtu);
+ json_object_string_add(json_pw, "lastFailureReason",
+ pw_error_code(pw->reason));
} else
json_object_string_add(json_pw, "localLabel",
"unassigned");
diff --git a/ldpd/ldpd.h b/ldpd/ldpd.h
index 606fb372bb..c1bcc56c44 100644
--- a/ldpd/ldpd.h
+++ b/ldpd/ldpd.h
@@ -422,6 +422,7 @@ struct l2vpn_pw {
uint32_t local_status;
uint32_t remote_status;
uint8_t flags;
+ uint8_t reason;
QOBJ_FIELDS
};
RB_HEAD(l2vpn_pw_head, l2vpn_pw);
@@ -433,6 +434,12 @@ DECLARE_QOBJ_TYPE(l2vpn_pw)
#define F_PW_CWORD 0x08 /* control word negotiated */
#define F_PW_STATIC_NBR_ADDR 0x10 /* static neighbor address configured */
+#define F_PW_NO_ERR 0x00 /* no error reported */
+#define F_PW_LOCAL_NOT_FWD 0x01 /* locally can't forward over PW */
+#define F_PW_REMOTE_NOT_FWD 0x02 /* remote end of PW reported fwd error*/
+#define F_PW_NO_REMOTE_LABEL 0x03 /* have not recvd label from peer */
+#define F_PW_MTU_MISMATCH 0x04 /* mtu mismatch between peers */
+
struct l2vpn {
RB_ENTRY(l2vpn) entry;
char name[L2VPN_NAME_LEN];
@@ -662,6 +669,7 @@ struct ctl_pw {
uint16_t remote_ifmtu;
uint8_t remote_cword;
uint32_t status;
+ uint8_t reason;
};
extern struct ldpd_conf *ldpd_conf, *vty_conf;
@@ -808,6 +816,7 @@ const char *if_type_name(enum iface_type);
const char *msg_name(uint16_t);
const char *status_code_name(uint32_t);
const char *pw_type_name(uint16_t);
+const char *pw_error_code(uint8_t);
/* quagga */
extern struct thread_master *master;
diff --git a/ldpd/logmsg.c b/ldpd/logmsg.c
index 2c9fbf0dae..6427d0e13b 100644
--- a/ldpd/logmsg.c
+++ b/ldpd/logmsg.c
@@ -485,3 +485,25 @@ pw_type_name(uint16_t pw_type)
return (buf);
}
}
+
+const char *
+pw_error_code(uint8_t status)
+{
+ static char buf[16];
+
+ switch (status) {
+ case F_PW_NO_ERR:
+ return ("No Error");
+ case F_PW_LOCAL_NOT_FWD:
+ return ("local not forwarding");
+ case F_PW_REMOTE_NOT_FWD:
+ return ("remote not forwarding");
+ case F_PW_NO_REMOTE_LABEL:
+ return ("no remote label");
+ case F_PW_MTU_MISMATCH:
+ return ("mtu mismatch between peers");
+ default:
+ snprintf(buf, sizeof(buf), "[%0x]", status);
+ return (buf);
+ }
+}
diff --git a/ldpd/neighbor.c b/ldpd/neighbor.c
index ae51490c07..1aa53151e6 100644
--- a/ldpd/neighbor.c
+++ b/ldpd/neighbor.c
@@ -619,6 +619,16 @@ nbr_establish_connection(struct nbr *nbr)
#endif
}
+ if (nbr->af == AF_INET) {
+ if (sock_set_ipv4_tos(nbr->fd, IPTOS_PREC_INTERNETCONTROL) == -1)
+ log_warn("%s: lsr-id %s, sock_set_ipv4_tos error",
+ __func__, inet_ntoa(nbr->id));
+ } else if (nbr->af == AF_INET6) {
+ if (sock_set_ipv6_dscp(nbr->fd, IPTOS_PREC_INTERNETCONTROL) == -1)
+ log_warn("%s: lsr-id %s, sock_set_ipv6_dscp error",
+ __func__, inet_ntoa(nbr->id));
+ }
+
addr2sa(nbr->af, &nbr->laddr, 0, &local_su);
addr2sa(nbr->af, &nbr->raddr, LDP_PORT, &remote_su);
if (nbr->af == AF_INET6 && nbr->raddr_scope)
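
The hunk above marks LDP session traffic with IPTOS_PREC_INTERNETCONTROL so it is prioritized like other control-plane traffic. As a rough sketch, helpers such as sock_set_ipv4_tos() and sock_set_ipv6_dscp() typically reduce to the standard socket options below (illustrative only, not necessarily FRR's exact implementation):

#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/socket.h>

/* Set the IPv4 TOS byte on an existing socket. */
static int set_ipv4_tos(int fd, int tos)
{
	return setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
}

/* Set the IPv6 traffic class (which carries the DSCP bits). */
static int set_ipv6_tclass(int fd, int tclass)
{
	return setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tclass,
			  sizeof(tclass));
}
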
diff --git a/ldpd/subdir.am b/ldpd/subdir.am
index 09936e5c28..0b38c37872 100644
--- a/ldpd/subdir.am
+++ b/ldpd/subdir.am
@@ -28,7 +28,6 @@ ldpd_libldp_a_SOURCES = \
ldpd/ldp_vty_conf.c \
ldpd/ldp_vty_exec.c \
ldpd/ldp_zebra.c \
- ldpd/ldpd.c \
ldpd/ldpe.c \
ldpd/log.c \
ldpd/logmsg.c \
diff --git a/lib/hook.h b/lib/hook.h
index 3823cebe6a..bef5351e90 100644
--- a/lib/hook.h
+++ b/lib/hook.h
@@ -145,7 +145,7 @@ extern void _hook_register(struct hook *hook, struct hookent *stackent,
*/
#define _hook_reg_svar(hook, funcptr, arg, has_arg, module, funcname, prio) \
do { \
- static struct hookent stack_hookent = { .ent_on_heap = 0, }; \
+ static struct hookent stack_hookent = {}; \
_hook_register(hook, &stack_hookent, funcptr, arg, has_arg, \
module, funcname, prio); \
} while (0)
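
The hook.h change swaps the designated initializer for an empty brace list. Both forms leave the static hookent fully zeroed (static storage is zero-initialized, and "{}" is the usual GNU C / C23 spelling for "all members zero"), so the behaviour is unchanged. A tiny illustration with a hypothetical struct:

struct ent {
	int on_heap;
	void *next;
};

static struct ent a = { .on_heap = 0, };   /* old spelling */
static struct ent b = {};                  /* new spelling, same all-zero result */
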
diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp
index 66bf05c1ab..2962a977eb 100644
--- a/lib/northbound_grpc.cpp
+++ b/lib/northbound_grpc.cpp
@@ -28,6 +28,7 @@
#include "lib_errors.h"
#include "northbound.h"
#include "northbound_db.h"
+#include "frr_pthread.h"
#include <iostream>
#include <sstream>
@@ -36,6 +37,8 @@
#define GRPC_DEFAULT_PORT 50051
+static void *grpc_pthread_start(void *arg);
+
/*
* NOTE: we can't use the FRR debugging infrastructure here since it uses
* atomics and C++ has a different atomics API. Enable gRPC debugging
@@ -43,14 +46,78 @@
*/
static bool nb_dbg_client_grpc = 1;
-static pthread_t grpc_pthread;
+static struct frr_pthread *fpt;
+
+/* Default frr_pthread attributes */
+static const struct frr_pthread_attr attr = {
+ .start = grpc_pthread_start,
+ .stop = NULL,
+};
+
+enum CallStatus { CREATE, PROCESS, FINISH };
+
+/* Thanks gooble */
+class RpcStateBase
+{
+ public:
+ virtual void doCallback() = 0;
+};
+
+class NorthboundImpl;
-class NorthboundImpl final : public frr::Northbound::Service
+template <typename Q, typename S> class RpcState : RpcStateBase
+{
+ public:
+ RpcState(NorthboundImpl *svc,
+ void (NorthboundImpl::*cb)(RpcState<Q, S> *))
+ : callback(cb), responder(&ctx), async_responder(&ctx),
+ service(svc){};
+
+ void doCallback() override
+ {
+ (service->*callback)(this);
+ }
+
+ grpc::ServerContext ctx;
+ Q request;
+ S response;
+ grpc::ServerAsyncResponseWriter<S> responder;
+ grpc::ServerAsyncWriter<S> async_responder;
+
+ NorthboundImpl *service;
+ void (NorthboundImpl::*callback)(RpcState<Q, S> *);
+
+ void *context;
+ CallStatus state = CREATE;
+};
+
+#define REQUEST_RPC(NAME) \
+ do { \
+ auto _rpcState = \
+ new RpcState<frr::NAME##Request, frr::NAME##Response>( \
+ this, &NorthboundImpl::Handle##NAME); \
+ _service->Request##NAME(&_rpcState->ctx, &_rpcState->request, \
+ &_rpcState->responder, _cq, _cq, \
+ _rpcState); \
+ } while (0)
+
+#define REQUEST_RPC_STREAMING(NAME) \
+ do { \
+ auto _rpcState = \
+ new RpcState<frr::NAME##Request, frr::NAME##Response>( \
+ this, &NorthboundImpl::Handle##NAME); \
+ _service->Request##NAME(&_rpcState->ctx, &_rpcState->request, \
+ &_rpcState->async_responder, _cq, _cq, \
+ _rpcState); \
+ } while (0)
+
+class NorthboundImpl
{
public:
NorthboundImpl(void)
{
_nextCandidateId = 0;
+ _service = new frr::Northbound::AsyncService();
}
~NorthboundImpl(void)
@@ -61,61 +128,135 @@ class NorthboundImpl final : public frr::Northbound::Service
delete_candidate(&it->second);
}
- grpc::Status
- GetCapabilities(grpc::ServerContext *context,
- frr::GetCapabilitiesRequest const *request,
- frr::GetCapabilitiesResponse *response) override
+ void Run(unsigned long port)
+ {
+ grpc::ServerBuilder builder;
+ std::stringstream server_address;
+
+ server_address << "0.0.0.0:" << port;
+
+ builder.AddListeningPort(server_address.str(),
+ grpc::InsecureServerCredentials());
+ builder.RegisterService(_service);
+
+ auto cq = builder.AddCompletionQueue();
+ _cq = cq.get();
+ auto _server = builder.BuildAndStart();
+
+ /* Schedule all RPC handlers */
+ REQUEST_RPC(GetCapabilities);
+ REQUEST_RPC(CreateCandidate);
+ REQUEST_RPC(DeleteCandidate);
+ REQUEST_RPC(UpdateCandidate);
+ REQUEST_RPC(EditCandidate);
+ REQUEST_RPC(LoadToCandidate);
+ REQUEST_RPC(Commit);
+ REQUEST_RPC(GetTransaction);
+ REQUEST_RPC(LockConfig);
+ REQUEST_RPC(UnlockConfig);
+ REQUEST_RPC(Execute);
+ REQUEST_RPC_STREAMING(Get);
+ REQUEST_RPC_STREAMING(ListTransactions);
+
+ zlog_notice("gRPC server listening on %s",
+ server_address.str().c_str());
+
+ /* Process inbound RPCs */
+ void *tag;
+ bool ok;
+ while (true) {
+ _cq->Next(&tag, &ok);
+ GPR_ASSERT(ok);
+ static_cast<RpcStateBase *>(tag)->doCallback();
+ tag = nullptr;
+ }
+ }
+
+ void HandleGetCapabilities(RpcState<frr::GetCapabilitiesRequest,
+ frr::GetCapabilitiesResponse> *tag)
{
if (nb_dbg_client_grpc)
zlog_debug("received RPC GetCapabilities()");
- // Response: string frr_version = 1;
- response->set_frr_version(FRR_VERSION);
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(GetCapabilities);
+ tag->state = PROCESS;
+ case PROCESS: {
- // Response: bool rollback_support = 2;
+ // Response: string frr_version = 1;
+ tag->response.set_frr_version(FRR_VERSION);
+
+ // Response: bool rollback_support = 2;
#ifdef HAVE_CONFIG_ROLLBACKS
- response->set_rollback_support(true);
+ tag->response.set_rollback_support(true);
#else
- response->set_rollback_support(false);
+ tag->response.set_rollback_support(false);
#endif
- // Response: repeated ModuleData supported_modules = 3;
- struct yang_module *module;
- RB_FOREACH (module, yang_modules, &yang_modules) {
- auto m = response->add_supported_modules();
+ // Response: repeated ModuleData supported_modules = 3;
+ struct yang_module *module;
+ RB_FOREACH (module, yang_modules, &yang_modules) {
+ auto m = tag->response.add_supported_modules();
- m->set_name(module->name);
- if (module->info->rev_size)
- m->set_revision(module->info->rev[0].date);
- m->set_organization(module->info->org);
- }
+ m->set_name(module->name);
+ if (module->info->rev_size)
+ m->set_revision(
+ module->info->rev[0].date);
+ m->set_organization(module->info->org);
+ }
- // Response: repeated Encoding supported_encodings = 4;
- response->add_supported_encodings(frr::JSON);
- response->add_supported_encodings(frr::XML);
+ // Response: repeated Encoding supported_encodings = 4;
+ tag->response.add_supported_encodings(frr::JSON);
+ tag->response.add_supported_encodings(frr::XML);
- return grpc::Status::OK;
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status Get(grpc::ServerContext *context,
- frr::GetRequest const *request,
- grpc::ServerWriter<frr::GetResponse> *writer) override
+ void HandleGet(RpcState<frr::GetRequest, frr::GetResponse> *tag)
{
- // Request: DataType type = 1;
- int type = request->type();
- // Request: Encoding encoding = 2;
- frr::Encoding encoding = request->encoding();
- // Request: bool with_defaults = 3;
- bool with_defaults = request->with_defaults();
+ switch (tag->state) {
+ case CREATE: {
+ auto mypaths = new std::list<std::string>();
+ tag->context = mypaths;
+ auto paths = tag->request.path();
+ for (const std::string &path : paths) {
+ mypaths->push_back(std::string(path));
+ }
+ REQUEST_RPC_STREAMING(Get);
+ tag->state = PROCESS;
+ }
+ case PROCESS: {
+ // Request: DataType type = 1;
+ int type = tag->request.type();
+ // Request: Encoding encoding = 2;
+ frr::Encoding encoding = tag->request.encoding();
+ // Request: bool with_defaults = 3;
+ bool with_defaults = tag->request.with_defaults();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC Get(type: %u, encoding: %u, with_defaults: %u)",
+ type, encoding, with_defaults);
+
+ auto mypaths = static_cast<std::list<std::string> *>(
+ tag->context);
+
+ if (mypaths->empty()) {
+ tag->async_responder.Finish(grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- if (nb_dbg_client_grpc)
- zlog_debug(
- "received RPC Get(type: %u, encoding: %u, with_defaults: %u)",
- type, encoding, with_defaults);
- // Request: repeated string path = 4;
- auto paths = request->path();
- for (const std::string &path : paths) {
frr::GetResponse response;
grpc::Status status;
@@ -124,391 +265,695 @@ class NorthboundImpl final : public frr::Northbound::Service
// Response: DataTree data = 2;
auto *data = response.mutable_data();
- data->set_encoding(request->encoding());
- status = get_path(data, path, type,
+ data->set_encoding(tag->request.encoding());
+ status = get_path(data, mypaths->back().c_str(), type,
encoding2lyd_format(encoding),
with_defaults);
// Something went wrong...
- if (!status.ok())
- return status;
+ if (!status.ok()) {
+ tag->async_responder.WriteAndFinish(
+ response, grpc::WriteOptions(), status,
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- writer->Write(response);
- }
+ mypaths->pop_back();
- if (nb_dbg_client_grpc)
- zlog_debug("received RPC Get() end");
+ tag->async_responder.Write(response, tag);
- return grpc::Status::OK;
+ break;
+ }
+ case FINISH:
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC Get() end");
+
+ delete static_cast<std::list<std::string> *>(
+ tag->context);
+ delete tag;
+ }
}
- grpc::Status
- CreateCandidate(grpc::ServerContext *context,
- frr::CreateCandidateRequest const *request,
- frr::CreateCandidateResponse *response) override
+ void HandleCreateCandidate(RpcState<frr::CreateCandidateRequest,
+ frr::CreateCandidateResponse> *tag)
{
if (nb_dbg_client_grpc)
zlog_debug("received RPC CreateCandidate()");
- struct candidate *candidate = create_candidate();
- if (!candidate)
- return grpc::Status(
- grpc::StatusCode::RESOURCE_EXHAUSTED,
- "Can't create candidate configuration");
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(CreateCandidate);
+ tag->state = PROCESS;
+ case PROCESS: {
+ struct candidate *candidate = create_candidate();
+ if (!candidate) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ RESOURCE_EXHAUSTED,
+ "Can't create candidate configuration"),
+ tag);
+ } else {
+ tag->response.set_candidate_id(candidate->id);
+ tag->responder.Finish(tag->response,
+ grpc::Status::OK, tag);
+ }
- // Response: uint32 candidate_id = 1;
- response->set_candidate_id(candidate->id);
+ tag->state = FINISH;
- return grpc::Status::OK;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status
- DeleteCandidate(grpc::ServerContext *context,
- frr::DeleteCandidateRequest const *request,
- frr::DeleteCandidateResponse *response) override
+ void HandleDeleteCandidate(RpcState<frr::DeleteCandidateRequest,
+ frr::DeleteCandidateResponse> *tag)
{
- // Request: uint32 candidate_id = 1;
- uint32_t candidate_id = request->candidate_id();
-
- if (nb_dbg_client_grpc)
- zlog_debug(
- "received RPC DeleteCandidate(candidate_id: %u)",
- candidate_id);
-
- struct candidate *candidate = get_candidate(candidate_id);
- if (!candidate)
- return grpc::Status(
- grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found");
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(DeleteCandidate);
+ tag->state = PROCESS;
+ case PROCESS: {
+
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = tag->request.candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC DeleteCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate =
+ get_candidate(candidate_id);
+ if (!candidate) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found"),
+ tag);
+ tag->state = FINISH;
+ return;
+ } else {
+ delete_candidate(candidate);
+ tag->responder.Finish(tag->response,
+ grpc::Status::OK, tag);
+ tag->state = FINISH;
+ return;
+ }
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
+ }
- delete_candidate(candidate);
+ void HandleUpdateCandidate(RpcState<frr::UpdateCandidateRequest,
+ frr::UpdateCandidateResponse> *tag)
+ {
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(UpdateCandidate);
+ tag->state = PROCESS;
+ case PROCESS: {
+
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = tag->request.candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC UpdateCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate =
+ get_candidate(candidate_id);
+
+ if (!candidate)
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found"),
+ tag);
+ else if (candidate->transaction)
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ FAILED_PRECONDITION,
+ "candidate is in the middle of a transaction"),
+ tag);
+ else if (nb_candidate_update(candidate->config)
+ != NB_OK)
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "failed to update candidate configuration"),
+ tag);
+
+ else
+ tag->responder.Finish(tag->response,
+ grpc::Status::OK, tag);
+
+ tag->state = FINISH;
- return grpc::Status::OK;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status
- UpdateCandidate(grpc::ServerContext *context,
- frr::UpdateCandidateRequest const *request,
- frr::UpdateCandidateResponse *response) override
+ void HandleEditCandidate(RpcState<frr::EditCandidateRequest,
+ frr::EditCandidateResponse> *tag)
{
- // Request: uint32 candidate_id = 1;
- uint32_t candidate_id = request->candidate_id();
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(EditCandidate);
+ tag->state = PROCESS;
+ case PROCESS: {
+
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = tag->request.candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC EditCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate =
+ get_candidate(candidate_id);
+
+ if (!candidate) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found"),
+ tag);
+ tag->state = FINISH;
+ break;
+ }
- if (nb_dbg_client_grpc)
- zlog_debug(
- "received RPC UpdateCandidate(candidate_id: %u)",
- candidate_id);
+ struct nb_config *candidate_tmp =
+ nb_config_dup(candidate->config);
+
+ auto pvs = tag->request.update();
+ for (const frr::PathValue &pv : pvs) {
+ if (yang_dnode_edit(candidate_tmp->dnode,
+ pv.path(), pv.value())
+ != 0) {
+ nb_config_free(candidate_tmp);
+
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ INVALID_ARGUMENT,
+ "Failed to update \""
+ + pv.path()
+ + "\""),
+ tag);
+
+ tag->state = FINISH;
+ return;
+ }
+ }
- struct candidate *candidate = get_candidate(candidate_id);
- if (!candidate)
- return grpc::Status(
- grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found");
+ pvs = tag->request.delete_();
+ for (const frr::PathValue &pv : pvs) {
+ if (yang_dnode_delete(candidate_tmp->dnode,
+ pv.path())
+ != 0) {
+ nb_config_free(candidate_tmp);
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ INVALID_ARGUMENT,
+ "Failed to remove \""
+ + pv.path()
+ + "\""),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
+ }
- if (candidate->transaction)
- return grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "candidate is in the middle of a transaction");
+ // No errors, accept all changes.
+ nb_config_replace(candidate->config, candidate_tmp,
+ false);
- if (nb_candidate_update(candidate->config) != NB_OK)
- return grpc::Status(
- grpc::StatusCode::INTERNAL,
- "failed to update candidate configuration");
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
- return grpc::Status::OK;
+ tag->state = FINISH;
+
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status
- EditCandidate(grpc::ServerContext *context,
- frr::EditCandidateRequest const *request,
- frr::EditCandidateResponse *response) override
+ void HandleLoadToCandidate(RpcState<frr::LoadToCandidateRequest,
+ frr::LoadToCandidateResponse> *tag)
{
- // Request: uint32 candidate_id = 1;
- uint32_t candidate_id = request->candidate_id();
-
- if (nb_dbg_client_grpc)
- zlog_debug(
- "received RPC EditCandidate(candidate_id: %u)",
- candidate_id);
-
- struct candidate *candidate = get_candidate(candidate_id);
- if (!candidate)
- return grpc::Status(
- grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found");
-
- // Create a copy of the candidate. For consistency, we need to
- // ensure that either all changes are accepted or none are (in
- // the event of an error).
- struct nb_config *candidate_tmp =
- nb_config_dup(candidate->config);
-
- auto pvs = request->update();
- for (const frr::PathValue &pv : pvs) {
- if (yang_dnode_edit(candidate_tmp->dnode, pv.path(),
- pv.value())
- != 0) {
- nb_config_free(candidate_tmp);
- return grpc::Status(
- grpc::StatusCode::INVALID_ARGUMENT,
- "Failed to update \"" + pv.path()
- + "\"");
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(LoadToCandidate);
+ tag->state = PROCESS;
+ case PROCESS: {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = tag->request.candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC LoadToCandidate(candidate_id: %u)",
+ candidate_id);
+
+ // Request: LoadType type = 2;
+ int load_type = tag->request.type();
+ // Request: DataTree config = 3;
+ auto config = tag->request.config();
+
+
+ struct candidate *candidate =
+ get_candidate(candidate_id);
+
+ if (!candidate) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found"),
+ tag);
+ tag->state = FINISH;
+ return;
}
- }
- pvs = request->delete_();
- for (const frr::PathValue &pv : pvs) {
- if (yang_dnode_delete(candidate_tmp->dnode, pv.path())
- != 0) {
- nb_config_free(candidate_tmp);
- return grpc::Status(
- grpc::StatusCode::INVALID_ARGUMENT,
- "Failed to remove \"" + pv.path()
- + "\"");
+ struct lyd_node *dnode =
+ dnode_from_data_tree(&config, true);
+ if (!dnode) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "Failed to parse the configuration"),
+ tag);
+ tag->state = FINISH;
+ return;
}
- }
- // No errors, accept all changes.
- nb_config_replace(candidate->config, candidate_tmp, false);
+ struct nb_config *loaded_config = nb_config_new(dnode);
+
+ if (load_type == frr::LoadToCandidateRequest::REPLACE)
+ nb_config_replace(candidate->config,
+ loaded_config, false);
+ else if (nb_config_merge(candidate->config,
+ loaded_config, false)
+ != NB_OK) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "Failed to merge the loaded configuration"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- return grpc::Status::OK;
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status
- LoadToCandidate(grpc::ServerContext *context,
- frr::LoadToCandidateRequest const *request,
- frr::LoadToCandidateResponse *response) override
+ void
+ HandleCommit(RpcState<frr::CommitRequest, frr::CommitResponse> *tag)
{
- // Request: uint32 candidate_id = 1;
- uint32_t candidate_id = request->candidate_id();
- // Request: LoadType type = 2;
- int load_type = request->type();
- // Request: DataTree config = 3;
- auto config = request->config();
-
- if (nb_dbg_client_grpc)
- zlog_debug(
- "received RPC LoadToCandidate(candidate_id: %u)",
- candidate_id);
-
- struct candidate *candidate = get_candidate(candidate_id);
- if (!candidate)
- return grpc::Status(
- grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found");
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(Commit);
+ tag->state = PROCESS;
+ case PROCESS: {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = tag->request.candidate_id();
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC Commit(candidate_id: %u)",
+ candidate_id);
+
+ // Request: Phase phase = 2;
+ int phase = tag->request.phase();
+ // Request: string comment = 3;
+ const std::string comment = tag->request.comment();
+
+ // Find candidate configuration.
+ struct candidate *candidate =
+ get_candidate(candidate_id);
+ if (!candidate) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- struct lyd_node *dnode = dnode_from_data_tree(&config, true);
- if (!dnode)
- return grpc::Status(
- grpc::StatusCode::INTERNAL,
- "Failed to parse the configuration");
+ int ret = NB_OK;
+ uint32_t transaction_id = 0;
+
+ // Check for misuse of the two-phase commit protocol.
+ switch (phase) {
+ case frr::CommitRequest::PREPARE:
+ case frr::CommitRequest::ALL:
+ if (candidate->transaction) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ FAILED_PRECONDITION,
+ "candidate is in the middle of a transaction"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
+ break;
+ case frr::CommitRequest::ABORT:
+ case frr::CommitRequest::APPLY:
+ if (!candidate->transaction) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ FAILED_PRECONDITION,
+ "no transaction in progress"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
+ break;
+ default:
+ break;
+ }
- struct nb_config *loaded_config = nb_config_new(dnode);
- if (load_type == frr::LoadToCandidateRequest::REPLACE)
- nb_config_replace(candidate->config, loaded_config,
- false);
- else if (nb_config_merge(candidate->config, loaded_config,
- false)
- != NB_OK)
- return grpc::Status(
- grpc::StatusCode::INTERNAL,
- "Failed to merge the loaded configuration");
+ // Execute the user request.
+ switch (phase) {
+ case frr::CommitRequest::VALIDATE:
+ ret = nb_candidate_validate(candidate->config);
+ break;
+ case frr::CommitRequest::PREPARE:
+ ret = nb_candidate_commit_prepare(
+ candidate->config, NB_CLIENT_GRPC, NULL,
+ comment.c_str(),
+ &candidate->transaction);
+ break;
+ case frr::CommitRequest::ABORT:
+ nb_candidate_commit_abort(
+ candidate->transaction);
+ break;
+ case frr::CommitRequest::APPLY:
+ nb_candidate_commit_apply(
+ candidate->transaction, true,
+ &transaction_id);
+ break;
+ case frr::CommitRequest::ALL:
+ ret = nb_candidate_commit(
+ candidate->config, NB_CLIENT_GRPC, NULL,
+ true, comment.c_str(), &transaction_id);
+ break;
+ }
- return grpc::Status::OK;
- }
+ // Map northbound error codes to gRPC error codes.
+ switch (ret) {
+ case NB_ERR_NO_CHANGES:
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::ABORTED,
+ "No configuration changes detected"),
+ tag);
+ tag->state = FINISH;
+ return;
+ case NB_ERR_LOCKED:
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::UNAVAILABLE,
+ "There's already a transaction in progress"),
+ tag);
+ tag->state = FINISH;
+ return;
+ case NB_ERR_VALIDATION:
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::
+ INVALID_ARGUMENT,
+ "Validation error"),
+ tag);
+ tag->state = FINISH;
+ return;
+ case NB_ERR_RESOURCE:
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ RESOURCE_EXHAUSTED,
+ "Failed do allocate resources"),
+ tag);
+ tag->state = FINISH;
+ return;
+ case NB_ERR:
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::INTERNAL,
+ "Internal error"),
+ tag);
+ tag->state = FINISH;
+ return;
+ default:
+ break;
+ }
- grpc::Status Commit(grpc::ServerContext *context,
- frr::CommitRequest const *request,
- frr::CommitResponse *response) override
- {
- // Request: uint32 candidate_id = 1;
- uint32_t candidate_id = request->candidate_id();
- // Request: Phase phase = 2;
- int phase = request->phase();
- // Request: string comment = 3;
- const std::string comment = request->comment();
+ // Response: uint32 transaction_id = 1;
+ if (transaction_id)
+ tag->response.set_transaction_id(
+ transaction_id);
- if (nb_dbg_client_grpc)
- zlog_debug("received RPC Commit(candidate_id: %u)",
- candidate_id);
-
- // Find candidate configuration.
- struct candidate *candidate = get_candidate(candidate_id);
- if (!candidate)
- return grpc::Status(
- grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found");
-
- int ret = NB_OK;
- uint32_t transaction_id = 0;
-
- // Check for misuse of the two-phase commit protocol.
- switch (phase) {
- case frr::CommitRequest::PREPARE:
- case frr::CommitRequest::ALL:
- if (candidate->transaction)
- return grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "pending transaction in progress");
- break;
- case frr::CommitRequest::ABORT:
- case frr::CommitRequest::APPLY:
- if (!candidate->transaction)
- return grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "no transaction in progress");
- break;
- default:
- break;
- }
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
- // Execute the user request.
- switch (phase) {
- case frr::CommitRequest::VALIDATE:
- ret = nb_candidate_validate(candidate->config);
- break;
- case frr::CommitRequest::PREPARE:
- ret = nb_candidate_commit_prepare(
- candidate->config, NB_CLIENT_GRPC, NULL,
- comment.c_str(), &candidate->transaction);
- break;
- case frr::CommitRequest::ABORT:
- nb_candidate_commit_abort(candidate->transaction);
- break;
- case frr::CommitRequest::APPLY:
- nb_candidate_commit_apply(candidate->transaction, true,
- &transaction_id);
- break;
- case frr::CommitRequest::ALL:
- ret = nb_candidate_commit(
- candidate->config, NB_CLIENT_GRPC, NULL, true,
- comment.c_str(), &transaction_id);
break;
}
-
- // Map northbound error codes to gRPC error codes.
- switch (ret) {
- case NB_ERR_NO_CHANGES:
- return grpc::Status(
- grpc::StatusCode::ABORTED,
- "No configuration changes detected");
- case NB_ERR_LOCKED:
- return grpc::Status(
- grpc::StatusCode::UNAVAILABLE,
- "There's already a transaction in progress");
- case NB_ERR_VALIDATION:
- return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Validation error");
- case NB_ERR_RESOURCE:
- return grpc::Status(
- grpc::StatusCode::RESOURCE_EXHAUSTED,
- "Failed do allocate resources");
- case NB_ERR:
- return grpc::Status(grpc::StatusCode::INTERNAL,
- "Internal error");
- default:
- break;
+ case FINISH:
+ delete tag;
}
-
- // Response: uint32 transaction_id = 1;
- if (transaction_id)
- response->set_transaction_id(transaction_id);
-
- return grpc::Status::OK;
}
- grpc::Status
- ListTransactions(grpc::ServerContext *context,
- frr::ListTransactionsRequest const *request,
- grpc::ServerWriter<frr::ListTransactionsResponse>
- *writer) override
+ void
+ HandleListTransactions(RpcState<frr::ListTransactionsRequest,
+ frr::ListTransactionsResponse> *tag)
{
if (nb_dbg_client_grpc)
zlog_debug("received RPC ListTransactions()");
- nb_db_transactions_iterate(list_transactions_cb, writer);
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC_STREAMING(ListTransactions);
+ tag->context = new std::list<std::tuple<
+ int, std::string, std::string, std::string>>();
+ nb_db_transactions_iterate(list_transactions_cb,
+ tag->context);
+ tag->state = PROCESS;
+ case PROCESS: {
+ auto list = static_cast<std::list<std::tuple<
+ int, std::string, std::string, std::string>> *>(
+ tag->context);
+ if (list->empty()) {
+ tag->async_responder.Finish(grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ return;
+ }
+ auto item = list->back();
- return grpc::Status::OK;
- }
- grpc::Status
- GetTransaction(grpc::ServerContext *context,
- frr::GetTransactionRequest const *request,
- frr::GetTransactionResponse *response) override
- {
- struct nb_config *nb_config;
+ frr::ListTransactionsResponse response;
- // Request: uint32 transaction_id = 1;
- uint32_t transaction_id = request->transaction_id();
- // Request: Encoding encoding = 2;
- frr::Encoding encoding = request->encoding();
- // Request: bool with_defaults = 3;
- bool with_defaults = request->with_defaults();
+ // Response: uint32 id = 1;
+ response.set_id(std::get<0>(item));
- if (nb_dbg_client_grpc)
- zlog_debug(
- "received RPC GetTransaction(transaction_id: %u, encoding: %u)",
- transaction_id, encoding);
+ // Response: string client = 2;
+ response.set_client(std::get<1>(item).c_str());
- // Load configuration from the transactions database.
- nb_config = nb_db_transaction_load(transaction_id);
- if (!nb_config)
- return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Transaction not found");
+ // Response: string date = 3;
+ response.set_date(std::get<2>(item).c_str());
- // Response: DataTree config = 1;
- auto config = response->mutable_config();
- config->set_encoding(encoding);
+ // Response: string comment = 4;
+ response.set_comment(std::get<3>(item).c_str());
- // Dump data using the requested format.
- if (data_tree_from_dnode(config, nb_config->dnode,
- encoding2lyd_format(encoding),
- with_defaults)
- != 0) {
- nb_config_free(nb_config);
- return grpc::Status(grpc::StatusCode::INTERNAL,
- "Failed to dump data");
+ list->pop_back();
+
+ tag->async_responder.Write(response, tag);
+ break;
+ }
+ case FINISH:
+ delete static_cast<std::list<std::tuple<
+ int, std::string, std::string, std::string>> *>(
+ tag->context);
+ delete tag;
}
+ }
- nb_config_free(nb_config);
+ void HandleGetTransaction(RpcState<frr::GetTransactionRequest,
+ frr::GetTransactionResponse> *tag)
+ {
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(GetTransaction);
+ tag->state = PROCESS;
+ case PROCESS: {
+ // Request: uint32 transaction_id = 1;
+ uint32_t transaction_id = tag->request.transaction_id();
+ // Request: Encoding encoding = 2;
+ frr::Encoding encoding = tag->request.encoding();
+ // Request: bool with_defaults = 3;
+ bool with_defaults = tag->request.with_defaults();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC GetTransaction(transaction_id: %u, encoding: %u)",
+ transaction_id, encoding);
+
+ struct nb_config *nb_config;
+
+ // Load configuration from the transactions database.
+ nb_config = nb_db_transaction_load(transaction_id);
+ if (!nb_config) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::
+ INVALID_ARGUMENT,
+ "Transaction not found"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- return grpc::Status::OK;
+ // Response: DataTree config = 1;
+ auto config = tag->response.mutable_config();
+ config->set_encoding(encoding);
+
+ // Dump data using the requested format.
+ if (data_tree_from_dnode(config, nb_config->dnode,
+ encoding2lyd_format(encoding),
+ with_defaults)
+ != 0) {
+ nb_config_free(nb_config);
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::INTERNAL,
+ "Failed to dump data"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
+
+ nb_config_free(nb_config);
+
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status LockConfig(grpc::ServerContext *context,
- frr::LockConfigRequest const *request,
- frr::LockConfigResponse *response) override
+ void HandleLockConfig(
+ RpcState<frr::LockConfigRequest, frr::LockConfigResponse> *tag)
{
if (nb_dbg_client_grpc)
zlog_debug("received RPC LockConfig()");
- if (nb_running_lock(NB_CLIENT_GRPC, NULL))
- return grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "running configuration is locked already");
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(LockConfig);
+ tag->state = PROCESS;
+ case PROCESS: {
+ if (nb_running_lock(NB_CLIENT_GRPC, NULL)) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ FAILED_PRECONDITION,
+ "running configuration is locked already"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- return grpc::Status::OK;
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status UnlockConfig(grpc::ServerContext *context,
- frr::UnlockConfigRequest const *request,
- frr::UnlockConfigResponse *response) override
+ void HandleUnlockConfig(RpcState<frr::UnlockConfigRequest,
+ frr::UnlockConfigResponse> *tag)
{
if (nb_dbg_client_grpc)
zlog_debug("received RPC UnlockConfig()");
- if (nb_running_unlock(NB_CLIENT_GRPC, NULL))
- return grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "failed to unlock the running configuration");
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(UnlockConfig);
+ tag->state = PROCESS;
+ case PROCESS: {
+ if (nb_running_unlock(NB_CLIENT_GRPC, NULL)) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(
+ grpc::StatusCode::
+ FAILED_PRECONDITION,
+ "failed to unlock the running configuration"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- return grpc::Status::OK;
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
+ }
}
- grpc::Status Execute(grpc::ServerContext *context,
- frr::ExecuteRequest const *request,
- frr::ExecuteResponse *response) override
+ void
+ HandleExecute(RpcState<frr::ExecuteRequest, frr::ExecuteResponse> *tag)
{
struct nb_node *nb_node;
struct list *input_list;
@@ -517,61 +962,98 @@ class NorthboundImpl final : public frr::Northbound::Service
struct yang_data *data;
const char *xpath;
- // Request: string path = 1;
- xpath = request->path().c_str();
+ switch (tag->state) {
+ case CREATE:
+ REQUEST_RPC(Execute);
+ tag->state = PROCESS;
+ case PROCESS: {
+ // Request: string path = 1;
+ xpath = tag->request.path().c_str();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC Execute(path: \"%s\")",
+ xpath);
+
+ if (tag->request.path().empty()) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::
+ INVALID_ARGUMENT,
+ "Data path is empty"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- if (nb_dbg_client_grpc)
- zlog_debug("received RPC Execute(path: \"%s\")", xpath);
+ nb_node = nb_node_find(xpath);
+ if (!nb_node) {
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::
+ INVALID_ARGUMENT,
+ "Unknown data path"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- if (request->path().empty())
- return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Data path is empty");
+ input_list = yang_data_list_new();
+ output_list = yang_data_list_new();
- nb_node = nb_node_find(xpath);
- if (!nb_node)
- return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Unknown data path");
+ // Read input parameters.
+ auto input = tag->request.input();
+ for (const frr::PathValue &pv : input) {
+ // Request: repeated PathValue input = 2;
+ data = yang_data_new(pv.path().c_str(),
+ pv.value().c_str());
+ listnode_add(input_list, data);
+ }
- input_list = yang_data_list_new();
- output_list = yang_data_list_new();
+ // Execute callback registered for this XPath.
+ if (nb_callback_rpc(nb_node, xpath, input_list,
+ output_list)
+ != NB_OK) {
+ flog_warn(EC_LIB_NB_CB_RPC,
+ "%s: rpc callback failed: %s",
+ __func__, xpath);
+ list_delete(&input_list);
+ list_delete(&output_list);
+
+ tag->responder.Finish(
+ tag->response,
+ grpc::Status(grpc::StatusCode::INTERNAL,
+ "RPC failed"),
+ tag);
+ tag->state = FINISH;
+ return;
+ }
- // Read input parameters.
- auto input = request->input();
- for (const frr::PathValue &pv : input) {
- // Request: repeated PathValue input = 2;
- data = yang_data_new(pv.path().c_str(),
- pv.value().c_str());
- listnode_add(input_list, data);
- }
+ // Process output parameters.
+ for (ALL_LIST_ELEMENTS_RO(output_list, node, data)) {
+ // Response: repeated PathValue output = 1;
+ frr::PathValue *pv = tag->response.add_output();
+ pv->set_path(data->xpath);
+ pv->set_value(data->value);
+ }
- // Execute callback registered for this XPath.
- if (nb_callback_rpc(nb_node, xpath, input_list, output_list)
- != NB_OK) {
- flog_warn(EC_LIB_NB_CB_RPC,
- "%s: rpc callback failed: %s", __func__,
- xpath);
+ // Release memory.
list_delete(&input_list);
list_delete(&output_list);
- return grpc::Status(grpc::StatusCode::INTERNAL,
- "RPC failed");
- }
- // Process output parameters.
- for (ALL_LIST_ELEMENTS_RO(output_list, node, data)) {
- // Response: repeated PathValue output = 1;
- frr::PathValue *pv = response->add_output();
- pv->set_path(data->xpath);
- pv->set_value(data->value);
+ tag->responder.Finish(tag->response, grpc::Status::OK,
+ tag);
+ tag->state = FINISH;
+ break;
+ }
+ case FINISH:
+ delete tag;
}
-
- // Release memory.
- list_delete(&input_list);
- list_delete(&output_list);
-
- return grpc::Status::OK;
}
private:
+ frr::Northbound::AsyncService *_service;
+ grpc::ServerCompletionQueue *_cq;
+
struct candidate {
uint32_t id;
struct nb_config *config;
@@ -640,24 +1122,12 @@ class NorthboundImpl final : public frr::Northbound::Service
const char *client_name,
const char *date, const char *comment)
{
- grpc::ServerWriter<frr::ListTransactionsResponse> *writer =
- static_cast<grpc::ServerWriter<
- frr::ListTransactionsResponse> *>(arg);
- frr::ListTransactionsResponse response;
-
- // Response: uint32 id = 1;
- response.set_id(transaction_id);
-
- // Response: string client = 2;
- response.set_client(client_name);
- // Response: string date = 3;
- response.set_date(date);
-
- // Response: string comment = 4;
- response.set_comment(comment);
-
- writer->Write(response);
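+		/*
+		 * With the asynchronous server this callback only collects the
+		 * transactions into a list; HandleListTransactions() later pops
+		 * them off and streams one ListTransactionsResponse per entry
+		 * from the completion-queue handler.
+		 */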
+ auto list = static_cast<std::list<std::tuple<
+ int, std::string, std::string, std::string>> *>(arg);
+ list->push_back(std::make_tuple(
+ transaction_id, std::string(client_name),
+ std::string(date), std::string(comment)));
}
static int data_tree_from_dnode(frr::DataTree *dt,
@@ -844,42 +1314,37 @@ class NorthboundImpl final : public frr::Northbound::Service
static void *grpc_pthread_start(void *arg)
{
- unsigned long *port = static_cast<unsigned long *>(arg);
- NorthboundImpl service;
- std::stringstream server_address;
-
- server_address << "0.0.0.0:" << *port;
+ struct frr_pthread *fpt = static_cast<frr_pthread *>(arg);
+ unsigned long *port = static_cast<unsigned long *>(fpt->data);
- grpc::ServerBuilder builder;
- builder.AddListeningPort(server_address.str(),
- grpc::InsecureServerCredentials());
- builder.RegisterService(&service);
+ frr_pthread_set_name(fpt);
- std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
-
- zlog_notice("gRPC server listening on %s",
- server_address.str().c_str());
-
- server->Wait();
+ NorthboundImpl nb;
+ nb.Run(*port);
return NULL;
}
static int frr_grpc_init(unsigned long *port)
{
+ fpt = frr_pthread_new(&attr, "frr-grpc", "frr-grpc");
+ fpt->data = static_cast<void *>(port);
+
/* Create a pthread for gRPC since it runs its own event loop. */
- if (pthread_create(&grpc_pthread, NULL, grpc_pthread_start, port)) {
+ if (frr_pthread_run(fpt, NULL) < 0) {
flog_err(EC_LIB_SYSTEM_CALL, "%s: error creating pthread: %s",
__func__, safe_strerror(errno));
return -1;
}
- pthread_detach(grpc_pthread);
+ pthread_detach(fpt->thread);
return 0;
}
static int frr_grpc_finish(void)
{
+ if (fpt)
+ frr_pthread_destroy(fpt);
// TODO: cancel the gRPC pthreads gracefully.
return 0;
@@ -918,6 +1383,8 @@ static int frr_grpc_module_very_late_init(struct thread *thread)
if (frr_grpc_init(&port) < 0)
goto error;
+ return 0;
+
error:
flog_err(EC_LIB_GRPC_INIT, "failed to initialize the gRPC module");
return -1;
diff --git a/lib/subdir.am b/lib/subdir.am
index 2f8cbe5d52..b2f3e7c5de 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -415,7 +415,7 @@ am__v_CLIPPY_1 =
CLIPPY_DEPS = $(CLIPPY) $(top_srcdir)/python/clidef.py
-SUFFIXES = _clippy.c .proto .pb-c.c .pb-c.h .pb.h .pb.cc .grpc.pb.cc
+SUFFIXES += _clippy.c
.c_clippy.c:
$(AM_V_CLIPPY) $(CLIPPY) $(top_srcdir)/python/clidef.py -o $@ $<
diff --git a/lib/vrf.c b/lib/vrf.c
index f8702973bc..6bd577fce1 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -726,7 +726,7 @@ DEFUN_NOSH(vrf_exit,
{
/* We have to set vrf context to default vrf */
VTY_PUSH_CONTEXT(VRF_NODE, vrf_get(VRF_DEFAULT, VRF_DEFAULT_NAME));
- vty->node = CONFIG_NODE;
+ cmd_exit(vty);
return CMD_SUCCESS;
}
diff --git a/python/callgraph-dot.py b/python/callgraph-dot.py
new file mode 100644
index 0000000000..4faf1dae16
--- /dev/null
+++ b/python/callgraph-dot.py
@@ -0,0 +1,476 @@
+# callgraph json to graphviz generator for FRR
+#
+# Copyright (C) 2020 David Lamparter for NetDEF, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+import re
+import sys
+import json
+
+class FunctionNode(object):
+ funcs = {}
+
+ def __init__(self, name):
+ super().__init__()
+ FunctionNode.funcs[name] = self
+
+ self.name = name
+ self.out = []
+ self.inb = []
+ self.rank = None
+ self.defined = False
+ self.defs = []
+
+ def __repr__(self):
+ return '<"%s()" rank=%r>' % (self.name, self.rank)
+
+ def define(self, attrs):
+ self.defined = True
+ self.defs.append((attrs['filename'], attrs['line']))
+ return self
+
+ def add_call(self, called, attrs):
+ return CallEdge(self, called, attrs)
+
+ def calls(self):
+ for e in self.out:
+ yield e.o
+
+ def calld(self):
+ for e in self.inb:
+ yield e.i
+
+ def unlink(self, other):
+ self.out = list([edge for edge in self.out if edge.o != other])
+        other.inb = list([edge for edge in other.inb if edge.i != self])
+
+ @classmethod
+ def get(cls, name):
+ if name in cls.funcs:
+ return cls.funcs[name]
+ return FunctionNode(name)
+
+class CallEdge(object):
+ def __init__(self, i, o, attrs):
+ self.i = i
+ self.o = o
+ self.is_external = attrs['is_external']
+ self.attrs = attrs
+
+ i.out.append(self)
+ o.inb.append(self)
+
+ def __repr__(self):
+ return '<"%s()" -> "%s()">' % (self.i.name, self.o.name)
+
+def nameclean(n):
+ if '.' in n:
+ return n.split('.', 1)[0]
+ return n
+
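+# Iteratively rank every function node.  With direction=1 the rank grows with
+# the call depth towards the leaves (a function that calls nothing gets rank
+# 1); with direction=-1 the rank becomes more negative the deeper a function
+# sits below the entry points.  Undefined functions get rank 0, and nodes
+# whose rank cannot be resolved (functions involved in call cycles) are
+# returned as the leftover queue.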
+def calc_rank(queue, direction):
+ nextq = queue
+
+ if direction == 1:
+ aggr = max
+ elem = lambda x: x.calls()
+ else:
+ aggr = min
+ elem = lambda x: x.calld()
+
+ currank = direction
+ cont = True
+
+ while len(nextq) > 0 and cont:
+ queue = nextq
+ nextq = []
+
+ #sys.stderr.write('rank %d\n' % currank)
+
+ cont = False
+
+ for node in queue:
+ if not node.defined:
+ node.rank = 0
+ continue
+
+ rank = direction
+ for other in elem(node):
+ if other is node:
+ continue
+ if other.rank is None:
+ nextq.append(node)
+ break
+ rank = aggr(rank, other.rank + direction)
+ else:
+ cont = True
+ node.rank = rank
+
+ currank += direction
+
+ return nextq
+
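+# Graph wraps the leftover (cyclic) FunctionNodes: each Graph.Node mirrors one
+# function (and can absorb more via merge()), automerge() groups nodes whose
+# callers all lie inside the group being built, and calc_subgraphs() separates
+# strongly connected clusters from linear nodes for the graphviz output.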
+class Graph(dict):
+ class Subgraph(set):
+ def __init__(self):
+ super().__init__()
+
+ class NodeGroup(set):
+ def __init__(self, members):
+ super().__init__(members)
+
+ class Node(object):
+ def __init__(self, graph, fn):
+ super().__init__()
+ self._fn = fn
+ self._fns = [fn]
+ self._graph = graph
+ self._calls = set()
+ self._calld = set()
+ self._group = None
+
+ def __repr__(self):
+ return '<Graph.Node "%s()"/%d>' % (self._fn.name, len(self._fns))
+
+ def __hash__(self):
+ return hash(self._fn.name)
+
+ def _finalize(self):
+ for called in self._fn.calls():
+ if called.name == self._fn.name:
+ continue
+ if called.name in self._graph:
+ self._calls.add(self._graph[called.name])
+ self._graph[called.name]._calld.add(self)
+
+ def unlink(self, other):
+ self._calls.remove(other)
+ other._calld.remove(self)
+
+ @property
+ def name(self):
+ return self._fn.name
+
+ def calls(self):
+ return self._calls
+ def calld(self):
+ return self._calld
+
+ def group(self, members):
+ assert self in members
+
+ pregroups = []
+ for g in [m._group for m in members]:
+ if g is None:
+ continue
+ if g in pregroups:
+ continue
+
+ assert g <= members
+ pregroups.append(g)
+
+ if len(pregroups) == 0:
+ group = self._graph.NodeGroup(members)
+ self._graph._groups.append(group)
+ elif len(pregroups) == 1:
+ group = pregroups[0]
+ group |= members
+ else:
+ for g in pregroups:
+ self._graph._groups.remove(g)
+ group = self._graph.NodeGroup(members)
+ self._graph._groups.append(group)
+
+ for m in members:
+ m._group = group
+ return group
+
+ def merge(self, other):
+ self._fns.extend(other._fns)
+ self._calls = (self._calls | other._calls) - {self, other}
+ self._calld = (self._calld | other._calld) - {self, other}
+ for c in other._calls:
+ if c == self:
+ continue
+ c._calld.remove(other)
+ c._calld.add(self)
+ for c in other._calld:
+ if c == self:
+ continue
+ c._calls.remove(other)
+ c._calls.add(self)
+ del self._graph[other._fn.name]
+
+ def __init__(self, funcs):
+ super().__init__()
+ self._funcs = funcs
+ for fn in funcs:
+ self[fn.name] = self.Node(self, fn)
+ for node in self.values():
+ node._finalize()
+ self._groups = []
+
+ def automerge(self):
+ nodes = list(self.values())
+
+ while len(nodes):
+ node = nodes.pop(0)
+
+ candidates = {node}
+ evalset = set(node.calls())
+ prevevalset = None
+
+ while prevevalset != evalset:
+ prevevalset = evalset
+ evalset = set()
+
+ for evnode in prevevalset:
+ inbound = set(evnode.calld())
+ if inbound <= candidates:
+ candidates.add(evnode)
+ evalset |= set(evnode.calls()) - candidates
+ else:
+ evalset.add(evnode)
+
+ #if len(candidates) > 1:
+ # for candidate in candidates:
+ # if candidate != node:
+ # #node.merge(candidate)
+ # if candidate in nodes:
+ # nodes.remove(candidate)
+ node.group(candidates)
+
+ for candidate in candidates:
+ if candidate in nodes:
+ nodes.remove(candidate)
+
+ def calc_subgraphs(self):
+ nodes = list(self.values())
+ self._subgraphs = []
+ up = {}
+ down = {}
+
+ self._linear_nodes = []
+
+ while len(nodes):
+ sys.stderr.write('%d\n' % len(nodes))
+ node = nodes.pop(0)
+
+ down[node] = set()
+ queue = [node]
+ while len(queue):
+ now = queue.pop()
+ down[node].add(now)
+ for calls in now.calls():
+ if calls in down[node]:
+ continue
+ queue.append(calls)
+
+ up[node] = set()
+ queue = [node]
+ while len(queue):
+ now = queue.pop()
+ up[node].add(now)
+ for calld in now.calld():
+ if calld in up[node]:
+ continue
+ queue.append(calld)
+
+ common = up[node] & down[node]
+
+ if len(common) == 1:
+ self._linear_nodes.append(node)
+ else:
+ sg = self.Subgraph()
+ sg |= common
+ self._subgraphs.append(sg)
+ for n in common:
+ if n != node:
+ nodes.remove(n)
+
+ return self._subgraphs, self._linear_nodes
+
+
+with open(sys.argv[1], 'r') as fd:
+ data = json.load(fd)
+
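+# The static call graph cannot see through work queues: work_queue_add() only
+# enqueues an item, and the function that eventually processes it is attached
+# to the queue elsewhere.  extra_info manually maps (caller, 'work_queue_add')
+# pairs to the corresponding work-queue callbacks so those edges still show up
+# in the generated graph.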
+extra_info = {
+ # zebra - LSP WQ
+ ('lsp_processq_add', 'work_queue_add'): [
+ 'lsp_process',
+ 'lsp_processq_del',
+ 'lsp_processq_complete',
+ ],
+ # zebra - main WQ
+ ('mq_add_handler', 'work_queue_add'): [
+ 'meta_queue_process',
+ ],
+ ('meta_queue_process', 'work_queue_add'): [
+ 'meta_queue_process',
+ ],
+ # bgpd - label pool WQ
+ ('bgp_lp_get', 'work_queue_add'): [
+ 'lp_cbq_docallback',
+ ],
+ ('bgp_lp_event_chunk', 'work_queue_add'): [
+ 'lp_cbq_docallback',
+ ],
+ ('bgp_lp_event_zebra_up', 'work_queue_add'): [
+ 'lp_cbq_docallback',
+ ],
+ # bgpd - main WQ
+ ('bgp_process', 'work_queue_add'): [
+ 'bgp_process_wq',
+ 'bgp_processq_del',
+ ],
+ ('bgp_add_eoiu_mark', 'work_queue_add'): [
+ 'bgp_process_wq',
+ 'bgp_processq_del',
+ ],
+ # clear node WQ
+ ('bgp_clear_route_table', 'work_queue_add'): [
+ 'bgp_clear_route_node',
+ 'bgp_clear_node_queue_del',
+ 'bgp_clear_node_complete',
+ ],
+ # rfapi WQs
+ ('rfapi_close', 'work_queue_add'): [
+ 'rfapi_deferred_close_workfunc',
+ ],
+ ('rfapiRibUpdatePendingNode', 'work_queue_add'): [
+ 'rfapiRibDoQueuedCallback',
+ 'rfapiRibQueueItemDelete',
+ ],
+}
+
+
+for func, fdata in data['functions'].items():
+ func = nameclean(func)
+ fnode = FunctionNode.get(func).define(fdata)
+
+ for call in fdata['calls']:
+ if call.get('type') in [None, 'unnamed', 'thread_sched']:
+ if call.get('target') is None:
+ continue
+ tgt = nameclean(call['target'])
+ fnode.add_call(FunctionNode.get(tgt), call)
+ for fptr in call.get('funcptrs', []):
+ fnode.add_call(FunctionNode.get(nameclean(fptr)), call)
+ if tgt == 'work_queue_add':
+ if (func, tgt) not in extra_info:
+ sys.stderr.write('%s:%d:%s(): work_queue_add() not handled\n' % (
+ call['filename'], call['line'], func))
+ else:
+ attrs = dict(call)
+ attrs.update({'is_external': False, 'type': 'workqueue'})
+ for dst in extra_info[func, tgt]:
+                        fnode.add_call(FunctionNode.get(dst), attrs)
+ elif call['type'] == 'install_element':
+ vty_node = FunctionNode.get('VTY_NODE_%d' % call['vty_node'])
+ vty_node.add_call(FunctionNode.get(nameclean(call['target'])), call)
+ elif call['type'] == 'hook':
+ # TODO: edges for hooks from data['hooks']
+ pass
+
+n = FunctionNode.funcs
+
+# fix some very low end functions cycling back very far to the top
+if 'peer_free' in n:
+ n['peer_free'].unlink(n['bgp_timer_set'])
+ n['peer_free'].unlink(n['bgp_addpath_set_peer_type'])
+if 'bgp_path_info_extra_free' in n:
+ n['bgp_path_info_extra_free'].rank = 0
+
+if 'zlog_ref' in n:
+ n['zlog_ref'].rank = 0
+if 'mt_checkalloc' in n:
+ n['mt_checkalloc'].rank = 0
+
+queue = list(FunctionNode.funcs.values())
+queue = calc_rank(queue, 1)
+queue = calc_rank(queue, -1)
+
+sys.stderr.write('%d functions in cyclic set\n' % len(queue))
+
+graph = Graph(queue)
+graph.automerge()
+
+gv_nodes = []
+gv_edges = []
+
+sys.stderr.write('%d groups after automerge\n' % len(graph._groups))
+
+def is_vnc(n):
+ return n.startswith('rfapi') or n.startswith('vnc') or ('_vnc_' in n)
+
+_vncstyle = ',fillcolor="#ffffcc",style=filled'
+cyclic_set_names = set([fn.name for fn in graph.values()])
+
+for i, group in enumerate(graph._groups):
+ if len(group) > 1:
+ group.num = i
+ gv_nodes.append('\tsubgraph cluster_%d {' % i)
+ gv_nodes.append('\t\tcolor=blue;')
+ for gn in group:
+ has_cycle_callers = set(gn.calld()) - group
+ has_ext_callers = set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
+
+ style = ''
+ etext = ''
+ if is_vnc(gn.name):
+ style += _vncstyle
+ if has_cycle_callers:
+ style += ',color=blue,penwidth=3'
+ if has_ext_callers:
+ style += ',fillcolor="#ffeebb",style=filled'
+ etext += '<br/><font point-size="10">(%d other callers)</font>' % (len(has_ext_callers))
+
+ gv_nodes.append('\t\t"%s" [shape=box,label=<%s%s>%s];' % (gn.name, '<br/>'.join([fn.name for fn in gn._fns]), etext, style))
+ gv_nodes.append('\t}')
+ else:
+ for gn in group:
+ has_ext_callers = set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
+
+ style = ''
+ etext = ''
+ if is_vnc(gn.name):
+ style += _vncstyle
+ if has_ext_callers:
+ style += ',fillcolor="#ffeebb",style=filled'
+ etext += '<br/><font point-size="10">(%d other callers)</font>' % (len(has_ext_callers))
+ gv_nodes.append('\t"%s" [shape=box,label=<%s%s>%s];' % (gn.name, '<br/>'.join([fn.name for fn in gn._fns]), etext, style))
+
+edges = set()
+for gn in graph.values():
+ for calls in gn.calls():
+ if gn._group == calls._group:
+ gv_edges.append('\t"%s" -> "%s" [color="#55aa55",style=dashed];' % (gn.name, calls.name))
+ else:
+ def xname(nn):
+ if len(nn._group) > 1:
+ return 'cluster_%d' % nn._group.num
+ else:
+ return nn.name
+ tup = xname(gn), calls.name
+ if tup[0] != tup[1] and tup not in edges:
+ gv_edges.append('\t"%s" -> "%s" [weight=0.0,w=0.0,color=blue];' % tup)
+ edges.add(tup)
+
+with open(sys.argv[2], 'w') as fd:
+ fd.write('''digraph {
+ node [fontsize=13,fontname="Fira Sans"];
+%s
+}''' % '\n'.join(gv_nodes + [''] + gv_edges))
diff --git a/python/makefile.py b/python/makefile.py
index 9af397d373..948d3f7391 100644
--- a/python/makefile.py
+++ b/python/makefile.py
@@ -11,6 +11,7 @@ import subprocess
import re
import argparse
from string import Template
+from makevars import MakeReVars
argp = argparse.ArgumentParser(description = 'FRR Makefile extensions')
argp.add_argument('--dev-build', action = 'store_const', const = True,
@@ -20,13 +21,9 @@ args = argp.parse_args()
with open('Makefile', 'r') as fd:
before = fd.read()
-nolinecont = before.replace('\\\n', '')
-m = re.search('^clippy_scan\s*=([^#]*)(?:#.*)?$', nolinecont, flags=re.MULTILINE)
-if m is None:
- sys.stderr.write('failed to parse Makefile.in\n')
- sys.exit(2)
+mv = MakeReVars(before)
-clippy_scan = m.group(1).strip().split()
+clippy_scan = mv['clippy_scan'].strip().split()
for clippy_file in clippy_scan:
assert clippy_file.endswith('.c')
@@ -57,6 +54,7 @@ ${target}: ${clippybase}_clippy.c
lines = before.splitlines()
autoderp = '#AUTODERP# '
out_lines = []
+bcdeps = []
make_rule_re = re.compile('^([^:\s]+):\s*([^:\s]+)\s*($|\n)')
while lines:
@@ -80,6 +78,12 @@ while lines:
out_lines.append(line)
continue
+ target, dep = m.group(1), m.group(2)
+
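+    # For every object rule ("foo.o: foo.c" or "foo.lo: foo.c"), also emit a
+    # matching "foo.o.bc" rule that compiles the same source to LLVM bitcode.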
+ if target.endswith('.lo') or target.endswith('.o'):
+ if not dep.endswith('.h'):
+ bcdeps.append('%s.bc: %s' % (target, target))
+ bcdeps.append('\t$(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ %s' % (dep))
if m.group(2) in clippy_scan:
out_lines.append(clippyauxdep.substitute(target=m.group(1), clippybase=m.group(2)[:-2]))
@@ -88,6 +92,24 @@ while lines:
out_lines.append('# clippy{\n# main clippy targets')
for clippy_file in clippy_scan:
out_lines.append(clippydep.substitute(clippybase = clippy_file[:-2]))
+
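+# Emit one "<target>.bc" link rule per program/library listed in the Automake
+# *_PROGRAMS / *_LTLIBRARIES / *_LIBRARIES variables, linking that target's
+# per-object .bc files (plus any .a dependencies) with $(LLVM_LINK),
+# presumably for LLVM bitcode based call-graph analysis.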
+out_lines.append('')
+out_lines.extend(bcdeps)
+out_lines.append('')
+bc_targets = []
+for varname in ['bin_PROGRAMS', 'sbin_PROGRAMS', 'lib_LTLIBRARIES', 'module_LTLIBRARIES', 'noinst_LIBRARIES']:
+ bc_targets.extend(mv[varname].strip().split())
+for target in bc_targets:
+ amtgt = target.replace('/', '_').replace('.', '_').replace('-', '_')
+ objs = mv[amtgt + '_OBJECTS'].strip().split()
+ objs = [obj + '.bc' for obj in objs]
+ deps = mv.get(amtgt + '_DEPENDENCIES', '').strip().split()
+ deps = [d + '.bc' for d in deps if d.endswith('.a')]
+ objs.extend(deps)
+ out_lines.append('%s.bc: %s' % (target, ' '.join(objs)))
+ out_lines.append('\t$(AM_V_LLVM_LD)$(LLVM_LINK) -o $@ $^')
+ out_lines.append('')
+
out_lines.append('# }clippy')
out_lines.append('')
diff --git a/python/makevars.py b/python/makevars.py
index e0e2031a0d..1a85fbd6f5 100644
--- a/python/makevars.py
+++ b/python/makevars.py
@@ -4,14 +4,33 @@
import os
import subprocess
+import re
-class MakeVars(object):
+class MakeVarsBase(object):
'''
- makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile
+ common code between MakeVars and MakeReVars
'''
def __init__(self):
self._data = dict()
+ def __getitem__(self, k):
+ if k not in self._data:
+ self.getvars([k])
+ return self._data[k]
+
+ def get(self, k, defval = None):
+ if k not in self._data:
+ self.getvars([k])
+ return self._data.get(k) or defval
+
+class MakeVars(MakeVarsBase):
+ '''
+ makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile
+
+ This variant works by invoking make as a subprocess, i.e. Makefile must
+ be valid and working. (This is sometimes a problem if depfiles have not
+ been generated.)
+ '''
def getvars(self, varlist):
'''
get a batch list of variables from make. faster than individual calls.
@@ -39,12 +58,33 @@ class MakeVars(object):
v = v[1:-1]
self._data[k] = v
- def __getitem__(self, k):
- if k not in self._data:
- self.getvars([k])
- return self._data[k]
+class MakeReVars(MakeVarsBase):
+ '''
+ makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile
- def get(self, k, defval = None):
- if k not in self._data:
- self.getvars([k])
- return self._data[k] or defval
+ This variant works by regexing through Makefile. This means the Makefile
+ does not need to be fully working, but on the other hand it doesn't support
+ fancy complicated make expressions.
+ '''
+ var_re = re.compile(r'^([^=#\n\s]+)[ \t]*=[ \t]*([^#\n]*)(?:#.*)?$', flags=re.MULTILINE)
+ repl_re = re.compile(r'\$(?:([A-Za-z])|\(([^\)]+)\))')
+
+ def __init__(self, maketext):
+ super().__init__()
+ self._vars = dict(self.var_re.findall(maketext.replace('\\\n', '')))
+
+ def replacevar(self, match):
+ varname = match.group(1) or match.group(2)
+ return self._vars.get(varname, '')
+
+ def getvars(self, varlist):
+ for varname in varlist:
+ if varname not in self._vars:
+ continue
+
+ val, prevval = self._vars[varname], None
+ while val != prevval:
+ prevval = val
+ val = self.repl_re.sub(self.replacevar, val)
+
+ self._data[varname] = val
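+
+# Illustrative usage (a sketch mirroring how python/makefile.py drives this
+# class; any other variable names would be hypothetical):
+#
+#   with open('Makefile', 'r') as fd:
+#       mv = MakeReVars(fd.read())
+#   print(mv['clippy_scan'].strip().split())
+#
+# Nested $(VAR) references are expanded by repeatedly applying repl_re until
+# the value stops changing; variables that are not defined expand to ''.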
diff --git a/qpb/subdir.am b/qpb/subdir.am
index 1864ba7369..80f8f3aca9 100644
--- a/qpb/subdir.am
+++ b/qpb/subdir.am
@@ -29,6 +29,7 @@ CLEANFILES += \
# end
EXTRA_DIST += qpb/qpb.proto
+SUFFIXES += .proto .pb-c.c .pb-c.h
if HAVE_PROTOBUF
diff --git a/tests/topotests/bgp_gr_functionality_topo1/bgp_gr_topojson_topo1.json b/tests/topotests/bgp_gr_functionality_topo1/bgp_gr_topojson_topo1.json
new file mode 100644
index 0000000000..7b3cac814f
--- /dev/null
+++ b/tests/topotests/bgp_gr_functionality_topo1/bgp_gr_topojson_topo1.json
@@ -0,0 +1,115 @@
+{
+ "ipv4base":"192.168.0.0",
+ "ipv4mask":24,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"192.168.0.0", "v4mask":24, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2-link1": {"ipv4":"auto", "ipv6":"auto"},
+ "r2-link2": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes":[
+ {
+ "network":"100.0.10.1/32",
+ "no_of_ip":5,
+ "next_hop":"192.168.1.2"
+ },
+ {
+ "network":"1::1/128",
+ "no_of_ip":5,
+ "next_hop":"fd00:0:0:1::2"
+ }]},
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4":"auto", "ipv6":"auto"},
+ "r1-link2": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes":[
+ {
+ "network":"200.0.20.1/32",
+ "no_of_ip":5,
+ "next_hop":"192.168.1.1"
+ },
+ {
+ "network":"2::1/128",
+ "no_of_ip":5,
+ "next_hop":"fd00:0:0:1::1"
+ }]
+ }
+ }
+}
diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py
new file mode 100755
index 0000000000..fdc1bed522
--- /dev/null
+++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py
@@ -0,0 +1,3495 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test BGP Graceful Restart functionality.
+Basic common test steps for all the test cases below:
+- Create topology (setup module)
+  Creating 2 routers topology, r1, r2 in IBGP
+- Bring up topology
+- Verify that BGP converges
+- Configure BGP Graceful Restart on both the routers.
+
+1. Helper BGP router R1 marks IPV4 routes as stale and unmarks them
+    when the restarting router R2 comes up within the restart time.
+2. Helper BGP router R1 marks IPV4 routes as stale and
+    deletes them when the restarting router R2 does not come up within the
+    restart time.
+3. Restarting BGP router R1 detects it is connected to R2,
+    which is a helper router. Verify that the restart capability, i.e. the
+    R bit, is sent after R1 reloads and comes back.
+4. Verify that the restarting node sets "R" bit while sending the
+ BGP open messages after the node restart, only if GR is enabled.
+5. Verify whether the restarting node resets the R bit in the BGP open message
+    during normal BGP session flaps as well, even when GR restarting mode is enabled.
+    Here the link flap happens due to an interface UP/DOWN.
+6. Verify whether the restarting node resets the R bit in the BGP open message
+    during normal BGP session flaps as well, even when GR restarting mode is enabled.
+    Here the link flap happens because the neighbor router restarts.
+7. Verify whether the restarting node resets the R bit in the BGP open message
+    during normal BGP session flaps when GR helper mode is enabled.
+    Here the link flap happens due to an interface UP/DOWN.
+8. Verify whether the restarting node resets the R bit in the BGP open message
+    during normal BGP session flaps when GR helper mode is enabled.
+    Here the link flap happens because the neighbor router restarts.
+9. Verify that restarting nodes set the "F" bit while sending
+    the BGP open messages after they restart, only when BGP GR is enabled.
+10. Verify that restarting nodes reset the "F" bit while sending
+    the BGP open messages after they restart, when BGP GR is **NOT** enabled.
+11. Verify that only GR helper routers keep the stale
+ route entries, not any GR disabled router.
+12. Verify that GR helper routers keep all the routes received
+    from the restarting node if both the routers are configured as GR restarting nodes.
+13. Verify that GR helper routers delete all the routes
+ received from a node if both the routers are configured as GR helper node.
+14. Test Objective : After BGP neighborship is established and GR capability
+ is exchanged, transition helper router to disabled state.
+15. Test Objective : After BGP neighborship is established and GR capability
+ is exchanged, transition disabled router to helper state.
+16. Verify transition from Global Restarting to Disable and then
+ Global Disable to Restarting.
+17. Verify transition from Global Helper to Disable and then Global
+ Disable to Helper.
+18. Verify transition from Global Restart to Helper and then Global
+ Helper to Restart.
+19. Verify transition from Peer-level helper to Global Restarting.
+20. Verify transition from Peer-level restart to Global Restart.
+21. Verify transition from Peer-level disabled to Global Restart.
+22. Verify Peer-level inherit from Global Restarting mode.
+23. Verify transition from Peer-level helper to Global inherit helper.
+24. Verify transition from Peer-level restart to Global inherit helper.
+25. Verify transition from Peer-level disable to Global inherit helper.
+26. Verify default GR functional mode is Helper.
+27. Verify transition from Peer-level Helper to Global Disable.
+28. Verify transition from Peer-level Restarting to Global Disable.
+29. Verify transition from Peer-level Disable to Global Disable.
+30. Verify Peer-level inherit from Global Disable mode.
+
+"""
+
+import os
+import sys
+import json
+import time
+import inspect
+import pytest
+from time import sleep
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join("../"))
+sys.path.append(os.path.join("../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.bgp import (
+ clear_bgp,
+ verify_bgp_rib,
+ verify_graceful_restart,
+ create_router_bgp,
+ verify_r_bit,
+ verify_f_bit,
+ verify_bgp_convergence,
+ verify_graceful_restart_timers,
+)
+
+from lib.common_config import (
+ write_test_header,
+ reset_config_on_routers,
+ start_topology,
+ kill_router_daemons,
+ start_router_daemons,
+ verify_rib,
+ check_address_types,
+ write_test_footer,
+ check_router_status,
+ shutdown_bringup_interface,
+ step,
+ kill_mininet_routers_process,
+ get_frr_ipv6_linklocal,
+ create_route_maps,
+)
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/bgp_gr_topojson_topo1.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+    logger.info("Could not read file: %s", jsonFile)
+
+
+# Global variables
+NEXT_HOP_IP = {"ipv4": "192.168.1.10", "ipv6": "fd00:0:0:1::10"}
+NEXT_HOP_IP_1 = {"ipv4": "192.168.0.1", "ipv6": "fd00::1"}
+NEXT_HOP_IP_2 = {"ipv4": "192.168.0.2", "ipv6": "fd00::2"}
+BGP_CONVERGENCE = False
+GR_RESTART_TIMER = 20
+PREFERRED_NEXT_HOP = "link_local"
+
+
+class GenerateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+        # This function's only purpose is to create the topology
+        # as defined in the input json file.
+        #
+        # Create topology (setup module)
+        # Creating 2 routers topology, r1, r2 in IBGP
+ # Bring up topology
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(GenerateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Kill stale mininet routers and process
+ kill_mininet_routers_process(tgen)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ for addr_type in ADDR_TYPES:
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut, peer):
+ """
+ This function groups the repetitive function calls into one function.
+ """
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, dut)
+
+ return True
+
+
+def next_hop_per_address_family(
+ tgen, dut, peer, addr_type, next_hop_dict, preferred_next_hop=PREFERRED_NEXT_HOP
+):
+ """
+ This function returns link_local or global next_hop per address-family
+ """
+
+    interface = topo["routers"][peer]["links"]["{}-link1".format(dut)]["interface"]
+    if addr_type == "ipv6" and "link_local" in preferred_next_hop:
+        next_hop = get_frr_ipv6_linklocal(tgen, peer, intf=interface)
+ else:
+ next_hop = next_hop_dict[addr_type]
+
+ return next_hop
+
+
+def test_BGP_GR_TC_46_p1(request):
+ """
+ Test Objective : transition from Peer-level helper to Global Restarting
+ Global Mode : GR Restarting
+ PerPeer Mode : GR Helper
+ GR Mode effective : GR Helper
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ step(
+        "Configure R1 and R2 as GR restarting nodes globally, with R1 as"
+        " GR helper towards R2 at the per-peer level"
+ )
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+            "graceful-restart": {"graceful-restart": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ step("Verify on R2 that R1 advertises GR capabilities as a restarting node")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGP on R2")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ step(
+        "Verify that R1 keeps the stale entries in RIB & FIB and R2 keeps stale entries in FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+        "Bring up BGP on R2 and remove Peer-level GR config"
+        " from R1, followed by a session reset"
+ )
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": False}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": False}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ step("Verify on R2 that R1 advertises GR capabilities as a restarting node")
+
+ input_dict = {
+ "r1": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGP on R1")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ step(
+        "Verify that R1 keeps the stale entries in FIB and R2 keeps stale entries in RIB & FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, "r2", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Start BGP on R1")
+
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_50_p1(request):
+ """
+ Test Objective : Transition from Peer-level helper to Global inherit helper
+ Global Mode : None
+ PerPeer Mode : Helper
+ GR Mode effective : GR Helper
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ step(
+ "Configure R1 as GR helper node at per Peer-level for R2"
+ " and configure R2 as global restarting node."
+ )
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ step("Verify on R2 that R1 advertises GR capabilities as a helper node")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGP on R2")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ step(
+ "Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Bring up BGP on R2 and remove Peer-level GR config from R1 ")
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": False}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": False}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ step("Verify on R2 that R1 still advertises GR capabilities as a helper node")
+
+ input_dict = {
+ "r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True}}},
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGP on R2")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ step(
+ "Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Start BGP on R2")
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_51_p1(request):
+ """
+ Test Objective : Transition from Peer-level restarting to Global inherit helper
+ Global Mode : None
+ PerPeer Mode : GR Restart
+ GR Mode effective : GR Restart
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ step("Configure R1 as GR restarting node at per Peer-level for R2")
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+ step("Verify on R2 that R1 advertises GR capabilities as a restarting node")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGP on R1")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ step(
+ "Verify that R1 keeps the stale entries in FIB & R2 keeps stale entries in RIB & FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, "r2", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Bring up BGP on R1 and remove Peer-level GR config")
+
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": False}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": False}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ step("Verify on R2 that R1 advertises GR capabilities as a helper node")
+
+ input_dict = {
+ "r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True}}},
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGPd on R2")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ step(
+ "Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Start BGP on R2")
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_53_p1(request):
+ """
+ Test Objective : Default GR functional mode is Helper.
+ Global Mode : None
+ PerPeer Mode : None
+ GR Mode effective : GR Helper
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ step("configure R2 as global restarting node")
+
+ input_dict = {"r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}}
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ step(
+ "Verify on R2 that R1 advertises GR capabilities as a helper node based on inherit"
+ )
+
+ input_dict = {
+ "r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True}}},
+ "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Kill BGPd on R2")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ step(
+ "Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ input_topo = {"r1": topo["routers"]["r1"]}
+ result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ next_hop = next_hop_per_address_family(
+ tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {"r2": topo["routers"]["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Start BGP on R2")
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_UTP_1_3_p0(request):
+ """
+    Test Objective : Helper BGP router R1 marks IPV4 routes as stale and
+    unmarks them when the restarting router R2 comes up within the restart time.
+
+    Test Objective : Helper BGP router R1 marks IPV4 routes as stale and
+    deletes them when the restarting router R2 does not come up within the
+    restart time.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Create route-map to prefer global next-hop
+ input_dict = {
+ "r1": {
+ "route_maps": {
+ "rmap_global": [
+ {"action": "permit", "set": {"ipv6": {"nexthop": "prefer-global"}}}
+ ]
+ }
+ },
+ "r2": {
+ "route_maps": {
+ "rmap_global": [
+ {"action": "permit", "set": {"ipv6": {"nexthop": "prefer-global"}}}
+ ]
+ }
+ },
+ }
+ result = create_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {
+ "route_maps": [
+ {
+ "name": "rmap_global",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {
+ "route_maps": [
+ {
+ "name": "rmap_global",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER}},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r2", peer="r1")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r2", peer="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2, preferred_next_hop="global"
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, "bgp")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R2 goes for reload ")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+    logger.info(
+        "[Phase 3] : R2 is still down, restart time 120 sec."
+        " So it's time to verify that the routes are present"
+        " in BGP RIB and ZEBRA"
+    )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2, preferred_next_hop="global"
+ )
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 4] : sleep for {} sec".format(GR_RESTART_TIMER))
+ sleep(GR_RESTART_TIMER)
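+    # Note (per RFC 4724 helper behaviour): once the advertised restart-time
+    # expires without R2 re-establishing the session, the helper R1 is expected
+    # to flush the stale routes it retained, which is what the negative
+    # verifications below check.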
+
+ logger.info("[Phase 5] : Verify the routes from r2 ")
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = NEXT_HOP_IP_2[addr_type]
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop, "bgp", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ logger.info("[Phase 5] : R2 is about to come up now ")
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info("[Phase 5] : R2 is UP Now ! ")
+
+ for addr_type in ADDR_TYPES:
+ # Verifying GR stats
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r2", peer="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r1", peer="r2")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2, preferred_next_hop="global"
+ )
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_UTP_15_TC_9_p1(request):
+ """
+    Test Objective : Restart BGP router R1, which detects it is connected to
+    R2, a helper router. Verify that the restart capability, i.e. the R bit,
+    is sent after R1 reloads and comes back.
+
+    Test Objective : Verify that the restarting node resets the "F" bit in the
+    BGP open messages sent after it restarts, when BGP GR is **NOT** enabled.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Checking router status, starting if not running
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ # reset_config_on_routers(tgen)
+
+ # Create route-map to prefer global next-hop
+ input_dict = {
+ "r1": {
+ "route_maps": {
+ "rmap_global": [
+ {"action": "permit", "set": {"ipv6": {"nexthop": "prefer-global"}}}
+ ]
+ }
+ },
+ "r2": {
+ "route_maps": {
+ "rmap_global": [
+ {"action": "permit", "set": {"ipv6": {"nexthop": "prefer-global"}}}
+ ]
+ }
+ },
+ }
+ result = create_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {
+ "route_maps": [
+ {
+ "name": "rmap_global",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {
+ "route_maps": [
+ {
+ "name": "rmap_global",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ logger.info(
+ "[Phase 1] : Test Setup " "[Helper Mode]R1-----R2[Restart Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2, preferred_next_hop="global"
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R1 goes for reload ")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 6] : R1 is about to come up now ")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying GR stats
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r2", peer="r1")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2, preferred_next_hop="global"
+ )
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r1", peer="r2")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_f_bit(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_UTP_35_p1(request):
+ """
+    Test Objective : Restart BGP router R1, which is connected to R2,
+    a restarting router.
+    R1 should not send any GR capability in its open message; it would
+    still process the open message from R2 carrying the GR restart
+    capability, but would not perform any BGP GR functionality.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup" " [Disable Mode]R1-----R2[Restart Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R1 goes for reload ")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 3] : R1 is about to come up now ")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 4] : R2 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_4_p0(request):
+ """
+    Test Objective : Verify that the restarting node sets the "R" bit in the
+    BGP open messages sent after the node restarts, only if GR is enabled.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup" " [Restart Mode]R1-----R2[Helper Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R2 goes for reload ")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+    logger.info(
+        "[Phase 3] : R2 is still down, restart time {} sec."
+        " So it's time to verify that the routes are not present"
+        " in BGP RIB and ZEBRA ".format(GR_RESTART_TIMER)
+    )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop, protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ logger.info("[Phase 5] : R2 is about to come up now ")
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info("[Phase 4] : R2 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r1", peer="r2")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_5_1_2_p1(request):
+ """
+    Test Objective : Verify whether the restarting node resets the R bit in the
+    BGP open message during normal BGP session flaps as well, even when GR
+    restarting mode is enabled. Here the link flap happens due to an interface
+    UP/DOWN event.
+
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup" " [Restart Mode]R1-----R2[Restart Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : Now flap the link running the BGP session ")
+ # Shutdown interface
+ intf = "r2-r1-eth0"
+ shutdown_bringup_interface(tgen, "r2", intf)
+
+ # Bring up Interface
+    shutdown_bringup_interface(tgen, "r2", intf, ifaceaction=True)
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r1", peer="r2")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : Restart BGPd on router R2. ")
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info("[Phase 4] : R2 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r1", peer="r2")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_6_1_2_p1(request):
+ """
+    Test Objective : Verify whether the restarting node resets the R bit in the
+    BGP open message during normal BGP session flaps when GR is disabled.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup" "[Restart Mode]R1-----R2[Helper Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 1] : Changing mode" "[Disable Mode]R1-----R2[Helper Mode]")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, "r1")
+ clear_bgp(tgen, addr_type, "r2")
+
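+    # Note (assumption): the GR capability is only exchanged in the BGP OPEN
+    # message, so the sessions are cleared above to make the newly configured
+    # graceful-restart-disable mode take effect on the wire.
+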
+ # Verify GR stats
+ input_dict = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+    # Here the verify_graceful_restart for the neighbor would be
+    # "NotReceived" as the latest GR config is not yet applied.
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : Now flap the link running the BGP session ")
+ # Shutdown interface
+ intf = "r2-r1-eth0"
+ shutdown_bringup_interface(tgen, "r2", intf)
+
+ # Bring up Interface
+    shutdown_bringup_interface(tgen, "r2", intf, ifaceaction=True)
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(
+ tgen, topo, addr_type, input_dict, dut="r2", peer="r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("Restart BGPd on R2 ")
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(
+ tgen, topo, addr_type, input_dict, dut="r2", peer="r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_8_p1(request):
+ """
+    Test Objective : Verify that the restarting node sets the "F" bit in the
+    BGP open messages sent after it restarts, only when BGP GR is enabled.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup" " [Restart Mode]R1-----R2[Restart Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"preserve-fw-state": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R1 goes for reload ")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 3] : R1 is about to come up now ")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 4] : R2 is UP now, so time to collect GR stats ")
+
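+    # Note (assumption): the "F" bit in the GR capability advertises that the
+    # forwarding state was preserved across the restart. R1 sets it because
+    # "preserve-fw-state" is enabled in its graceful-restart configuration, and
+    # verify_f_bit() checks that R2 received it from R1.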
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r2", peer="r1")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_f_bit(tgen, topo, addr_type, input_dict, dut="r2", peer="r1")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_17_p1(request):
+ """
+ Test Objective : Verify that only GR helper routers keep the stale
+ route entries, not any GR disabled router.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info("[Phase 1] : Test Setup [Disable]R1-----R2[Restart] initialized ")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R2 goes for reload ")
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info(
+ "[Phase 3] : R2 is still down, restart time 120 sec."
+ " So time verify the routes are present in BGP RIB and ZEBRA "
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop, protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ logger.info("[Phase 5] : R2 is about to come up now ")
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info("[Phase 4] : R2 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_r_bit(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_19_p1(request):
+ """
+    Test Objective : Verify that the GR helper router keeps all the routes
+    received from the restarting node if both the routers are configured as
+    GR restarting nodes.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info("[Phase 1] : Test Setup [Helper]R1-----R2[Restart] initialized ")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info(
+ "[Phase 2] : R1's Gr state cahnge to Graceful"
+ " Restart without resetting the session "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ logger.info(
+ "[Phase 3] : R2 is still down, restart time 120 sec."
+ " So time verify the routes are present in BGP RIB and ZEBRA "
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_20_p1(request):
+ """
+    Test Objective : Verify that a GR helper router deletes all the routes
+    received from a node if both the routers are configured as GR helper nodes.
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info("[Phase 1] : Test Setup [Helper]R1-----R2[Helper] initialized ")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop, protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ logger.info("[Phase 5] : R2 is about to come up now ")
+
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info("[Phase 4] : R2 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_31_1_p1(request):
+ """
+ After BGP neighborship is established and GR capability is exchanged,
+    transition the restarting router to the disabled state and vice versa.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup" " [Helper Mode]R2-----R1[Restart Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"preserve-fw-state": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ protocol = "bgp"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R1 Goes from Restart to Disable Mode ")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, "r1")
+ clear_bgp(tgen, addr_type, "r2")
+
+ # Verify GR stats
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R1 goes for reload ")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info(
+ "[Phase 3] : R1 is still down, restart time 120 sec."
+ " So time verify the routes are not present in BGP RIB and ZEBRA"
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop, protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 4] : R1 is about to come up now ")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 5] : R1 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_31_2_p1(request):
+ """
+ After BGP neighborship is established and GR capability is exchanged,
+    transition the restarting router to the disabled state and vice versa.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Phase 1] : Test Setup " "[Disable Mode]R1-----R2[Restart Mode] initialized "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ peer = "r2"
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 2] : R2 Goes from Disable to Restart Mode ")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"preserve-fw-state": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, "r1")
+ clear_bgp(tgen, addr_type, "r2")
+
+ # Verify GR stats
+ input_dict = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {"graceful-restart": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+    # Here the verify_graceful_restart for the neighbor would be
+    # "NotReceived" as the latest GR config is not yet applied.
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 3] : R1 goes for reload ")
+
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info(
+ "[Phase 4] : R1 is still down, restart time 120 sec."
+ " So time verify the routes are present in BGP RIB and ZEBRA "
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Phase 6] : R1 is about to come up now ")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Phase 4] : R1 is UP now, so time to collect GR stats ")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ next_hop = next_hop_per_address_family(
+ tgen, dut, peer, addr_type, NEXT_HOP_IP_2
+ )
+ input_topo = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_gr_functionality_topo2/bgp_gr_topojson_topo2.json b/tests/topotests/bgp_gr_functionality_topo2/bgp_gr_topojson_topo2.json
new file mode 100644
index 0000000000..75f192d3b6
--- /dev/null
+++ b/tests/topotests/bgp_gr_functionality_topo2/bgp_gr_topojson_topo2.json
@@ -0,0 +1,334 @@
+{
+ "ipv4base":"192.168.0.0",
+ "ipv4mask":24,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"192.168.0.0", "v4mask":24, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4":"auto", "ipv6":"auto"},
+ "r3": {"ipv4":"auto", "ipv6":"auto"},
+ "r5": {"ipv4":"auto", "ipv6":"auto"},
+ "r6": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "101.0.20.1/32",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "1::1/128",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r1": {
+ }
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r1": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "102.0.20.1/32",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "2::1/128",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4":"auto", "ipv6":"auto"},
+ "r4": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "103.0.20.1/32",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "3::1/128",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r3": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "104.0.20.1/32",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "4::1/128",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "105.0.20.1/32",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "5::1/128",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r6":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as": "600",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "106.0.20.1/32",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r6": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "6::1/128",
+ "no_of_network": 5
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r6": {
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
new file mode 100755
index 0000000000..83a04f491f
--- /dev/null
+++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
@@ -0,0 +1,3085 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test BGP Graceful Restart functionality.
+Basic Common Test steps for all the test cases below :
+- Create topology (setup module)
+ Creating 7 routers topology
+- Bring up topology
+- Verify for bgp to converge
+- Configure BGP Graceful Restart on both the routers.
+
+TC_1_2:
+ Verify that EOR message is sent out only after initial convergence
+ Verify whether EOR message is received from all the peers after restart
+TC_3:
+ Verify the selection deferral timer functionality when EOR is not sent
+ by the helper router
+TC_11:
+ Verify that selection-deferral timer sets the maximum time to
+    avoid deadlock during which the best-path
+    selection process is deferred, after a peer session was restarted
+TC_10:
+ Test Objective : Test GR scenarios on helper router by enabling
+ Graceful Restart for multiple address families.
+TC_15:
+ Test Objective : Test GR scenarios by enabling Graceful Restart
+    for multiple address families.
+TC_16:
+ Test Objective : Verify BGP-GR feature when restarting node
+    is a transit router for its iBGP peers.
+TC_18:
+ Test Objective : Verify that GR helper router deletes stale routes
+    received from restarting node, if GR capability is not present in
+    restarting node's OPEN message.
+TC_19:
+    Test Objective : Verify that GR routers keep all the routes
+ received from restarting node if both the routers are
+TC_26:
+ Test Objective : Test GR scenarios on helper router by enabling
+ Graceful Restart for multiple address families.
+TC_28:
+ Test Objective : Verify if helper node goes down before restarting
+    node comes up online, helper node sets the R-bit to avoid dead-lock
+    till SDT expiry.
+TC_29:
+ Test Objective : Change timers on the fly, and
+ verify if it takes immediate effect.
+TC_33:
+ Test Objective : Helper router receives same prefixes from two
+    different routers (GR-restarting and GR-disabled). Keeps the
+    stale entry only for GR-restarting node (next-hop is correct).
+TC_34_1:
+ Test Objective : Restarting node doesn't preserve forwarding
+ state, helper router should not keep the stale entries.
+TC_34_2:
+ Test Objective : Restarting node doesn't preserve the forwarding
+    state, verify the behaviour on helper node, if it still keeps the
+    stale routes.
+TC_32:
+ Test Objective : Restarting node is connected to multiple helper
+    nodes, one of them doesn't send EOR to restarting router. Verify
+    that only after SDT expiry the restarting node sends EOR to all
+    helper peers, excluding the prefixes originated by the faulty router.
+TC_37:
+ Test Objective : Verify if helper node restarts before sending the
+ EOR message, restarting node doesn't wait until stale path timer
+TC_30:
+ Test Objective : Restarting node removes stale routes from Zebra
+ after receiving an EOR from helper router.
+
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+from time import sleep
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join("../"))
+sys.path.append(os.path.join("../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.bgp import (
+ clear_bgp,
+ verify_bgp_rib,
+ verify_graceful_restart,
+ create_router_bgp,
+ verify_r_bit,
+ verify_eor,
+ verify_f_bit,
+ verify_bgp_convergence,
+ verify_gr_address_family,
+ modify_bgp_config_when_bgpd_down,
+ verify_graceful_restart_timers,
+)
+
+from lib.common_config import (
+ write_test_header,
+ reset_config_on_routers,
+ start_topology,
+ kill_router_daemons,
+ start_router_daemons,
+ verify_rib,
+ check_address_types,
+ write_test_footer,
+ check_router_status,
+ shutdown_bringup_interface,
+ step,
+ kill_mininet_routers_process,
+ get_frr_ipv6_linklocal,
+ create_route_maps,
+)
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/bgp_gr_topojson_topo2.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ logger.info("Could not read file:", jsonFile)
+
+# Global variables
+BGP_CONVERGENCE = False
+GR_RESTART_TIMER = 5
+GR_SELECT_DEFER_TIMER = 5
+PREFERRED_NEXT_HOP = "link_local"
+NEXT_HOP_4 = ["192.168.1.1", "192.168.4.2"]
+NEXT_HOP_6 = ["fd00:0:0:1::1", "fd00:0:0:4::2"]
+
+
+class GenerateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+        # This function's only purpose is to create the topology
+        # as defined in the input json file.
+ #
+ # Create topology (setup module)
+        # Creating the routers topology as defined in the json file
+ # Bring up topology
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(GenerateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+    # API call to verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ for addr_type in ADDR_TYPES:
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut, peer):
+ """
+ This function groups the repetitive function calls into one function.
+ """
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, dut)
+
+ return True
+
+
+def next_hop_per_address_family(tgen, dut, peer, addr_type, next_hop_dict):
+ """
+ This function returns link_local or global next_hop per address-family
+ """
+
+ intferface = topo["routers"][peer]["links"]["{}-link1".format(dut)]["interface"]
+ if addr_type == "ipv6" and "link_local" in PREFERRED_NEXT_HOP:
+ next_hop = get_frr_ipv6_linklocal(tgen, peer, intf=intferface)
+ else:
+ next_hop = next_hop_dict[addr_type]
+
+ return next_hop
+
+
+def test_BGP_GR_TC_1_2_p0(request):
+ """
+ Verify that EOR message is sent out only after initial convergence
+ Verify whether EOR message is received from all the peers after restart
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "Verify EOR Sent and Received : BGP_GR_TC_1_2 >> "
+ "BGP GR [Helper Mode]R3-----R1[Restart Mode] "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ },
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes received from router R3
+ dut = "r1"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("R1 goes for reload")
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("Starting bgpd process")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+ logger.info("R1 is UP Now")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes received from router R3
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying EOR on restarting router
+ result = verify_eor(tgen, topo, addr_type, input_dict, dut="r3", peer="r1")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_3_p0(request):
+ """
+ Verify the selection deferral timer functionality when EOR is not sent
+ by the helper router
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Verify route download to RIB: BGP_GR_TC_3 >> "
+ "BGP GR [Helper Mode]R1-----R2[Restart Mode] "
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"disable-eor": True,},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ "r2": {
+ "bgp": {
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ "timer": {"select-defer-time": GR_SELECT_DEFER_TIMER},
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2": {"graceful-restart": True}}}
+ }
+ }
+ },
+ },
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes received from router R1
+ dut = "r2"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("R2 goes for reload ")
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+
+ logger.info("R2 is about to come up now")
+ start_router_daemons(tgen, "r2", ["bgpd"])
+ logger.info("R2 is UP Now")
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes received from router R1
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verify EOR on restarting router
+ result = verify_eor(
+ tgen, topo, addr_type, input_dict, dut="r2", peer="r1", expected=False
+ )
+ assert result is not True, (
+ "Testcase " + tc_name + " : Failed \n Error: {}".format(result)
+ )
+
+ logger.info(
+ "Waiting for selection deferral timer({} sec)..".format(GR_SELECT_DEFER_TIMER)
+ )
+ sleep(GR_SELECT_DEFER_TIMER)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, "r2", input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_TC_11_p0(request):
+ """
+ Verify that selection-deferral timer sets the maximum time to
+ avoid deadlock during which the best-path
+ selection process is deferred, after a peer session was restarted
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info("Verify EOR Sent after deferral timeout : BGP_GR_TC_11")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {
+ "graceful-restart": True,
+ "select-defer-time": GR_SELECT_DEFER_TIMER,
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"graceful-restart": True}}},
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}},
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"graceful-restart": True}}},
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}},
+ }
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "graceful-restart": {"disable-eor": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, "r1")
+ clear_bgp(tgen, addr_type, "r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes received from router R1
+ dut = "r1"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("R1 goes for reload")
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("Starting bgpd process")
+ start_router_daemons(tgen, "r1", ["bgpd"])
+ logger.info("R1 is UP Now")
+
+ for addr_type in ADDR_TYPES:
+ # Verify EOR on restarting router
+ result = verify_eor(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3", expected=False
+ )
+ assert result is not True, (
+ "Testcase " + tc_name + " : Failed \n Error: {}".format(result)
+ )
+
+ logger.info(
+ "Waiting for selection deferral timer({} sec).. ".format(
+ GR_SELECT_DEFER_TIMER + 2
+ )
+ )
+ sleep(GR_SELECT_DEFER_TIMER + 2)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes received from router R1
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying EOR on restarting router
+ result = verify_eor(
+ tgen, topo, addr_type, input_dict, dut="r3", peer="r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_10_p2(request):
+ """
+ Test Objective : Test GR scenarios on helper router by enabling
+ Graceful Restart for multiple address families.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Step 1] : Test Setup " "[Helper Mode]R3-----R1[Restart Mode] initialized"
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "next_hop_self": True,
+ "graceful-restart": True,
+ "activate": "ipv6",
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "next_hop_self": True,
+ "graceful-restart": True,
+ "activate": "ipv4",
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "graceful-restart-helper": True,
+ "activate": "ipv6",
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "graceful-restart-helper": True,
+ "activate": "ipv4",
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r3"
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv4Unicast", dut="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv6Unicast", dut="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv4Unicast", dut="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv6Unicast", dut="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes while BGPd daemon is down on R1
+ result = verify_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_16_p2(request):
+ """
+ Test Objective : Verify BGP-GR feature when restarting node
+    is a transit router for its iBGP peers.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Step 1] : Test Setup " "[Helper Mode]R3-----R1[Restart Mode] initialized"
+ )
+
+ # Configure graceful-restart and timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "graceful-restart": True,
+ "next_hop_self": True,
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "graceful-restart": True,
+ "next_hop_self": True,
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info(
+ "[Step 2] : Test Setup "
+ "[Helper Mode]R3-----R1[Restart Mode]"
+ "--------R6[Helper Mode] initialized"
+ )
+
+ # Configure graceful-restart and timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r3"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ input_dict_2 = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes while BGPd daemon is down on R1
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ input_dict_2 = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes while BGPd daemon is down on R1
+ result = verify_rib(tgen, addr_type, dut, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ input_dict_2 = {key: topo["routers"][key] for key in ["r2"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_18_p1(request):
+ """
+ Test Objective : Verify that GR helper router deletes stale routes
+ received from restarting node, if GR capability is not present in
+ restarting node's OPEN message.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Step 1] : Test Setup " "[Helper Mode]R6-----R1[Restart Mode] initialized"
+ )
+
+ # Configure graceful-restart and timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r6": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r6": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ }
+ }
+ },
+ "r6": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r6": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r6": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r6")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r6"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info(
+ "[Step 2] : Test Setup "
+ "[Helper Mode]R6-----R1[Restart Mode]"
+ "--------R2[Helper Mode] initialized"
+ )
+
+ # Configure graceful-restart and timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r6"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r2"
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 3] : Configure R1 to prevent sending EOR")
+
+ # Modify graceful-restart config to prevent sending EOR
+ input_dict_3 = {"r1": {"bgp": {"graceful-restart": {"disable-eor": True}}}}
+
+    result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict_3)
+    assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+ # Modify configuration to delete routes
+ network = {"ipv4": "101.0.20.1/32", "ipv6": "1::1/128"}
+ for addr_type in ADDR_TYPES:
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": network[addr_type],
+ "no_of_network": 5,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Modify graceful-restart config
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {"graceful-restart-disable": True}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r1": {"graceful-restart-disable": True}
+ }
+ },
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {"graceful-restart-disable": True}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r1": {"graceful-restart-disable": True}
+ }
+ },
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+ logger.info("[Step 4] : Bring up the BGPd daemon on R1 for 30" " seconds..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ dut = "r6"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+        # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying BGP RIB routes
+ dut = "r2"
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+        # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_26_p2(request):
+ """
+ Test Objective : Test GR scenarios on helper router by enabling
+ Graceful Restart for multiple address families.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "[Step 1] : Test Setup " "[Helper Mode]R3-----R1[Restart Mode] initialized"
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "graceful-restart": True,
+ "next_hop_self": True,
+ "activate": "ipv6",
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "graceful-restart": True,
+ "next_hop_self": True,
+ "activate": "ipv4",
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "graceful-restart-helper": True,
+ "activate": "ipv6",
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "graceful-restart-helper": True,
+ "activate": "ipv4",
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r3", peer="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r3"
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes while BGPd daemon is down on R1
+ result = verify_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+        # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv4Unicast", dut="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv6Unicast", dut="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv4Unicast", dut="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # verify multi address family
+ result = verify_gr_address_family(
+ tgen, topo, addr_type, "ipv6Unicast", dut="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_28_p1(request):
+ """
+ Test Objective : Verify if helper node goes down before restarting
+ node comes up online, helper node sets the R-bit to avoid dead-lock
+ till SDT expiry.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ "Test Case: test_BGP_GR_chaos_28 :"
+ "[Helper Mode]R3-----R1[Restart Mode] initialized"
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 1] : Kill BGPd daemon on R1..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 2] : Kill BGPd daemon on R3..")
+
+ # Kill BGPd daemon on R3
+ kill_router_daemons(tgen, "r3", ["bgpd"])
+
+ logger.info("[Step 3] : Start BGPd daemon on R1..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 4] : Start BGPd daemon on R3..")
+
+ # Start BGPd daemon on R3
+ start_router_daemons(tgen, "r3", ["bgpd"])
+
+ # Verify r_bit
+ for addr_type in ADDR_TYPES:
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r3", peer="r1")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_29_p1(request):
+ """
+ Test Objective : Change timers on the fly, and
+ verify if it takes immediate effect.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : BGP_GR_UTP_29"
+ " BGP GR [Helper Mode]R3-----R1[Restart Mode]"
+ " and [restart-time 150]R1 initialized"
+ )
+
+ # Configure graceful-restart and timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER}},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verify graceful-restart timers
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER + 5}}
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER}}
+ }
+ }
+ }
+
+ result = verify_graceful_restart_timers(
+ tgen, topo, addr_type, input_dict_2, dut="r3", peer="r1"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes before shutting down BGPd daemon
+ dut = "r3"
+ input_dict = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 2] : Kill BGPd daemon on R1..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+    # Wait for the restart timer to expire
+    logger.info("[Step 3] : Wait for {} seconds..".format(GR_RESTART_TIMER))
+
+    sleep(GR_RESTART_TIMER)
+
+ for addr_type in ADDR_TYPES:
+        # Verifying BGP RIB routes after restart-timer expiry
+ input_dict = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+        # Verifying RIB routes after restart-timer expiry
+ result = verify_rib(tgen, addr_type, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ logger.info("[Step 4] : Start BGPd daemon on R1..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_33_p1(request):
+ """
+ Test Objective : Helper router receives same prefixes from two
+ different routers (GR-restarting and GR-disabled). Keeps the
+    stale entry only for GR-restarting node (next-hop is correct).
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : test_BGP_GR_chaos_33 "
+ "BGP GR "
+ "[Restart Mode]R1--R3[Helper Mode]--R4[Disabled Mode]"
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"graceful-restart-disable": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 2] : Advertise same networks from R1 and R4..")
+
+ # Api call to delete advertised networks
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": "200.0.20.1/32", "no_of_network": 2,}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": "2001::1/128", "no_of_network": 2}
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": "200.0.20.1/32", "no_of_network": 2}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": "2001::1/128", "no_of_network": 2}
+ ]
+ }
+ },
+ }
+ }
+ },
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ peer1 = "r1"
+ peer2 = "r4"
+ intf1 = topo["routers"][peer1]["links"][dut]["interface"]
+ intf2 = topo["routers"][peer2]["links"][dut]["interface"]
+
+ if addr_type == "ipv4":
+ next_hop_4 = NEXT_HOP_4
+ result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_4)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ if addr_type == "ipv6":
+ if "link_local" in PREFERRED_NEXT_HOP:
+ next_hop1 = get_frr_ipv6_linklocal(tgen, peer1, intf=intf1)
+ next_hop2 = get_frr_ipv6_linklocal(tgen, peer2, intf=intf2)
+
+ next_hop_6 = [next_hop1, next_hop2]
+ else:
+ next_hop_6 = NEXT_HOP_6
+
+ result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_6)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 3] : Kill BGPd daemon on R1 and R4..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ # Kill BGPd daemon on R4
+ kill_router_daemons(tgen, "r4", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ next_hop_6 = ["fd00:0:0:1::1"]
+ if addr_type == "ipv4":
+ next_hop_4 = NEXT_HOP_4[0]
+
+ result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_4)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ if addr_type == "ipv6":
+ if "link_local" in PREFERRED_NEXT_HOP:
+ next_hop_6 = get_frr_ipv6_linklocal(tgen, peer1, intf=intf1)
+ else:
+ next_hop_6 = NEXT_HOP_6[0]
+
+ result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_6)
+
+ # Verifying RIB routes
+ if addr_type == "ipv4":
+ next_hop_4 = NEXT_HOP_4[1]
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_2, next_hop_4, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ if addr_type == "ipv6":
+ if "link_local" in PREFERRED_NEXT_HOP:
+ next_hop_6 = get_frr_ipv6_linklocal(tgen, peer2, intf=intf2)
+ else:
+ next_hop_6 = NEXT_HOP_6[1]
+
+ result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_6)
+
+ logger.info("[Step 4] : Start BGPd daemon on R1 and R4..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ # Start BGPd daemon on R4
+ start_router_daemons(tgen, "r4", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_34_2_p1(request):
+ """
+ Test Objective : Restarting node doesn't preserve the forwarding
+    state, verify the behaviour on helper node, if it still keeps the
+ stale routes.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : test_BGP_GR_chaos_34 "
+ "BGP GR "
+ "[Restart Mode]R1---R3[Helper Mode]"
+ )
+
+ logger.info("[Step 1] : Configure restarting" " router R1 to prevent ")
+ logger.info("[Step 2] : Reset the session" " between R1 and R3..")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"preserve-fw-state": True, "disable-eor": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verify f-bit before killing BGPd daemon
+ result = verify_f_bit(tgen, topo, addr_type, input_dict, "r3", "r1")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes after starting BGPd daemon
+ dut = "r3"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 3] : Kill BGPd daemon on R1..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 4] : Withdraw/delete the prefixes " "originated from R1..")
+
+ # Api call to delete advertised networks
+ network = {"ipv4": "101.0.20.1/32", "ipv6": "1::1/128"}
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": network[addr_type],
+ "no_of_network": 5,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 5] : Remove the CLI from R1's config to " "set the F-bit..")
+
+ # Modify graceful-restart config not to set f-bit
+ # and write to /etc/frr
+ input_dict_2 = {"r1": {"bgp": {"graceful-restart": {"preserve-fw-state": False}}}}
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+ logger.info("[Step 6] : Bring up the BGPd daemon on R1 again..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verify f-bit after starting BGPd daemon
+ result = verify_f_bit(
+ tgen, topo, addr_type, input_dict, "r3", "r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying BGP RIB routes after starting BGPd daemon
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_34_1_p1(request):
+ """
+ Test Objective : Restarting node doesn't preserve forwarding
+ state, helper router should not keep the stale entries.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : test_BGP_GR_chaos_31 "
+ "BGP GR "
+ "[Restart Mode]R1---R3[Helper Mode]"
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {
+ "preserve-fw-state": True,
+ "timer": {"restart-time": GR_RESTART_TIMER},
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r3"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info(
+ "[Step 1] : Remove the preserve-fw-state command"
+ " from restarting node R1's config"
+ )
+
+ # Configure graceful-restart to set f-bit as False
+ input_dict_2 = {"r1": {"bgp": {"graceful-restart": {"preserve-fw-state": False}}}}
+
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ logger.info("[Step 2] : Reset the session between R1 and R3..")
+
+ # Reset sessions
+ for addr_type in ADDR_TYPES:
+ clear_bgp(tgen, addr_type, "r1")
+
+ for addr_type in ADDR_TYPES:
+ # Verify f-bit after starting BGPd daemon
+ result = verify_f_bit(
+ tgen, topo, addr_type, input_dict_2, "r3", "r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ logger.info("[Step 3] : Kill BGPd daemon on R1..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ # Waiting for GR_RESTART_TIMER seconds
+ logger.info("Waiting for {} sec..".format(GR_RESTART_TIMER))
+ sleep(GR_RESTART_TIMER)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes
+ input_dict = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_32_p1(request):
+ """
+ Test Objective : Restarting node is connected to multiple helper
+ nodes and one of them doesn't send an EOR to the restarting router.
+ Verify that only after the select-defer timer (SDT) expires does the
+ restarting node send an EOR to all helper peers, excluding the
+ prefixes originated by the faulty router.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : test_BGP_GR_chaos_32 "
+ "BGP GR "
+ "[Restart Mode]R1---R3&R5[Helper Mode]"
+ )
+
+ logger.info(
+ "[Step 1] : Change the mode on R1 to be a restarting node at the global level"
+ )
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"graceful-restart": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"next_hop_self": True}}},
+ "r5": {"dest_link": {"r1": {"graceful-restart": True}}},
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"next_hop_self": True}}},
+ "r5": {"dest_link": {"r1": {"graceful-restart": True}}},
+ }
+ }
+ },
+ },
+ }
+ },
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r5")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r5"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying BGP RIB routes
+ dut = "r3"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r5"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 2] : Kill BGPd daemon on R1..")
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 3] : Withdraw all the advertised prefixes from R5")
+
+ # API call to delete advertised networks
+ network = {"ipv4": "105.0.20.1/32", "ipv6": "5::1/128"}
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r5": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": network[addr_type],
+ "no_of_network": 5,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ logger.info(
+ "[Step 4] : Stop the helper router R5 from sending the EOR message using the CLI"
+ )
+
+ # Modify graceful-restart config to prevent sending EOR
+ input_dict_3 = {"r5": {"bgp": {"graceful-restart": {"disable-eor": True}}}}
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ logger.info("[Step 5] : Bring up the BGPd daemon on R1..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verify EOR is disabled
+ result = verify_eor(
+ tgen, topo, addr_type, input_dict_3, dut="r5", peer="r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying BGP RIB routes after starting BGPd daemon
+ input_dict_1 = {key: topo["routers"][key] for key in ["r5"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_37_p1(request):
+ """
+ Test Objective : Verify that if the helper node restarts before sending
+ the EOR message, the restarting node doesn't wait until stale-path timer
+ expiry to do the best-path selection and sends an EOR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : test_BGP_GR_chaos_37 "
+ "BGP GR "
+ "[Restart Mode]R1---R3[Helper Mode]"
+ )
+
+ logger.info(
+ "[Step 1] : Configure restarting router R3 to prevent " "sending an EOR.."
+ )
+
+ logger.info("[Step 2] : Reset the session between R3 and R1..")
+
+ # Configure graceful-restart
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "graceful-restart": {"disable-eor": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ },
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verify EOR is disabled
+ result = verify_eor(
+ tgen, topo, addr_type, input_dict, dut="r3", peer="r1", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying BGP RIB routes
+ dut = "r1"
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 3] : Kill BGPd daemon on R1..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 4] : Start BGPd daemon on R1..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 5] : Kill BGPd daemon on R3..")
+
+ # Kill BGPd daemon on R3
+ kill_router_daemons(tgen, "r3", ["bgpd"])
+
+ # Modify graceful-restart config to prevent sending EOR
+ input_dict_2 = {"r3": {"bgp": {"graceful-restart": {"disable-eor": True}}}}
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+ logger.info("[Step 6] : Start BGPd daemon on R3..")
+
+ # Start BGPd daemon on R3
+ start_router_daemons(tgen, "r3", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verify r_bit
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r1", peer="r3")
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes
+ input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_rib(tgen, addr_type, dut, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verify EOR is sent from R1 to R3
+ input_dict_3 = {"r1": {"bgp": {"graceful-restart": {"disable-eor": True}}}}
+
+ result = verify_eor(
+ tgen, topo, addr_type, input_dict_3, dut="r1", peer="r3", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_BGP_GR_chaos_30_p1(request):
+ """
+ Test Objective : Restarting node removes stale routes from Zebra
+ after receiving an EOR from helper router.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Check router status
+ check_router_status(tgen)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ logger.info(
+ " Test Case : test_BGP_GR_chaos_30 "
+ "BGP GR [Helper Mode]R3-----R1[Restart Mode] "
+ )
+
+ # Configure graceful-restart and timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "graceful-restart": {"preserve-fw-state": True},
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r1": {"graceful-restart": True}}}
+ }
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
+
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes before shutting down BGPd daemon
+ dut = "r1"
+ input_dict = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ # Verifying RIB routes before shutting down BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 2] : Kill BGPd daemon on R1..")
+
+ # Kill BGPd daemon on R1
+ kill_router_daemons(tgen, "r1", ["bgpd"])
+
+ logger.info("[Step 3] : Withdraw advertised prefixes from R3...")
+
+ # API call to delete advertised networks
+ network = {"ipv4": "103.0.20.1/32", "ipv6": "3::1/128"}
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": network[addr_type],
+ "no_of_network": 5,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ logger.info("[Step 4] : Start BGPd daemon on R1..")
+
+ # Start BGPd daemon on R1
+ start_router_daemons(tgen, "r1", ["bgpd"])
+
+ for addr_type in ADDR_TYPES:
+ # Verifying BGP RIB routes after starting BGPd daemon
+ input_dict = {key: topo["routers"][key] for key in ["r3"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ # Verifying RIB routes after starting BGPd daemon
+ result = verify_rib(tgen, addr_type, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+ logger.info(" Expected behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ldp-topo1/r3/how_mpls_table.ref b/tests/topotests/ldp-topo1/r3/how_mpls_table.ref
deleted file mode 100644
index 18f7df0ee4..0000000000
--- a/tests/topotests/ldp-topo1/r3/how_mpls_table.ref
+++ /dev/null
@@ -1,10 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- 16 LDP 10.0.2.2 3
- 16 LDP 10.0.3.2 3
- 17 LDP 10.0.2.2 3
- 17 LDP 10.0.3.2 3
- 18 LDP 10.0.2.2 17
- 18 LDP 10.0.3.2 17
- 19 LDP 10.0.2.4 3
diff --git a/tests/topotests/ldp-topo1/r4/how_mpls_table.ref b/tests/topotests/ldp-topo1/r4/how_mpls_table.ref
deleted file mode 100644
index 40efab8b5b..0000000000
--- a/tests/topotests/ldp-topo1/r4/how_mpls_table.ref
+++ /dev/null
@@ -1,9 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- 16 LDP 10.0.2.2 17
- 17 LDP 10.0.2.2 3
- 18 LDP 10.0.2.3 3
- 19 LDP 10.0.2.2 3
- 20 LDP 10.0.2.3 3
- 20 LDP 10.0.2.2 3
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 87765e4db5..2dd90e9a86 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -22,8 +22,9 @@ from copy import deepcopy
from time import sleep
import traceback
import ipaddr
+import os
+import sys
from lib import topotest
-
from lib.topolog import logger
# Import common_config to use commonly used APIs
@@ -36,13 +37,15 @@ from lib.common_config import (
validate_ip_address,
find_interface_with_greater_ip,
run_frr_cmd,
+ FRRCFG_FILE,
retry,
)
-BGP_CONVERGENCE_TIMEOUT = 10
+LOGDIR = "/tmp/topotests/"
+TMPDIR = None
-def create_router_bgp(tgen, topo, input_dict=None, build=False):
+def create_router_bgp(tgen, topo, input_dict=None, build=False, load_config=True):
"""
API to configure bgp on router
@@ -60,6 +63,15 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
"bgp": {
"local_as": "200",
"router_id": "22.22.22.22",
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ "timer": {
+ "restart-time": 30,
+ "rib-stale-time": 30,
+ "select-defer-time": 30,
+ }
+ },
"address_family": {
"ipv4": {
"unicast": {
@@ -84,7 +96,7 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
"dest_link": {
"r4": {
"allowas-in": {
- "number_occurences":2
+ "number_occurences": 2
},
"prefix_lists": [
{
@@ -92,12 +104,13 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
"direction": "in"
}
],
- "route_maps": [
- {"name": "RMAP_MED_R3",
- "direction": "in"}
- ],
+ "route_maps": [{
+ "name": "RMAP_MED_R3",
+ "direction": "in"
+ }],
"next_hop_self": True
- }
+ },
+ "r1": {"graceful-restart-helper": True}
}
}
}
@@ -113,8 +126,12 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
-------
True or False
"""
- logger.debug("Entering lib API: create_router_bgp()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
+
+ # Flag is used when testing ipv6 over ipv4 or vice-versa
+ afi_test = False
+
if not input_dict:
input_dict = deepcopy(topo)
else:
@@ -151,12 +168,17 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
if neigh_unicast:
data_all_bgp = __create_bgp_unicast_neighbor(
- tgen, topo, input_dict, router, config_data=data_all_bgp
+ tgen,
+ topo,
+ input_dict,
+ router,
+ afi_test,
+ config_data=data_all_bgp,
)
try:
result = create_common_configuration(
- tgen, router, data_all_bgp, "bgp", build
+ tgen, router, data_all_bgp, "bgp", build, load_config
)
except InvalidCLIError:
# Traceback
@@ -164,7 +186,7 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: create_router_bgp()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
@@ -184,7 +206,7 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
True or False
"""
- logger.debug("Entering lib API: __create_bgp_global()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
bgp_data = input_dict[router]["bgp"]
del_bgp_action = bgp_data.setdefault("delete", False)
@@ -208,8 +230,6 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
cmd = "{} vrf {}".format(cmd, vrf_id)
config_data.append(cmd)
-
- # Skip RFC8212 in topotests
config_data.append("no bgp ebgp-requires-policy")
router_id = bgp_data.setdefault("router_id", None)
@@ -220,10 +240,78 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
config_data.append("bgp router-id {}".format(router_id))
config_data.append("no bgp network import-check")
+
+ if "graceful-restart" in bgp_data:
+ graceful_config = bgp_data["graceful-restart"]
+
+ graceful_restart = graceful_config.setdefault("graceful-restart", None)
+
+ graceful_restart_disable = graceful_config.setdefault(
+ "graceful-restart-disable", None
+ )
+
+ preserve_fw_state = graceful_config.setdefault("preserve-fw-state", None)
+
+ disable_eor = graceful_config.setdefault("disable-eor", None)
+
+ if graceful_restart == False:
+ cmd = "no bgp graceful-restart"
+ if graceful_restart:
+ cmd = "bgp graceful-restart"
+
+ if graceful_restart is not None:
+ config_data.append(cmd)
+
+ if graceful_restart_disable == False:
+ cmd = "no bgp graceful-restart-disable"
+ if graceful_restart_disable:
+ cmd = "bgp graceful-restart-disable"
+
+ if graceful_restart_disable is not None:
+ config_data.append(cmd)
+
+ if preserve_fw_state == False:
+ cmd = "no bgp graceful-restart preserve-fw-state"
+ if preserve_fw_state:
+ cmd = "bgp graceful-restart preserve-fw-state"
+
+ if preserve_fw_state is not None:
+ config_data.append(cmd)
+
+ if disable_eor == False:
+ cmd = "no bgp graceful-restart disable-eor"
+ if disable_eor:
+ cmd = "bgp graceful-restart disable-eor"
+
+ if disable_eor is not None:
+ config_data.append(cmd)
+
+ if "timer" in bgp_data["graceful-restart"]:
+ timer = bgp_data["graceful-restart"]["timer"]
+
+ if "delete" in timer:
+ del_action = timer["delete"]
+ else:
+ del_action = False
+
+ for rs_timer, value in timer.items():
+ rs_timer_value = timer.setdefault(rs_timer, None)
+
+ if rs_timer_value and rs_timer != "delete":
+ cmd = "bgp graceful-restart {} {}".format(rs_timer, rs_timer_value)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
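+ # Illustrative mapping (example values assumed): an input of
+ #   "graceful-restart": {"graceful-restart": True,
+ #                        "timer": {"restart-time": 30}}
+ # would append "bgp graceful-restart" and
+ # "bgp graceful-restart restart-time 30" to config_data above.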
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return config_data
-def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=None):
+def __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router, afi_test, config_data=None
+):
"""
Helper API to create configuration for address-family unicast
@@ -233,10 +321,11 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=No
* `topo` : json file data
* `input_dict` : Input dict data, required when configuring from testcase
* `router` : router id to be configured.
+ * `afi_test` : use when ipv6 needs to be tested over ipv4 or vice-versa
* `build` : Only for initial setup phase this is set as True.
"""
- logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
add_neigh = True
if "router bgp" in config_data:
@@ -247,7 +336,7 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=No
if not addr_dict:
continue
- if not check_address_types(addr_type):
+ if not check_address_types(addr_type) and not afi_test:
continue
addr_data = addr_dict["unicast"]
@@ -345,7 +434,7 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=No
config_data.extend(neigh_addr_data)
- logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return config_data
@@ -362,7 +451,7 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
"""
config_data = []
- logger.debug("Entering lib API: __create_bgp_neighbor()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
bgp_data = input_dict[router]["bgp"]["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
@@ -392,6 +481,9 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
hold_down = peer.setdefault("holddowntimer", 180)
password = peer.setdefault("password", None)
max_hop_limit = peer.setdefault("ebgp_multihop", 1)
+ graceful_restart = peer.setdefault("graceful-restart", None)
+ graceful_restart_helper = peer.setdefault("graceful-restart-helper", None)
+ graceful_restart_disable = peer.setdefault("graceful-restart-disable", None)
if update_source:
config_data.append(
@@ -409,6 +501,22 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
config_data.append(
"{} timers {} {}".format(neigh_cxt, keep_alive, hold_down)
)
+
+ if graceful_restart:
+ config_data.append("{} graceful-restart".format(neigh_cxt))
+ elif graceful_restart == False:
+ config_data.append("no {} graceful-restart".format(neigh_cxt))
+
+ if graceful_restart_helper:
+ config_data.append("{} graceful-restart-helper".format(neigh_cxt))
+ elif graceful_restart_helper == False:
+ config_data.append("no {} graceful-restart-helper".format(neigh_cxt))
+
+ if graceful_restart_disable:
+ config_data.append("{} graceful-restart-disable".format(neigh_cxt))
+ elif graceful_restart_disable == False:
+ config_data.append("no {} graceful-restart-disable".format(neigh_cxt))
+
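+ # Illustrative mapping (example values assumed): a peer entry such as
+ #   "r1": {"graceful-restart-helper": True}
+ # results in "neighbor <ip> graceful-restart-helper" being appended above.
+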
if password:
config_data.append("{} password {}".format(neigh_cxt, password))
@@ -418,7 +526,7 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
)
config_data.append("{} enforce-multihop".format(neigh_cxt))
- logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return config_data
@@ -439,7 +547,7 @@ def __create_bgp_unicast_address_family(
"""
config_data = []
- logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
bgp_data = input_dict[router]["bgp"]["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
@@ -447,7 +555,10 @@ def __create_bgp_unicast_address_family(
for peer_name, peer_dict in deepcopy(neigh_data).iteritems():
for dest_link, peer in peer_dict["dest_link"].iteritems():
deactivate = None
+ activate = None
nh_details = topo[peer_name]
+ activate_addr_family = peer.setdefault("activate", None)
+ deactivate_addr_family = peer.setdefault("deactivate", None)
# Loopback interface
if "source_link" in peer and peer["source_link"] == "lo":
for destRouterLink, data in sorted(nh_details["links"].iteritems()):
@@ -469,9 +580,23 @@ def __create_bgp_unicast_address_family(
neigh_cxt = "neighbor {}".format(ip_addr)
config_data.append("address-family {} unicast".format(addr_type))
- if deactivate:
+
+ if activate_addr_family is not None:
+ config_data.append(
+ "address-family {} unicast".format(activate_addr_family)
+ )
+
+ config_data.append("{} activate".format(neigh_cxt))
+
+ if deactivate and activate_addr_family is None:
config_data.append("no neighbor {} activate".format(deactivate))
+ if deactivate_addr_family is not None:
+ config_data.append(
+ "address-family {} unicast".format(deactivate_addr_family)
+ )
+ config_data.append("no {} activate".format(neigh_cxt))
+
next_hop_self = peer.setdefault("next_hop_self", None)
send_community = peer.setdefault("send_community", None)
prefix_lists = peer.setdefault("prefix_lists", {})
@@ -547,6 +672,100 @@ def __create_bgp_unicast_address_family(
return config_data
+def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict):
+ """
+ API to save the generated BGP configuration to the router's
+ /etc/frr/bgpd.conf while the BGPd daemon is down
+
+ Parameters
+ ---------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : defines for which router, and which config
+ needs to be modified
+
+ Usage:
+ ------
+ # Modify graceful-restart config not to set f-bit
+ # and write to /etc/frr
+
+ # API call to delete advertised networks
+ input_dict_2 = {
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "101.0.20.1/32",
+ "no_of_network": 5,
+ "delete": True
+ }
+ ],
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "5::1/128",
+ "no_of_network": 5,
+ "delete": True
+ }
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict)
+
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ try:
+
+ global LOGDIR
+
+ result = create_router_bgp(
+ tgen, topo, input_dict, build=False, load_config=False
+ )
+ if result is not True:
+ return result
+
+ # Copy bgp config file to /etc/frr
+ for dut in input_dict.keys():
+ router_list = tgen.routers()
+ for router, rnode in router_list.iteritems():
+ if router != dut:
+ continue
+
+ TMPDIR = os.path.join(LOGDIR, tgen.modname)
+
+ logger.info("Applying BGP config while BGPd is down in {}".format(router))
+ # Reading the config from /tmp/topotests and
+ # copy to /etc/frr/bgpd.conf
+ cmd = "cat {}/{}/{} >> /etc/frr/bgpd.conf".format(
+ TMPDIR, router, FRRCFG_FILE
+ )
+ router_list[router].run(cmd)
+
+ except Exception as e:
+ # handle any exception
+ logger.error("Error %s occurred. Arguments %s.", e.message, e.args)
+
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
#############################################
# Verification APIs
#############################################
@@ -585,7 +804,7 @@ def verify_router_id(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_router_id()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
continue
@@ -616,7 +835,7 @@ def verify_router_id(tgen, topo, input_dict):
)
return errormsg
- logger.debug("Exiting lib API: verify_router_id()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -641,7 +860,7 @@ def verify_bgp_convergence(tgen, topo):
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_convergence()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router, rnode in tgen.routers().iteritems():
if "bgp" not in topo["routers"][router]:
continue
@@ -723,7 +942,7 @@ def modify_as_number(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.debug("Entering lib API: modify_as_number()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
new_topo = deepcopy(topo["routers"])
@@ -750,8 +969,7 @@ def modify_as_number(tgen, topo, input_dict):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: modify_as_number()")
-
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -785,7 +1003,7 @@ def verify_as_numbers(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_as_numbers()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
continue
@@ -850,10 +1068,47 @@ def verify_as_numbers(tgen, topo, input_dict):
remote_as,
)
- logger.debug("Exiting lib API: verify_AS_numbers()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
+def clear_bgp(tgen, addr_type, router, vrf=None):
+ """
+ This API is to clear bgp neighborship by running
+ clear ip bgp * / clear bgp ipv6 * commands.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `addr_type`: ip type ipv4/ipv6
+ * `router`: device under test
+ * `vrf`: vrf name
+
+ Usage
+ -----
+ clear_bgp(tgen, addr_type, "r1")
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ if vrf:
+ if type(vrf) is not list:
+ vrf = [vrf]
+
+ # Clearing BGP
+ logger.info("Clearing BGP neighborship for router %s..", router)
+ if addr_type == "ipv4":
+ run_frr_cmd(rnode, "clear ip bgp *")
+ elif addr_type == "ipv6":
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+
def clear_bgp_and_verify(tgen, topo, router):
"""
This API is to clear bgp neighborship and verify bgp neighborship
@@ -877,7 +1132,7 @@ def clear_bgp_and_verify(tgen, topo, router):
errormsg(str) or True
"""
- logger.debug("Entering lib API: clear_bgp_and_verify()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if router not in tgen.routers():
return False
@@ -1048,7 +1303,7 @@ def clear_bgp_and_verify(tgen, topo, router):
)
return errormsg
- logger.debug("Exiting lib API: clear_bgp_and_verify()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1084,7 +1339,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
sleep(5)
router_list = tgen.routers()
for router in input_dict.keys():
@@ -1291,7 +1546,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
bgp_neighbor,
)
- logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1355,7 +1610,7 @@ def verify_bgp_attributes(
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_attributes()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router, rnode in tgen.routers().iteritems():
if router != dut:
continue
@@ -1430,7 +1685,7 @@ def verify_bgp_attributes(
)
return errormsg
- logger.debug("Exiting lib API: verify_bgp_attributes()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1485,7 +1740,7 @@ def verify_best_path_as_per_bgp_attribute(
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_best_path_as_per_bgp_attribute()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if router not in tgen.routers():
return False
@@ -1595,7 +1850,7 @@ def verify_best_path_as_per_bgp_attribute(
router,
)
- logger.debug("Exiting lib API: verify_best_path_as_per_bgp_attribute()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1634,7 +1889,7 @@ def verify_best_path_as_per_admin_distance(
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_best_path_as_per_admin_distance()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
if router not in router_list:
return False
@@ -1706,11 +1961,11 @@ def verify_best_path_as_per_admin_distance(
router,
)
- logger.info("Exiting lib API: verify_best_path_as_per_admin_distance()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
-@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2)
+@retry(attempts=10, wait=2, return_is_str=True, initial_wait=2)
def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None):
"""
This API is to verify whether bgp rib has any
@@ -1740,7 +1995,7 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None)
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_rib()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
additional_nexthops_in_required_nhs = []
@@ -1955,254 +2210,891 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None)
"routes are: {}\n".format(dut, found_routes)
)
- logger.debug("Exiting lib API: verify_bgp_rib()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
-@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2)
-def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None):
+@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
+def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer):
"""
- This API is to verify whether bgp rib has any
- matching route for a nexthop.
+ This API is to verify the graceful-restart configuration of the DUT and
+ cross-verify the same from the peer BGP router.
Parameters
----------
* `tgen`: topogen object
- * `dut`: input dut router name
+ * `topo`: input json file data
* `addr_type` : ip type ipv4/ipv6
- * `input_dict` : input dict, has details of static routes
- * `next_hop`[optional]: next_hop which needs to be verified,
- default = static
- * 'aspath'[optional]: aspath which needs to be verified
+ * `input_dict`: input dictionary with details of the Device Under Test
+ for which the user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: input peer router name
Usage
-----
- dut = 'r1'
- next_hop = "192.168.1.10"
- input_dict = topo['routers']
- aspath = "100 200 300"
- result = verify_bgp_rib(tgen, addr_type, dut, tgen, input_dict,
- next_hop, aspath)
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = verify_graceful_restart(tgen, topo, addr_type, input_dict,
+ dut = "r1", peer = 'r2')
Returns
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_rib()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- router_list = tgen.routers()
- additional_nexthops_in_required_nhs = []
- list1 = []
- list2 = []
- for routerInput in input_dict.keys():
- for router, rnode in router_list.iteritems():
- if router != dut:
+ for router, rnode in tgen.routers().iteritems():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][dut]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
continue
- # Verifying RIB routes
- command = "show bgp"
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
- # Static routes
- sleep(2)
- logger.info("Checking router {} BGP RIB:".format(dut))
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
- if "static_routes" in input_dict[routerInput]:
- static_routes = input_dict[routerInput]["static_routes"]
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
- for static_route in static_routes:
- found_routes = []
- missing_routes = []
- st_found = False
- nh_found = False
- vrf = static_route.setdefault("vrf", None)
- if vrf:
- cmd = "{} vrf {} {}".format(command, vrf, addr_type)
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
- else:
- cmd = "{} {}".format(command, addr_type)
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
- cmd = "{} json".format(cmd)
+ show_bgp_graceful_json = None
- rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
- # Verifying output dictionary rib_routes_json is not empty
- if bool(rib_routes_json) == False:
- errormsg = "No route found in rib of router {}..".format(router)
- return errormsg
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
- network = static_route["network"]
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip did NOT match {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
- if "no_of_ip" in static_route:
- no_of_ip = static_route["no_of_ip"]
+ lmode = None
+ rmode = None
+ # Local GR mode
+ if "address_family" in input_dict[dut]["bgp"]:
+ bgp_neighbors = input_dict[dut]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]["neighbor"][peer]["dest_link"]
+
+ for dest_link, data in bgp_neighbors.items():
+ if (
+ "graceful-restart-helper" in data
+ and data["graceful-restart-helper"]
+ ):
+ lmode = "Helper"
+ elif "graceful-restart" in data and data["graceful-restart"]:
+ lmode = "Restart"
+ elif (
+ "graceful-restart-disable" in data
+ and data["graceful-restart-disable"]
+ ):
+ lmode = "Disable"
else:
- no_of_ip = 1
+ lmode = None
- # Generating IPs for verification
- ip_list = generate_ips(network, no_of_ip)
+ if lmode is None:
+ if "graceful-restart" in input_dict[dut]["bgp"]:
- for st_rt in ip_list:
- st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
+ if (
+ "graceful-restart" in input_dict[dut]["bgp"]["graceful-restart"]
+ and input_dict[dut]["bgp"]["graceful-restart"][
+ "graceful-restart"
+ ]
+ ):
+ lmode = "Restart*"
+ elif (
+ "graceful-restart-disable"
+ in input_dict[dut]["bgp"]["graceful-restart"]
+ and input_dict[dut]["bgp"]["graceful-restart"][
+ "graceful-restart-disable"
+ ]
+ ):
+ lmode = "Disable*"
+ else:
+ lmode = "Helper*"
+ else:
+ lmode = "Helper*"
+
+ if lmode == "Disable" or lmode == "Disable*":
+ return True
+
+ # Remote GR mode
+ if "address_family" in input_dict[peer]["bgp"]:
+ bgp_neighbors = input_dict[peer]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]["neighbor"][dut]["dest_link"]
+
+ for dest_link, data in bgp_neighbors.items():
+ if (
+ "graceful-restart-helper" in data
+ and data["graceful-restart-helper"]
+ ):
+ rmode = "Helper"
+ elif "graceful-restart" in data and data["graceful-restart"]:
+ rmode = "Restart"
+ elif (
+ "graceful-restart-disable" in data
+ and data["graceful-restart-disable"]
+ ):
+ rmode = "Disable"
+ else:
+ rmode = None
- _addr_type = validate_ip_address(st_rt)
- if _addr_type != addr_type:
- continue
+ if rmode is None:
+ if "graceful-restart" in input_dict[peer]["bgp"]:
- if st_rt in rib_routes_json["routes"]:
- st_found = True
- found_routes.append(st_rt)
+ if (
+ "graceful-restart"
+ in input_dict[peer]["bgp"]["graceful-restart"]
+ and input_dict[peer]["bgp"]["graceful-restart"][
+ "graceful-restart"
+ ]
+ ):
+ rmode = "Restart"
+ elif (
+ "graceful-restart-disable"
+ in input_dict[peer]["bgp"]["graceful-restart"]
+ and input_dict[peer]["bgp"]["graceful-restart"][
+ "graceful-restart-disable"
+ ]
+ ):
+ rmode = "Disable"
+ else:
+ rmode = "Helper"
+ else:
+ rmode = "Helper"
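+
+ # Illustrative expectation (example values assumed): with per-neighbor
+ # "graceful-restart": True on the DUT side and "graceful-restart-helper":
+ # True on the peer side, lmode is "Restart" and rmode is "Helper"; both are
+ # compared against localGrMode and remoteGrMode from the show output below.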
- if next_hop:
- if not isinstance(next_hop, list):
- next_hop = [next_hop]
- list1 = next_hop
- found_hops = [
- rib_r["ip"]
- for rib_r in rib_routes_json["routes"][st_rt][0][
- "nexthops"
- ]
- ]
- list2 = found_hops
- missing_list_of_nexthops = set(list2).difference(list1)
- additional_nexthops_in_required_nhs = set(
- list1
- ).difference(list2)
+ if show_bgp_graceful_json_out["localGrMode"] == lmode:
+ logger.info(
+ "[DUT: {}]: localGrMode : {} ".format(
+ dut, show_bgp_graceful_json_out["localGrMode"]
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT: {}]: localGrMode is not correct"
+ " Expected: {}, Found: {}".format(
+ dut, lmode, show_bgp_graceful_json_out["localGrMode"]
+ )
+ )
+ return errormsg
- if list2:
- if additional_nexthops_in_required_nhs:
- logger.info(
- "Missing nexthop %s for route"
- " %s in RIB of router %s\n",
- additional_nexthops_in_required_nhs,
- st_rt,
- dut,
- )
- errormsg = (
- "Nexthop {} is Missing for "
- "route {} in RIB of router {}\n".format(
- additional_nexthops_in_required_nhs,
- st_rt,
- dut,
- )
- )
- return errormsg
- else:
- nh_found = True
- if aspath:
- found_paths = rib_routes_json["routes"][st_rt][0][
- "path"
- ]
- if aspath == found_paths:
- aspath_found = True
- logger.info(
- "Found AS path {} for route"
- " {} in RIB of router "
- "{}\n".format(aspath, st_rt, dut)
- )
- else:
- errormsg = (
- "AS Path {} is missing for route"
- "for route {} in RIB of router {}\n".format(
- aspath, st_rt, dut
- )
- )
- return errormsg
+ if show_bgp_graceful_json_out["remoteGrMode"] == rmode:
+ logger.info(
+ "[DUT: {}]: remoteGrMode : {} ".format(
+ dut, show_bgp_graceful_json_out["remoteGrMode"]
+ )
+ )
+ elif (
+ show_bgp_graceful_json_out["remoteGrMode"] == "NotApplicable"
+ and rmode == "Disable"
+ ):
+ logger.info(
+ "[DUT: {}]: remoteGrMode : {} ".format(
+ dut, show_bgp_graceful_json_out["remoteGrMode"]
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT: {}]: remoteGrMode is not correct"
+ " Expected: {}, Found: {}".format(
+ dut, rmode, show_bgp_graceful_json_out["remoteGrMode"]
+ )
+ )
+ return errormsg
- else:
- missing_routes.append(st_rt)
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
- if nh_found:
- logger.info(
- "Found next_hop {} for all bgp"
- " routes in RIB of"
- " router {}\n".format(next_hop, router)
- )
- if len(missing_routes) > 0:
- errormsg = (
- "Missing route in RIB of router {}, "
- "routes: {}\n".format(dut, missing_routes)
- )
- return errormsg
+@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
+def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer):
+ """
+ This API is to verify r_bit in the BGP gr capability advertised
+ by the neighbor router
- if found_routes:
- logger.info(
- "Verified routes in router {} BGP RIB, "
- "found routes are: {} \n".format(dut, found_routes)
- )
- continue
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary with details of the Device Under Test
+ for which the user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer)
- if "bgp" not in input_dict[routerInput]:
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().iteritems():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
continue
- # Advertise networks
- bgp_data_list = input_dict[routerInput]["bgp"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
- if type(bgp_data_list) is not list:
- bgp_data_list = [bgp_data_list]
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
- for bgp_data in bgp_data_list:
- vrf_id = bgp_data.setdefault("vrf", None)
- if vrf_id:
- cmd = "{} vrf {} {}".format(command, vrf_id, addr_type)
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip did NOT match {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ if "rBit" in show_bgp_graceful_json_out:
+ if show_bgp_graceful_json_out["rBit"]:
+ logger.info("[DUT: {}]: Rbit true {}".format(dut, neighbor_ip))
else:
- cmd = "{} {}".format(command, addr_type)
+ errormsg = "[DUT: {}]: Rbit false {}".format(dut, neighbor_ip)
+ return errormsg
- cmd = "{} json".format(cmd)
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
- rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
- # Verifying output dictionary rib_routes_json is not empty
- if bool(rib_routes_json) == False:
- errormsg = "No route found in rib of router {}..".format(router)
+@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
+def verify_eor(tgen, topo, addr_type, input_dict, dut, peer):
+ """
+ This API is to verify EOR
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary with details of the DUT
+ for which the user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = verify_eor(tgen, topo, addr_type, input_dict, dut, peer)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().iteritems():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: %s]: Checking bgp graceful-restart" " show o/p %s",
+ dut,
+ neighbor_ip,
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info("[DUT: %s]: Neighbor ip matched %s", dut, neighbor_ip)
+ else:
+ errormsg = "[DUT: %s]: Neighbor ip is NOT matched %s" % (
+ dut,
+ neighbor_ip,
+ )
+ return errormsg
+
+ if addr_type == "ipv4":
+ afi = "ipv4Unicast"
+ elif addr_type == "ipv6":
+ afi = "ipv6Unicast"
+ else:
+ errormsg = "Address type %s is not supported" % (addr_type)
+ return errormsg
+
+ eor_json = show_bgp_graceful_json_out[afi]["endOfRibStatus"]
+ if "endOfRibSend" in eor_json:
+
+ if eor_json["endOfRibSend"]:
+ logger.info(
+ "[DUT: %s]: EOR Send true for %s " "%s", dut, neighbor_ip, afi
+ )
+ else:
+ errormsg = "[DUT: %s]: EOR Send false for %s" " %s" % (
+ dut,
+ neighbor_ip,
+ afi,
+ )
return errormsg
- bgp_net_advertise = bgp_data["address_family"][addr_type]["unicast"]
- advertise_network = bgp_net_advertise.setdefault(
- "advertise_networks", []
+ if "endOfRibRecv" in eor_json:
+ if eor_json["endOfRibRecv"]:
+ logger.info(
+ "[DUT: %s]: EOR Recv true %s " "%s", dut, neighbor_ip, afi
+ )
+ else:
+ errormsg = "[DUT: %s]: EOR Recv false %s " "%s" % (
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ return errormsg
+
+ if "endOfRibSentAfterUpdate" in eor_json:
+ if eor_json["endOfRibSentAfterUpdate"]:
+ logger.info(
+ "[DUT: %s]: EOR SendTime true for %s" " %s",
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ else:
+ errormsg = "[DUT: %s]: EOR SendTime false for " "%s %s" % (
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ return errormsg
+
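+ # For reference, the JSON consulted above roughly has this shape
+ # (illustrative values only):
+ #   {"<neighbor-ip>": {"neighborAddr": "<neighbor-ip>",
+ #                      "ipv4Unicast": {"endOfRibStatus": {
+ #                          "endOfRibSend": True, "endOfRibRecv": True,
+ #                          "endOfRibSentAfterUpdate": True}}}}
+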
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
+def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer):
+ """
+ This API is to verify f_bit in the BGP gr capability advertised
+ by the neighbor router
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary with details of the Device Under Test
+ for which the user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = verify_f_bit(tgen, topo, 'ipv4', input_dict, dut, peer)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().iteritems():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
)
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip NOT a match {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
- for advertise_network_dict in advertise_network:
- found_routes = []
- missing_routes = []
- found = False
+ if "ipv4Unicast" in show_bgp_graceful_json_out:
+ if show_bgp_graceful_json_out["ipv4Unicast"]["fBit"]:
+ logger.info(
+ "[DUT: {}]: Fbit True for {} IPv4"
+ " Unicast".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Fbit False for {} IPv4" " Unicast".format(
+ dut, neighbor_ip
+ )
+ return errormsg
- network = advertise_network_dict["network"]
+ elif "ipv6Unicast" in show_bgp_graceful_json_out:
+ if show_bgp_graceful_json_out["ipv6Unicast"]["fBit"]:
+ logger.info(
+ "[DUT: {}]: Fbit True for {} IPv6"
+ " Unicast".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Fbit False for {} IPv6" " Unicast".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+ else:
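+ # Fallback: the bare lookups below presumably exist to raise KeyError
+ # (and thus trigger the @retry decorator) when neither AFI is present.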
+ show_bgp_graceful_json_out["ipv4Unicast"]
+ show_bgp_graceful_json_out["ipv6Unicast"]
- if "no_of_network" in advertise_network_dict:
- no_of_network = advertise_network_dict["no_of_network"]
- else:
- no_of_network = 1
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
- # Generating IPs for verification
- ip_list = generate_ips(network, no_of_network)
- for st_rt in ip_list:
- st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
+@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
+def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer):
+ """
+ This API is to verify graceful restart timers, configured and received
- _addr_type = validate_ip_address(st_rt)
- if _addr_type != addr_type:
- continue
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary with details of the Device Under Test
+ for which the user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+ Usage
+ -----
+ # Configure graceful-restart
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "bgp_neighbors": {
+ "r3": {
+ "graceful-restart": "graceful-restart-helper"
+ }
+ },
+ "gracefulrestart": ["restart-time 150"]
+ }
+ },
+ "r3": {
+ "bgp": {
+ "bgp_neighbors": {
+ "r1": {
+ "graceful-restart": "graceful-restart"
+ }
+ }
+ }
+ }
+ }
- if st_rt in rib_routes_json["routes"]:
- found = True
- found_routes.append(st_rt)
- else:
- found = False
- missing_routes.append(st_rt)
+ result = verify_graceful_restart_timers(tgen, topo, 'ipv4', input_dict)
- if len(missing_routes) > 0:
- errormsg = (
- "Missing route in BGP RIB of router {},"
- " are: {}\n".format(dut, missing_routes)
- )
- return errormsg
+ Returns
+ -------
+ errormsg(str) or True
+ """
- if found_routes:
- logger.info(
- "Verified routes in router {} BGP RIB, found "
- "routes are: {}\n".format(dut, found_routes)
- )
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().iteritems():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][dut]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
- logger.debug("Exiting lib API: verify_bgp_rib()")
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip is NOT matched {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ # Graceful-restart timer
+ if "graceful-restart" in input_dict[peer]["bgp"]:
+ if "timer" in input_dict[peer]["bgp"]["graceful-restart"]:
+ for rs_timer, value in input_dict[peer]["bgp"]["graceful-restart"][
+ "timer"
+ ].items():
+ if rs_timer == "restart-time":
+
+ receivedTimer = value
+ if (
+ show_bgp_graceful_json_out["timers"][
+ "receivedRestartTimer"
+ ]
+ == receivedTimer
+ ):
+ logger.info(
+ "receivedRestartTimer is {}"
+ " on {} from peer {}".format(
+ receivedTimer, router, peer
+ )
+ )
+ else:
+ errormsg = (
+ "receivedRestartTimer is not"
+ " as expected {}".format(receivedTimer)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
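
For orientation, a minimal sketch of how verify_graceful_restart_timers() would typically be driven from a test case. It assumes a pytest-style topotest where `tgen` and `topo` are already built, hypothetical routers r1/r3, and assumed helpers `get_topogen` and `create_router_bgp` that are not defined in this diff:

# Hedged sketch: configure GR with a restart-time on both peers, then assert
# that r1 received the timer r3 advertises.  The helper returns True or an
# error string, which the @retry decorator re-evaluates before giving up.
input_dict = {
    "r1": {"bgp": {"graceful-restart": {"timer": {"restart-time": 150}}}},
    "r3": {"bgp": {"graceful-restart": {"timer": {"restart-time": 150}}}},
}

def test_gr_restart_timer(request):
    tgen = get_topogen()                                  # assumed fixture/helper
    result = create_router_bgp(tgen, topo, input_dict)    # assumed config API
    assert result is True, "BGP GR config failed: {}".format(result)

    result = verify_graceful_restart_timers(
        tgen, topo, "ipv4", input_dict, dut="r1", peer="r3"
    )
    assert result is True, result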
+
+
+@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
+def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut):
+ """
+ This API is to verify gr_address_family in the BGP gr capability advertised
+ by the neighbor router
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+    * `addr_family` : address family ipv4Unicast/ipv6Unicast
+ * `dut`: input dut router name
+
+ Usage
+ -----
+
+ result = verify_gr_address_family(tgen, topo, "ipv4", "ipv4Unicast", "r1")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().iteritems():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart"
+ " show o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info("Neighbor ip matched {}".format(neighbor_ip))
+ else:
+ errormsg = "Neighbor ip NOT a match {}".format(neighbor_ip)
+ return errormsg
+
+ if addr_family == "ipv4Unicast":
+ if "ipv4Unicast" in show_bgp_graceful_json_out:
+ logger.info("ipv4Unicast present for {} ".format(neighbor_ip))
+ return True
+ else:
+ errormsg = "ipv4Unicast NOT present for {} ".format(neighbor_ip)
+ return errormsg
+
+ elif addr_family == "ipv6Unicast":
+ if "ipv6Unicast" in show_bgp_graceful_json_out:
+ logger.info("ipv6Unicast present for {} ".format(neighbor_ip))
+ return True
+ else:
+ errormsg = "ipv6Unicast NOT present for {} ".format(neighbor_ip)
+ return errormsg
+ else:
+                        errormsg = "Address family: {} is not supported for {} ".format(
+ addr_family, neighbor_ip
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
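+
A hedged one-line usage of the capability check above (tgen/topo taken from the running topotest): confirm that r1's peer advertised graceful-restart for IPv4 Unicast. The helper returns True or an error string and is re-run by the @retry decorator before finally failing.

result = verify_gr_address_family(tgen, topo, "ipv4", "ipv4Unicast", "r1")
assert result is True, result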
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 5ee59070cc..0b19877aff 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -32,6 +32,7 @@ from tempfile import mkdtemp
import StringIO
import os
+import sys
import ConfigParser
import traceback
import socket
@@ -100,13 +101,11 @@ SEQ_ID = {"prefix_lists": {}, "route_maps": {}}
def get_seq_id(obj_type, router, obj_name):
"""
Generates and saves sequence number in interval of 10
-
Parameters
----------
* `obj_type`: prefix_lists or route_maps
* `router`: router name
*` obj_name`: name of the prefix-list or route-map
-
Returns
--------
Sequence number generated
@@ -125,7 +124,6 @@ def get_seq_id(obj_type, router, obj_name):
def set_seq_id(obj_type, router, id, obj_name):
"""
Saves sequence number if not auto-generated and given by user
-
Parameters
----------
* `obj_type`: prefix_lists or route_maps
@@ -149,11 +147,9 @@ class InvalidCLIError(Exception):
def run_frr_cmd(rnode, cmd, isjson=False):
"""
    Execute frr show commands in privileged mode
-
* `rnode`: router node on which commands needs to executed
* `cmd`: Command to be executed on frr
* `isjson`: If command is to get json data or not
-
:return str:
"""
@@ -179,12 +175,13 @@ def run_frr_cmd(rnode, cmd, isjson=False):
raise InvalidCLIError("No actual cmd passed")
-def create_common_configuration(tgen, router, data, config_type=None, build=False):
+def create_common_configuration(
+ tgen, router, data, config_type=None, build=False, load_config=True
+):
"""
API to create object of class FRRConfig and also create frr_json.conf
file. It will create interface and common configurations and save it to
frr_json.conf and load to router
-
Parameters
----------
    * `tgen`: tgen object
@@ -193,7 +190,6 @@ def create_common_configuration(tgen, router, data, config_type=None, build=Fals
* `config_type` : Syntactic information while writing configuration. Should
be one of the value as mentioned in the config_map below.
* `build` : Only for initial setup phase this is set as True
-
Returns
-------
True or False
@@ -216,6 +212,8 @@ def create_common_configuration(tgen, router, data, config_type=None, build=Fals
if build:
mode = "a"
+ elif not load_config:
+ mode = "a"
else:
mode = "w"
@@ -236,17 +234,127 @@ def create_common_configuration(tgen, router, data, config_type=None, build=Fals
frr_cfg_fd.close()
# If configuration applied from build, it will done at last
- if not build:
+ if not build and load_config:
load_config_to_router(tgen, router)
return True
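
The new load_config flag lets a test accumulate several configuration blocks in the per-router frr_json.conf before pushing everything in one shot. A minimal sketch, assuming `interface_cfg` and `bgp_cfg` are lists of config lines built elsewhere and that "interface_config" / "bgp" are valid config_type keys (both are assumptions, not verified against the config_map):

# Hedged sketch: load_config=False switches the file to append mode and skips
# the immediate load, so both blocks can be loaded together afterwards.
create_common_configuration(tgen, "r1", interface_cfg,
                            config_type="interface_config", load_config=False)
create_common_configuration(tgen, "r1", bgp_cfg,
                            config_type="bgp", load_config=False)
load_config_to_router(tgen, "r1")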
+def kill_router_daemons(tgen, router, daemons):
+ """
+    Router's current config would be saved to /etc/frr/ for each daemon
+    and the daemon would be killed forcefully using SIGKILL.
+ * `tgen` : topogen object
+ * `router`: Device under test
+ * `daemons`: list of daemons to be killed
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ router_list = tgen.routers()
+
+ # Saving router config to /etc/frr, which will be loaded to router
+ # when it starts
+ router_list[router].vtysh_cmd("write memory")
+
+ # Kill Daemons
+ result = router_list[router].killDaemons(daemons)
+ if len(result) > 0:
+ assert "Errors found post shutdown - details follow:" == 0, result
+ return result
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+
+def start_router_daemons(tgen, router, daemons):
+ """
+ Daemons defined by user would be started
+ * `tgen` : topogen object
+ * `router`: Device under test
+    * `daemons`: list of daemons to be started
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ router_list = tgen.routers()
+
+ # Start daemons
+ result = router_list[router].startDaemons(daemons)
+ return result
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def kill_mininet_routers_process(tgen):
+ """
+    Kill all stale mininet router processes
+ * `tgen` : topogen object
+ """
+
+ router_list = tgen.routers()
+ for rname, router in router_list.iteritems():
+ daemon_list = [
+ "zebra",
+ "ospfd",
+ "ospf6d",
+ "bgpd",
+ "ripd",
+ "ripngd",
+ "isisd",
+ "pimd",
+ "ldpd",
+ "staticd",
+ ]
+ for daemon in daemon_list:
+ router.run("killall -9 {}".format(daemon))
+
+
+def check_router_status(tgen):
+ """
+ Check if all daemons are running for all routers in topology
+ * `tgen` : topogen object
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ router_list = tgen.routers()
+ for router, rnode in router_list.iteritems():
+
+ result = rnode.check_router_running()
+ if result != "":
+ daemons = []
+ if "bgpd" in result:
+ daemons.append("bgpd")
+ if "zebra" in result:
+ daemons.append("zebra")
+
+ rnode.startDaemons(daemons)
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
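Taken together, these helpers give the graceful-restart topotests a way to simulate a hard BGP restart. A short hedged sketch (tgen from the running topotest, router name illustrative):

# Save config and SIGKILL bgpd on the restarting router, verify retention on
# the peer while it is down, then restart bgpd and confirm all daemons are up.
kill_router_daemons(tgen, "r1", ["bgpd"])
# ... verify stale routes are retained on the helper peer here ...
start_router_daemons(tgen, "r1", ["bgpd"])
result = check_router_status(tgen)
assert result is True, result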
def reset_config_on_routers(tgen, routerName=None):
"""
Resets configuration on routers to the snapshot created using input JSON
file. It replaces existing router configuration with FRRCFG_BKUP_FILE
-
Parameters
----------
* `tgen` : Topogen object
@@ -362,7 +470,6 @@ def reset_config_on_routers(tgen, routerName=None):
def load_config_to_router(tgen, routerName, save_bkup=False):
"""
Loads configuration on router from the file FRRCFG_FILE.
-
Parameters
----------
* `tgen` : Topogen object
@@ -414,6 +521,66 @@ def load_config_to_router(tgen, routerName, save_bkup=False):
return True
+def get_frr_ipv6_linklocal(tgen, router, intf=None):
+ """
+    API to get the link-local IPv6 address of a particular interface using
+    the FRR command 'show interface'
+    * `tgen`: tgen object
+    * `router` : router for which the link-local address should be
+                 fetched
+    * `intf` : interface for which the link-local address needs to be taken
+ Usage
+ -----
+    linklocal = get_frr_ipv6_linklocal(tgen, router, "intf1")
+ Returns
+ -------
+    1) link-local address of `intf` if `intf` is given, else a list of
+       [interface, link-local address] pairs.
+ """
+
+ router_list = tgen.routers()
+ for rname, rnode in router_list.iteritems():
+ if rname != router:
+ continue
+
+ linklocal = []
+
+ cmd = "show interface"
+
+ ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
+
+ # Fix newlines (make them all the same)
+ ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
+ if m:
+ interface = m.group(1).split(" ")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m1 = re_search("inet6 (fe80[:a-fA-F0-9]+[\/0-9]+)", line)
+ if m1:
+ local = m1.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ if linklocal:
+ if intf:
+ return [_linklocal[1] for _linklocal in linklocal if _linklocal[0] == intf][
+ 0
+ ].split("/")[0]
+ return linklocal
+ else:
+        errormsg = "Link local ip missing on router {}".format(router)
+ return errormsg
+
+
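A hedged usage sketch for the link-local helper (the interface name is a placeholder):

# With intf, the bare link-local address of that interface is returned;
# without it, a list of [interface, address] pairs is returned instead.
ll_addr = get_frr_ipv6_linklocal(tgen, "r2", intf="r2-r1-eth0")
logger.info("r2 link-local on r2-r1-eth0: %s", ll_addr)

for ifname, addr in get_frr_ipv6_linklocal(tgen, "r2"):
    logger.info("%s -> %s", ifname, addr)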
def start_topology(tgen):
"""
Starting topology, create tmp files which are loaded to routers
@@ -494,11 +661,9 @@ def number_to_column(routerName):
def validate_ip_address(ip_address):
"""
Validates the type of ip address
-
Parameters
----------
* `ip_address`: IPv4/IPv6 address
-
Returns
-------
Type of address as string
@@ -565,7 +730,6 @@ def generate_ips(network, no_of_ips):
"""
Returns list of IPs.
based on start_ip and no_of_ips
-
* `network` : from here the ip will start generating,
start_ip will be
* `no_of_ips` : these many IPs will be generated
@@ -606,7 +770,6 @@ def find_interface_with_greater_ip(topo, router, loopback=True, interface=True):
Returns highest interface ip for ipv4/ipv6. If loopback is there then
it will return highest IP from loopback IPs otherwise from physical
interface IPs.
-
* `topo` : json file data
* `router` : router for which hightest interface should be calculated
"""
@@ -654,11 +817,9 @@ def write_test_footer(tc_name):
def interface_status(tgen, topo, input_dict):
"""
Delete ip route maps from device
-
* `tgen` : Topogen object
* `topo` : json file data
* `input_dict` : for which router, route map has to be deleted
-
Usage
-----
input_dict = {
@@ -671,7 +832,7 @@ def interface_status(tgen, topo, input_dict):
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: interface_status()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
global frr_cfg
@@ -695,19 +856,17 @@ def interface_status(tgen, topo, input_dict):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: interface_status()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0):
"""
Retries function execution, if return is an errormsg or exception
-
* `attempts`: Number of attempts to make
* `wait`: Number of seconds to wait between each attempt
* `return_is_str`: Return val is an errormsg in case of failure
* `initial_wait`: Sleeps for this much seconds before executing function
-
"""
def _retry(func):
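
For reference, a hedged sketch of how a new verification helper opts into this polling behaviour: return an error string on failure, True on success, and decorate it. The helper below is illustrative only and is not defined in this library.

@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
def verify_bgp_summary_present(tgen, dut):
    # run_frr_cmd() is the library API shown above; the command is a plain show
    output = run_frr_cmd(tgen.routers()[dut], "show bgp summary json", isjson=True)
    if not output:
        return "[DUT: {}]: no BGP summary output".format(dut)   # retried
    return True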
@@ -786,13 +945,11 @@ def create_interfaces_cfg(tgen, topo, build=False):
"""
Create interface configuration for created topology. Basic Interface
configuration is provided in input json file.
-
Parameters
----------
* `tgen` : Topogen object
* `topo` : json file data
* `build` : Only for initial setup phase this is set as True.
-
Returns
-------
True or False
@@ -831,13 +988,11 @@ def create_interfaces_cfg(tgen, topo, build=False):
def create_static_routes(tgen, input_dict, build=False):
"""
Create static routes for given router as defined in input_dict
-
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
-
Usage
-----
input_dict should be in the format below:
@@ -848,7 +1003,6 @@ def create_static_routes(tgen, input_dict, build=False):
# next_hop: starting next-hop address
# tag: tag id for static routes
# delete: True if config to be removed. Default False.
-
Example:
"routers": {
"r1": {
@@ -864,13 +1018,12 @@ def create_static_routes(tgen, input_dict, build=False):
]
}
}
-
Returns
-------
errormsg(str) or True
"""
result = False
- logger.debug("Entering lib API: create_static_routes()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
for router in input_dict.keys():
@@ -927,7 +1080,7 @@ def create_static_routes(tgen, input_dict, build=False):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: create_static_routes()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
@@ -935,13 +1088,11 @@ def create_prefix_lists(tgen, input_dict, build=False):
"""
Create ip prefix lists as per the config provided in input
JSON or input_dict
-
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
-
Usage
-----
# pf_lists_1: name of prefix-list, user defined
@@ -950,7 +1101,6 @@ def create_prefix_lists(tgen, input_dict, build=False):
# action: permit/deny
# le: less than or equal number of bits
# ge: greater than or equal number of bits
-
Example
-------
input_dict = {
@@ -971,13 +1121,12 @@ def create_prefix_lists(tgen, input_dict, build=False):
}
}
}
-
Returns
-------
errormsg or True
"""
- logger.debug("Entering lib API: create_prefix_lists()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
try:
for router in input_dict.keys():
@@ -1036,20 +1185,18 @@ def create_prefix_lists(tgen, input_dict, build=False):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: create_prefix_lists()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
def create_route_maps(tgen, input_dict, build=False):
"""
Create route-map on the devices as per the arguments passed
-
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
-
Usage
-----
# route_maps: key, value pair for route-map name and its attribute
@@ -1070,7 +1217,6 @@ def create_route_maps(tgen, input_dict, build=False):
# large_community: large community value to be attached
# community_additive: if set to "additive", adds community/large-community
value to the existing values of the network prefix
-
Example:
--------
input_dict = {
@@ -1086,7 +1232,6 @@ def create_route_maps(tgen, input_dict, build=False):
"ipv6": {
"prefix_list": "pf_list_1"
}
-
"large-community-list": {
"id": "community_1",
"exact_match": True
@@ -1119,14 +1264,13 @@ def create_route_maps(tgen, input_dict, build=False):
}
}
}
-
Returns
-------
errormsg(str) or True
"""
result = False
- logger.debug("Entering lib API: create_route_maps()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
for router in input_dict.keys():
@@ -1391,18 +1535,16 @@ def create_route_maps(tgen, input_dict, build=False):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: create_route_maps()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
def delete_route_maps(tgen, input_dict):
"""
Delete ip route maps from device
-
* `tgen` : Topogen object
* `input_dict` : for which router,
route map has to be deleted
-
Usage
-----
# Delete route-map rmap_1 and rmap_2 from router r1
@@ -1412,12 +1554,11 @@ def delete_route_maps(tgen, input_dict):
}
}
result = delete_route_maps("ipv4", input_dict)
-
Returns
-------
errormsg(str) or True
"""
- logger.info("Entering lib API: delete_route_maps()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
route_maps = input_dict[router]["route_maps"][:]
@@ -1433,7 +1574,6 @@ def create_bgp_community_lists(tgen, input_dict, build=False):
"""
Create bgp community-list or large-community-list on the devices as per
the arguments passed. Takes list of communities in input.
-
Parameters
----------
* `tgen` : Topogen object
@@ -1459,7 +1599,7 @@ def create_bgp_community_lists(tgen, input_dict, build=False):
"""
result = False
- logger.debug("Entering lib API: create_bgp_community_lists()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
for router in input_dict.keys():
@@ -1515,30 +1655,26 @@ def create_bgp_community_lists(tgen, input_dict, build=False):
logger.error(errormsg)
return errormsg
- logger.debug("Exiting lib API: create_bgp_community_lists()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False):
"""
    Shutdown or bring up a router's interface
-
* `tgen` : Topogen object
* `dut` : Device under test
* `intf_name` : Interface name to be shut/no shut
* `ifaceaction` : Action, to shut/no shut interface,
by default is False
-
Usage
-----
dut = "r3"
intf = "r3-r1-eth0"
    # Shut down interface
shutdown_bringup_interface(tgen, dut, intf, False)
-
    # Bring up interface
shutdown_bringup_interface(tgen, dut, intf, True)
-
Returns
-------
errormsg(str) or True
@@ -1564,7 +1700,6 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
    advertise_networks_using_network_command() and will verify next_hop and
    each prefix/route is present in "show ip/ipv6 route {bgp/static} json"
command o/p.
-
Parameters
----------
* `tgen` : topogen object
@@ -1574,14 +1709,12 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
* `next_hop`[optional]: next_hop which needs to be verified,
default: static
* `protocol`[optional]: protocol, default = None
-
Usage
-----
# RIB can be verified for static routes OR network advertised using
network command. Following are input_dicts to create static routes
and advertise networks using network command. Any one of the input_dict
can be passed to verify_rib() to verify routes in DUT"s RIB.
-
# Creating static routes for r1
input_dict = {
"r1": {
@@ -1599,13 +1732,12 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
dut = "r2"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol = protocol)
-
Returns
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_rib()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for routerInput in input_dict.keys():
@@ -1639,10 +1771,9 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
static_routes = input_dict[routerInput]["static_routes"]
st_found = False
nh_found = False
+ found_routes = []
+ missing_routes = []
for static_route in static_routes:
- found_routes = []
- missing_routes = []
-
network = static_route["network"]
if "no_of_ip" in static_route:
no_of_ip = static_route["no_of_ip"]
@@ -1741,15 +1872,15 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
if type(next_hop) is not list:
next_hop = [next_hop]
- for index, nh in enumerate(next_hop):
- if (
- rib_routes_json[st_rt][0]["nexthops"][
- index
- ]["ip"]
- == nh
- ):
+ for nh in next_hop:
+ for nh_json in rib_routes_json[st_rt][0][
+ "nexthops"
+ ]:
+ if nh != nh_json["ip"]:
+ continue
nh_found = True
- else:
+
+ if not nh_found:
errormsg = (
"Nexthop {} is Missing"
" for {} route {} in "
@@ -1780,7 +1911,7 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
" routes are: {}\n".format(addr_type, dut, found_routes)
)
- logger.debug("Exiting lib API: verify_rib()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1788,7 +1919,6 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
"""
API to verify admin distance for static routes as defined in input_dict/
input JSON by running show ip/ipv6 route json command.
-
Parameter
---------
* `tgen` : topogen object
@@ -1808,13 +1938,12 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
}
}
result = verify_admin_distance_for_static_routes(tgen, input_dict)
-
Returns
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1871,7 +2000,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
)
return errormsg
- logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1879,12 +2008,10 @@ def verify_prefix_lists(tgen, input_dict):
"""
Running "show ip prefix-list" command and verifying given prefix-list
is present in router.
-
Parameters
----------
* `tgen` : topogen object
* `input_dict`: data to verify prefix lists
-
Usage
-----
# To verify pf_list_1 is present in router r1
@@ -1893,13 +2020,12 @@ def verify_prefix_lists(tgen, input_dict):
"prefix_lists": ["pf_list_1"]
}}
result = verify_prefix_lists("ipv4", input_dict, tgen)
-
Returns
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_prefix_lists()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1930,7 +2056,7 @@ def verify_prefix_lists(tgen, input_dict):
router,
)
- logger.debug("Exiting lib API: verify_prefix_lists()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1957,7 +2083,7 @@ def verify_route_maps(tgen, input_dict):
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_route_maps()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1982,7 +2108,7 @@ def verify_route_maps(tgen, input_dict):
router,
)
- logger.debug("Exiting lib API: verify_route_maps()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1991,7 +2117,6 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
"""
    API to verify that a BGP large community is attached to a route for any given
DUT by running "show bgp ipv4/6 {route address} json" command.
-
Parameters
----------
* `tgen`: topogen object
@@ -2007,13 +2132,12 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
}
result = verify_bgp_community(tgen, "ipv4", dut, network, input_dict=None)
-
Returns
-------
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_community()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if router not in tgen.routers():
return False
@@ -2075,7 +2199,7 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
)
return errormsg
- logger.debug("Exiting lib API: verify_bgp_community()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -2104,7 +2228,7 @@ def verify_create_community_list(tgen, input_dict):
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_create_community_list()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
@@ -2132,5 +2256,5 @@ def verify_create_community_list(tgen, input_dict):
)
return errormsg
- logger.debug("Exiting lib API: verify_create_community_list()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 6a6bbc7c78..7629d8a504 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -707,6 +707,38 @@ class TopoRouter(TopoGear):
self.logger.debug("stopping")
return self.tgen.net[self.name].stopRouter(wait, assertOnError)
+ def startDaemons(self, daemons):
+ """
+ Start Daemons: to start specific daemon(user defined daemon only)
+ * Start daemons (e.g. FRR/Quagga)
+ * Configure daemon logging files
+ """
+ self.logger.debug('starting')
+ nrouter = self.tgen.net[self.name]
+ result = nrouter.startRouterDaemons(daemons)
+
+        # Enable command logging and per-daemon log files (relative to the
+        # start dir) for the daemons that were just started.
+        for daemon, enabled in nrouter.daemons.iteritems():
+            if enabled == 0 or daemon not in daemons:
+                continue
+            self.vtysh_cmd('configure terminal\nlog commands\nlog file {}.log'.\
+                format(daemon), daemon=daemon)
+
+ if result != '':
+ self.tgen.set_error(result)
+
+ return result
+
+ def killDaemons(self, daemons, wait=True, assertOnError=True):
+ """
+ Kill specific daemon(user defined daemon only)
+ forcefully using SIGKILL
+ """
+ self.logger.debug('Killing daemons using SIGKILL..')
+ return self.tgen.net[self.name].killRouterDaemons(daemons, wait, assertOnError)
+
def vtysh_cmd(self, command, isjson=False, daemon=None):
"""
Runs the provided command string in the vty shell and returns a string
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index fab101cb25..6464a29dd6 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1088,6 +1088,120 @@ class Router(Node):
def getLog(self, log, daemon):
return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
+ def startRouterDaemons(self, daemons):
+ # Starts actual daemons without init (ie restart)
+ # cd to per node directory
+ self.cmd('cd {}/{}'.format(self.logdir, self.name))
+ self.cmd('umask 000')
+ #Re-enable to allow for report per run
+ self.reportCores = True
+ rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
+
+ for daemon in daemons:
+ if daemon == 'zebra':
+ # Start Zebra first
+ if self.daemons['zebra'] == 1:
+ zebra_path = os.path.join(self.daemondir, 'zebra')
+ zebra_option = self.daemons_options['zebra']
+ self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
+ zebra_path, zebra_option, self.logdir, self.name
+ ))
+ self.waitOutput()
+ logger.debug('{}: {} zebra started'.format(self, self.routertype))
+ sleep(1, '{}: waiting for zebra to start'.format(self.name))
+
+ # Fix Link-Local Addresses
+ # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local
+ # addresses on start. Fix this
+ self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
+
+ if daemon == 'staticd':
+ # Start staticd next if required
+ if self.daemons['staticd'] == 1:
+ staticd_path = os.path.join(self.daemondir, 'staticd')
+ staticd_option = self.daemons_options['staticd']
+ self.cmd('{0} {1} > staticd.out 2> staticd.err &'.format(
+ staticd_path, staticd_option, self.logdir, self.name
+ ))
+ self.waitOutput()
+ logger.debug('{}: {} staticd started'.format(self, self.routertype))
+ sleep(1, '{}: waiting for staticd to start'.format(self.name))
+
+ # Now start all the daemons
+ # Skip disabled daemons and zebra
+ if self.daemons[daemon] == 0 or daemon == 'zebra' or daemon == 'staticd':
+ continue
+ daemon_path = os.path.join(self.daemondir, daemon)
+ self.cmd('{0} > {1}.out 2> {1}.err &'.format(
+ daemon_path, daemon
+ ))
+ self.waitOutput()
+ logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))
+ sleep(1, '{}: waiting for {} to start'.format(self.name, daemon))
+
+ rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
+
+ if re.search(r"No such file or directory", rundaemons):
+ return "Daemons are not running"
+
+ return ""
+
+ def killRouterDaemons(self, daemons, wait=True, assertOnError=True,
+ minErrorVersion='5.1'):
+ # Kill Running Quagga or FRR specific
+ # Daemons(user specified daemon only) using SIGKILL
+ rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
+ errors = ""
+ daemonsNotRunning = []
+ if re.search(r"No such file or directory", rundaemons):
+ return errors
+ for daemon in daemons:
+ if rundaemons is not None and daemon in rundaemons:
+ numRunning = 0
+ for d in StringIO.StringIO(rundaemons):
+ if re.search(r"%s" % daemon, d):
+ daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
+ if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
+ logger.info('{}: killing {}'.format(
+ self.name,
+ os.path.basename(d.rstrip().rsplit(".", 1)[0])
+ ))
+ self.cmd('kill -9 %s' % daemonpid)
+ self.waitOutput()
+ if pid_exists(int(daemonpid)):
+ numRunning += 1
+ if wait and numRunning > 0:
+ sleep(2, '{}: waiting for {} daemon to be stopped'.\
+ format(self.name, daemon))
+ # 2nd round of kill if daemons didn't exit
+ for d in StringIO.StringIO(rundaemons):
+ if re.search(r"%s" % daemon, d):
+ daemonpid = \
+ self.cmd('cat %s' % d.rstrip()).rstrip()
+ if (daemonpid.isdigit() and pid_exists(
+ int(daemonpid))):
+ logger.info('{}: killing {}'.format(
+ self.name,
+ os.path.basename(d.rstrip().\
+ rsplit(".", 1)[0])
+ ))
+ self.cmd('kill -9 %s' % daemonpid)
+ self.waitOutput()
+ self.cmd('rm -- {}'.format(d.rstrip()))
+ if wait:
+ errors = self.checkRouterCores(reportOnce=True)
+ if self.checkRouterVersion('<', minErrorVersion):
+ #ignore errors in old versions
+ errors = ""
+ if assertOnError and len(errors) > 0:
+ assert "Errors found - details follow:" == 0, errors
+ else:
+ daemonsNotRunning.append(daemon)
+ if len(daemonsNotRunning) > 0:
+            errors = errors + "Daemons are not running: {}".format(daemonsNotRunning)
+
+ return errors
+
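The kill path above leans entirely on FRR's per-daemon pid files. A hedged, stripped-down sketch of that pattern outside the Router class; the helper name and paths are illustrative, not library API:

import os
import signal

def kill_one_daemon(piddir, daemon):
    # each daemon writes <piddir>/<daemon>.pid; kill that pid and clean up
    pidfile = os.path.join(piddir, "{}.pid".format(daemon))
    if not os.path.exists(pidfile):
        return "{} not running".format(daemon)
    pid = int(open(pidfile).read().strip())
    os.kill(pid, signal.SIGKILL)
    os.remove(pidfile)
    return ""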
def checkRouterCores(self, reportLeaks=True, reportOnce=False):
if reportOnce and not self.reportCores:
return
diff --git a/tools/frr-llvm-cg.c b/tools/frr-llvm-cg.c
new file mode 100644
index 0000000000..84a756a376
--- /dev/null
+++ b/tools/frr-llvm-cg.c
@@ -0,0 +1,649 @@
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <http://unlicense.org/>
+
+/* based on example code: https://github.com/sheredom/llvm_bc_parsing_example
+ * which came under the above (un-)license. does not depend on any FRR
+ * pieces, so no reason to change the license.
+ *
+ * please note that while included in the FRR sources, this tool is in no way
+ * supported or maintained by the FRR community. it is provided as a
+ * "convenience"; while it worked at some point (using LLVM 8 / 9), it may
+ * easily break with a future LLVM version or any other factors.
+ *
+ * 2020-05-04, David Lamparter
+ */
+
+#include <string.h>
+#include <strings.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+
+#include <llvm-c/BitReader.h>
+#include <llvm-c/BitWriter.h>
+#include <llvm-c/Core.h>
+
+#include <json-c/json.h>
+
+/* if you want to use this without the special FRRouting defines,
+ * remove the following #define
+ */
+#define FRR_SPECIFIC
+
+static void dbgloc_add(struct json_object *jsobj, LLVMValueRef obj)
+{
+ unsigned file_len = 0;
+ const char *file = LLVMGetDebugLocFilename(obj, &file_len);
+ unsigned line = LLVMGetDebugLocLine(obj);
+
+ if (!file)
+ file = "???", file_len = 3;
+ else if (file[0] == '.' && file[1] == '/')
+ file += 2, file_len -= 2;
+
+ json_object_object_add(jsobj, "filename",
+ json_object_new_string_len(file, file_len));
+ json_object_object_add(jsobj, "line", json_object_new_int64(line));
+}
+
+static struct json_object *js_get_or_make(struct json_object *parent,
+ const char *key,
+ struct json_object *(*maker)(void))
+{
+ struct json_object *ret;
+
+ ret = json_object_object_get(parent, key);
+ if (ret)
+ return ret;
+ ret = maker();
+ json_object_object_add(parent, key, ret);
+ return ret;
+}
+
+static bool details_fptr_vars = false;
+static bool details_fptr_consts = true;
+
+enum called_fn {
+ FN_GENERIC = 0,
+ FN_NONAME,
+ FN_INSTALL_ELEMENT,
+ FN_THREAD_ADD,
+};
+
+static void walk_const_fptrs(struct json_object *js_call, LLVMValueRef value,
+ const char *prefix, bool *hdr_written)
+{
+ LLVMTypeRef type;
+ LLVMValueKind kind;
+
+ if (LLVMIsAGlobalVariable(value)) {
+ type = LLVMGlobalGetValueType(value);
+ value = LLVMGetInitializer(value);
+ } else {
+ type = LLVMTypeOf(value);
+ }
+
+ if (LLVMIsAFunction(value)) {
+ struct json_object *js_fptrs;
+
+ js_fptrs = js_get_or_make(js_call, "funcptrs",
+ json_object_new_array);
+
+ size_t fn_len;
+ const char *fn_name = LLVMGetValueName2(value, &fn_len);
+
+ size_t curlen = json_object_array_length(js_fptrs);
+ struct json_object *jsobj;
+ const char *s;
+
+ for (size_t i = 0; i < curlen; i++) {
+ jsobj = json_object_array_get_idx(js_fptrs, i);
+ s = json_object_get_string(jsobj);
+
+ if (s && !strcmp(s, fn_name))
+ return;
+ }
+
+ if (details_fptr_consts && !*hdr_written) {
+ fprintf(stderr,
+ "%s: calls function pointer from constant or global data\n",
+ prefix);
+ *hdr_written = true;
+ }
+ if (details_fptr_consts)
+ fprintf(stderr, "%s- constant: %.*s()\n",
+ prefix, (int)fn_len, fn_name);
+
+ json_object_array_add(js_fptrs,
+ json_object_new_string_len(fn_name,
+ fn_len));
+ return;
+ }
+
+ kind = LLVMGetValueKind(value);
+
+ unsigned len;
+ char *dump;
+
+ switch (kind) {
+ case LLVMUndefValueValueKind:
+ case LLVMConstantAggregateZeroValueKind:
+ case LLVMConstantPointerNullValueKind:
+ /* null pointer / array - ignore */
+ break;
+
+ case LLVMConstantIntValueKind:
+ /* integer - ignore */
+ break;
+
+ case LLVMConstantStructValueKind:
+ len = LLVMCountStructElementTypes(type);
+ for (unsigned i = 0; i < len; i++)
+ walk_const_fptrs(js_call, LLVMGetOperand(value, i),
+ prefix, hdr_written);
+ break;
+
+ case LLVMConstantArrayValueKind:
+ len = LLVMGetArrayLength(type);
+ for (unsigned i = 0; i < len; i++)
+ walk_const_fptrs(js_call, LLVMGetOperand(value, i),
+ prefix, hdr_written);
+ return;
+
+ default:
+ /* to help the user / development */
+ if (!*hdr_written) {
+ fprintf(stderr,
+ "%s: calls function pointer from constant or global data\n",
+ prefix);
+ *hdr_written = true;
+ }
+ dump = LLVMPrintValueToString(value);
+ fprintf(stderr,
+ "%s- value could not be processed:\n"
+ "%s- [kind=%d] %s\n",
+ prefix, prefix, kind, dump);
+ LLVMDisposeMessage(dump);
+ return;
+ }
+ return;
+}
+
+#ifdef FRR_SPECIFIC
+static bool is_thread_sched(const char *name, size_t len)
+{
+#define thread_prefix "funcname_"
+ static const char *const names[] = {
+ thread_prefix "thread_add_read_write",
+ thread_prefix "thread_add_timer",
+ thread_prefix "thread_add_timer_msec",
+ thread_prefix "thread_add_timer_tv",
+ thread_prefix "thread_add_event",
+ thread_prefix "thread_execute",
+ };
+ size_t i;
+
+ for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
+ if (strlen(names[i]) != len)
+ continue;
+ if (!memcmp(names[i], name, len))
+ return true;
+ }
+ return false;
+}
+#endif
+
+static void process_call(struct json_object *js_calls,
+ struct json_object *js_special,
+ LLVMValueRef instr,
+ LLVMValueRef function)
+{
+ struct json_object *js_call, *js_fptrs = NULL;
+
+ LLVMValueRef called = LLVMGetCalledValue(instr);
+
+ if (LLVMIsAConstantExpr(called)) {
+ LLVMOpcode opcode = LLVMGetConstOpcode(called);
+
+ if (opcode == LLVMBitCast) {
+ LLVMValueRef op0 = LLVMGetOperand(called, 0);
+
+ if (LLVMIsAFunction(op0))
+ called = op0;
+ }
+ }
+
+ size_t called_len = 0;
+ const char *called_name = LLVMGetValueName2(called, &called_len);
+ unsigned n_args = LLVMGetNumArgOperands(instr);
+
+ bool is_external = LLVMIsDeclaration(called);
+ enum called_fn called_type = FN_GENERIC;
+
+ js_call = json_object_new_object();
+ json_object_array_add(js_calls, js_call);
+ dbgloc_add(js_call, instr);
+ json_object_object_add(js_call, "is_external",
+ json_object_new_boolean(is_external));
+
+ if (!called_name || called_len == 0) {
+ called_type = FN_NONAME;
+ json_object_object_add(js_call, "type",
+ json_object_new_string("indirect"));
+
+ LLVMValueRef last = called;
+
+ size_t name_len = 0;
+ const char *name_c = LLVMGetValueName2(function, &name_len);
+
+#ifdef FRR_SPECIFIC
+ /* information for FRR hooks is dumped for the registration
+ * in _hook_typecheck; we can safely ignore the funcptr here
+ */
+ if (strncmp(name_c, "hook_call_", 10) == 0)
+ return;
+#endif
+
+ unsigned file_len = 0;
+ const char *file = LLVMGetDebugLocFilename(instr, &file_len);
+ unsigned line = LLVMGetDebugLocLine(instr);
+
+ char prefix[256];
+ snprintf(prefix, sizeof(prefix), "%.*s:%d:%.*s()",
+ (int)file_len, file, line, (int)name_len, name_c);
+
+ while (LLVMIsALoadInst(last) || LLVMIsAGetElementPtrInst(last))
+ /* skipping over details for GEP here, but meh. */
+ last = LLVMGetOperand(last, 0);
+
+ if (LLVMIsAAllocaInst(last)) {
+ /* "alloca" is just generically all variables on the
+ * stack, this does not refer to C alloca() calls
+ *
+ * looking at the control flow in the function can
+ * give better results here, it's just not implemented
+ * (yet?)
+ */
+ fprintf(stderr,
+ "%s: call to a function pointer variable\n",
+ prefix);
+
+ if (details_fptr_vars) {
+ char *dump = LLVMPrintValueToString(called);
+ printf("%s- %s\n", prefix, dump);
+ LLVMDisposeMessage(dump);
+ }
+
+ json_object_object_add(
+ js_call, "type",
+ json_object_new_string("stack_fptr"));
+ } else if (LLVMIsACallInst(last)) {
+ /* calling the a function pointer returned from
+			/* calling a function pointer returned from
+ */
+ struct json_object *js_indirect;
+
+ js_indirect = js_get_or_make(js_call, "return_of",
+ json_object_new_array);
+
+ process_call(js_indirect, js_special, last, function);
+ } else if (LLVMIsAConstant(last)) {
+ /* function pointer is a constant (includes loading
+ * from complicated constants like structs or arrays.)
+ */
+ bool hdr_written = false;
+ walk_const_fptrs(js_call, last, prefix, &hdr_written);
+ if (details_fptr_consts && !hdr_written)
+ fprintf(stderr,
+ "%s: calls function pointer from constant or global data, but no non-NULL function pointers found\n",
+ prefix);
+ } else {
+ char *dump = LLVMPrintValueToString(called);
+ printf("\t%s\n", dump);
+ LLVMDisposeMessage(dump);
+ }
+ return;
+#ifdef FRR_SPECIFIC
+ } else if (!strcmp(called_name, "install_element")) {
+ called_type = FN_INSTALL_ELEMENT;
+
+ LLVMValueRef param0 = LLVMGetOperand(instr, 0);
+ if (!LLVMIsAConstantInt(param0))
+ goto out_nonconst;
+
+ long long vty_node = LLVMConstIntGetSExtValue(param0);
+ json_object_object_add(js_call, "vty_node",
+ json_object_new_int64(vty_node));
+
+ LLVMValueRef param1 = LLVMGetOperand(instr, 1);
+ if (!LLVMIsAGlobalVariable(param1))
+ goto out_nonconst;
+
+ LLVMValueRef intlz = LLVMGetInitializer(param1);
+ assert(intlz && LLVMIsConstant(intlz));
+
+ LLVMValueKind intlzkind = LLVMGetValueKind(intlz);
+ assert(intlzkind == LLVMConstantStructValueKind);
+
+ LLVMValueRef funcptr = LLVMGetOperand(intlz, 4);
+ assert(LLVMIsAFunction(funcptr));
+
+ size_t target_len = 0;
+ const char *target;
+ target = LLVMGetValueName2(funcptr, &target_len);
+
+ json_object_object_add(
+ js_call, "type",
+ json_object_new_string("install_element"));
+ json_object_object_add(
+ js_call, "target",
+ json_object_new_string_len(target, target_len));
+ return;
+
+ out_nonconst:
+ json_object_object_add(
+ js_call, "target",
+ json_object_new_string("install_element"));
+ return;
+ } else if (is_thread_sched(called_name, called_len)) {
+ called_type = FN_THREAD_ADD;
+
+ json_object_object_add(js_call, "type",
+ json_object_new_string("thread_sched"));
+ json_object_object_add(
+ js_call, "subtype",
+ json_object_new_string_len(called_name, called_len));
+
+ LLVMValueRef fparam;
+ if (strstr(called_name, "_read_"))
+ fparam = LLVMGetOperand(instr, 2);
+ else
+ fparam = LLVMGetOperand(instr, 1);
+ assert(fparam);
+
+ size_t target_len = 0;
+ const char *target;
+ target = LLVMGetValueName2(fparam, &target_len);
+
+ json_object_object_add(js_call, "target",
+ !target_len ? NULL :
+ json_object_new_string_len(target, target_len));
+ if (!LLVMIsAFunction(fparam))
+ json_object_object_add(js_call, "target_unresolved",
+ json_object_new_boolean(true));
+ return;
+ } else if (!strncmp(called_name, "_hook_typecheck_",
+ strlen("_hook_typecheck_"))) {
+ struct json_object *js_hook, *js_this;
+ const char *hook_name;
+
+ hook_name = called_name + strlen("_hook_typecheck_");
+
+ json_object_object_add(js_call, "type",
+ json_object_new_string("hook"));
+
+ LLVMValueRef param0 = LLVMGetOperand(instr, 0);
+ if (!LLVMIsAFunction(param0))
+ return;
+
+ size_t target_len = 0;
+ const char *target;
+ target = LLVMGetValueName2(param0, &target_len);
+
+ js_hook = js_get_or_make(js_special, "hooks",
+ json_object_new_object);
+ js_hook = js_get_or_make(js_hook, hook_name,
+ json_object_new_array);
+
+ js_this = json_object_new_object();
+ json_object_array_add(js_hook, js_this);
+
+ dbgloc_add(js_this, instr);
+ json_object_object_add(
+ js_this, "target",
+ json_object_new_string_len(target, target_len));
+ return;
+
+ /* TODO (FRR specifics):
+ * - workqueues - not sure we can do much there
+ * - zclient->* ?
+ */
+#endif /* FRR_SPECIFIC */
+ } else {
+ json_object_object_add(
+ js_call, "target",
+ json_object_new_string_len(called_name, called_len));
+ }
+
+ for (unsigned argno = 0; argno < n_args; argno++) {
+ LLVMValueRef param = LLVMGetOperand(instr, argno);
+ size_t target_len;
+ const char *target_name;
+
+ if (LLVMIsAFunction(param)) {
+ js_fptrs = js_get_or_make(js_call, "funcptrs",
+ json_object_new_array);
+
+ target_name = LLVMGetValueName2(param, &target_len);
+
+ json_object_array_add(js_fptrs,
+ json_object_new_string_len(
+ target_name, target_len));
+ }
+ }
+}
+
+static void process_fn(struct json_object *funcs,
+ struct json_object *js_special,
+ LLVMValueRef function)
+{
+ struct json_object *js_func, *js_calls;
+
+ size_t name_len = 0;
+ const char *name_c = LLVMGetValueName2(function, &name_len);
+ char *name;
+
+ name = strndup(name_c, name_len);
+
+ js_func = json_object_object_get(funcs, name);
+ if (js_func) {
+ unsigned file_len = 0;
+ const char *file = LLVMGetDebugLocFilename(function, &file_len);
+ unsigned line = LLVMGetDebugLocLine(function);
+
+ fprintf(stderr, "%.*s:%d:%s(): duplicate definition!\n",
+ (int)file_len, file, line, name);
+ free(name);
+ return;
+ }
+
+ js_func = json_object_new_object();
+ json_object_object_add(funcs, name, js_func);
+ free(name);
+
+ js_calls = json_object_new_array();
+ json_object_object_add(js_func, "calls", js_calls);
+
+ dbgloc_add(js_func, function);
+
+ for (LLVMBasicBlockRef basicBlock = LLVMGetFirstBasicBlock(function);
+ basicBlock; basicBlock = LLVMGetNextBasicBlock(basicBlock)) {
+
+ for (LLVMValueRef instr = LLVMGetFirstInstruction(basicBlock);
+ instr; instr = LLVMGetNextInstruction(instr)) {
+
+ if (LLVMIsAIntrinsicInst(instr))
+ continue;
+
+ if (LLVMIsACallInst(instr) || LLVMIsAInvokeInst(instr))
+ process_call(js_calls, js_special, instr,
+ function);
+ }
+ }
+}
+
+static void help(int retcode)
+{
+ fprintf(stderr,
+ "FRR LLVM bitcode to callgraph analyzer\n"
+ "\n"
+ "usage: frr-llvm-cg [-q|-v] [-o <JSONOUTPUT>] BITCODEINPUT\n"
+ "\n"
+ "\t-o FILENAME\twrite JSON output to file instead of stdout\n"
+ "\t-v\t\tbe more verbose\n"
+ "\t-q\t\tbe quiet\n"
+ "\n"
+		"BITCODEINPUT must be an LLVM binary bitcode file (not text\n"
+ "representation.) Use - to read from stdin.\n"
+ "\n"
+ "Note it may be necessary to build this binary tool against\n"
+ "the specific LLVM version that created the bitcode file.\n");
+ exit(retcode);
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+ const char *out = NULL;
+ const char *inp = NULL;
+ char v_or_q = '\0';
+
+ while ((opt = getopt(argc, argv, "hvqo:")) != -1) {
+ switch (opt) {
+ case 'o':
+ if (out)
+ help(1);
+ out = optarg;
+ break;
+ case 'v':
+ if (v_or_q && v_or_q != 'v')
+ help(1);
+ details_fptr_vars = true;
+ details_fptr_consts = true;
+ v_or_q = 'v';
+ break;
+ case 'q':
+ if (v_or_q && v_or_q != 'q')
+ help(1);
+ details_fptr_vars = false;
+ details_fptr_consts = false;
+ v_or_q = 'q';
+ break;
+ case 'h':
+ help(0);
+ return 0;
+ default:
+ help(1);
+ }
+ }
+
+ if (optind != argc - 1)
+ help(1);
+
+ inp = argv[optind];
+
+ LLVMMemoryBufferRef memoryBuffer;
+ char *message;
+ int ret;
+
+ // check if we are to read our input file from stdin
+ if (!strcmp(inp, "-")) {
+ inp = "<stdin>";
+ ret = LLVMCreateMemoryBufferWithSTDIN(&memoryBuffer, &message);
+ } else {
+ ret = LLVMCreateMemoryBufferWithContentsOfFile(
+ inp, &memoryBuffer, &message);
+ }
+
+ if (ret) {
+ fprintf(stderr, "failed to open %s: %s\n", inp, message);
+ free(message);
+ return 1;
+ }
+
+ // now create our module using the memorybuffer
+ LLVMModuleRef module;
+ if (LLVMParseBitcode2(memoryBuffer, &module)) {
+ fprintf(stderr, "%s: invalid bitcode\n", inp);
+ LLVMDisposeMemoryBuffer(memoryBuffer);
+ return 1;
+ }
+
+ // done with the memory buffer now, so dispose of it
+ LLVMDisposeMemoryBuffer(memoryBuffer);
+
+ struct json_object *js_root, *js_funcs, *js_special;
+
+ js_root = json_object_new_object();
+ js_funcs = json_object_new_object();
+ json_object_object_add(js_root, "functions", js_funcs);
+ js_special = json_object_new_object();
+ json_object_object_add(js_root, "special", js_special);
+
+ // loop through all the functions in the module
+ for (LLVMValueRef function = LLVMGetFirstFunction(module); function;
+ function = LLVMGetNextFunction(function)) {
+ if (LLVMIsDeclaration(function))
+ continue;
+
+ process_fn(js_funcs, js_special, function);
+ }
+
+ if (out) {
+ char tmpout[strlen(out) + 5];
+
+ snprintf(tmpout, sizeof(tmpout), "%s.tmp", out);
+ ret = json_object_to_file_ext(tmpout, js_root,
+ JSON_C_TO_STRING_PRETTY |
+ JSON_C_TO_STRING_PRETTY_TAB |
+ JSON_C_TO_STRING_NOSLASHESCAPE);
+ if (ret < 0) {
+ fprintf(stderr, "could not write JSON to file\n");
+ return 1;
+ }
+ if (rename(tmpout, out)) {
+ fprintf(stderr, "could not rename JSON output: %s\n",
+ strerror(errno));
+ unlink(tmpout);
+ return 1;
+ }
+ } else {
+ ret = json_object_to_fd(1, js_root,
+ JSON_C_TO_STRING_PRETTY |
+ JSON_C_TO_STRING_PRETTY_TAB |
+ JSON_C_TO_STRING_NOSLASHESCAPE);
+ if (ret < 0) {
+ fprintf(stderr, "could not write JSON to stdout\n");
+ return 1;
+ }
+ }
+
+ LLVMDisposeModule(module);
+
+ return 0;
+}
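The emitted JSON keeps a "functions" map with one "calls" array per function, each call carrying an optional "target". A hedged Python consumer that prints direct caller -> callee edges; only those keys (written by the C code above) are assumed, the file handling is illustrative:

#!/usr/bin/env python
import json
import sys

with open(sys.argv[1]) as fd:        # e.g. an illustrative bgpd.cg.json
    cg = json.load(fd)

for caller, info in sorted(cg["functions"].items()):
    for call in info.get("calls", []):
        target = call.get("target")
        if target:
            print("%s -> %s" % (caller, target))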
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 1f9f425386..c10eb487e6 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -155,7 +155,7 @@ class Config(object):
try:
config_text = subprocess.check_output(
bindir + "/vtysh --config_dir " + confdir + " -c 'show run " + daemon + "' | /usr/bin/tail -n +4 | " + bindir + "/vtysh --config_dir " + confdir + " -m -f -",
- shell=True)
+ shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
ve = VtyshMarkException(e)
ve.output = e.output
@@ -779,13 +779,14 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del):
bfd_search_string = bfd_nbr + r' bfd (\S+) (\S+) (\S+)'
for (ctx_keys, add_line) in lines_to_add:
- re_add_nbr_bfd_timers = re.search(bfd_search_string, add_line)
+ if ctx_keys[0].startswith('router bgp'):
+ re_add_nbr_bfd_timers = re.search(bfd_search_string, add_line)
- if re_add_nbr_bfd_timers:
- found_add_bfd_nbr = line_exist(lines_to_add, ctx_keys, bfd_nbr, False)
+ if re_add_nbr_bfd_timers:
+ found_add_bfd_nbr = line_exist(lines_to_add, ctx_keys, bfd_nbr, False)
- if found_add_bfd_nbr:
- lines_to_del_to_del.append((ctx_keys, line))
+ if found_add_bfd_nbr:
+ lines_to_del_to_del.append((ctx_keys, line))
'''
We changed how we display the neighbor interface command. Older
@@ -1135,7 +1136,7 @@ def vtysh_config_available(bindir, confdir):
try:
cmd = [str(bindir + '/vtysh'), '--config_dir', confdir, '-c', 'conf t']
- output = subprocess.check_output(cmd).strip()
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
if 'VTY configuration is locked by other VTY' in output.decode('utf-8'):
print(output)
@@ -1393,7 +1394,7 @@ if __name__ == '__main__':
while True:
try:
- _ = subprocess.check_output(cmd)
+ _ = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
@@ -1443,7 +1444,7 @@ if __name__ == '__main__':
fh.write(line + '\n')
try:
- subprocess.check_output([str(args.bindir + '/vtysh'), '--config_dir', args.confdir, '-f', filename])
+ subprocess.check_output([str(args.bindir + '/vtysh'), '--config_dir', args.confdir, '-f', filename], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
log.warning("frr-reload.py failed due to\n%s" % e.output)
reload_ok = False
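The frr-reload.py hunks above all apply the same small pattern: fold vtysh's stderr into the captured stdout so warnings become part of the output string (and of e.output on failure) instead of leaking to the terminal. A hedged standalone sketch of that pattern, with a placeholder invocation:

import subprocess

try:
    out = subprocess.check_output(
        ["vtysh", "-c", "show running-config"],   # placeholder command
        stderr=subprocess.STDOUT,
    )
except subprocess.CalledProcessError as e:
    print("vtysh failed:\n%s" % e.output)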
diff --git a/tools/frr.in b/tools/frr.in
index d9816c2568..40862aa4c9 100755
--- a/tools/frr.in
+++ b/tools/frr.in
@@ -21,6 +21,7 @@ VTYSH="@vtysh_bin@" # /usr/bin/vtysh
FRR_USER="@enable_user@" # frr
FRR_GROUP="@enable_group@" # frr
FRR_VTY_GROUP="@enable_vty_group@" # frrvty
+FRR_CONFIG_MODE="@enable_configfile_mask@" # 0600
FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter
# Local Daemon selection may be done by using /etc/frr/daemons.
@@ -56,6 +57,7 @@ chownfrr()
{
test -n "$FRR_USER" && chown "$FRR_USER" "$1"
test -n "$FRR_GROUP" && chgrp "$FRR_GROUP" "$1"
+ test -n "$FRR_CONFIG_MODE" && chmod "$FRR_CONFIG_MODE" "$1"
}
# Check if daemon is started by using the pidfile.
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index 0dfdfd0efa..9dc8cea609 100644
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -24,6 +24,7 @@ VTYSH="@vtysh_bin@" # /usr/bin/vtysh
FRR_USER="@enable_user@" # frr
FRR_GROUP="@enable_group@" # frr
FRR_VTY_GROUP="@enable_vty_group@" # frrvty
+FRR_CONFIG_MODE="@enable_configfile_mask@" # 0600
FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter
# ORDER MATTERS FOR $DAEMONS!
@@ -53,6 +54,7 @@ debug() {
chownfrr() {
[ -n "$FRR_USER" ] && chown "$FRR_USER" "$1"
[ -n "$FRR_GROUP" ] && chgrp "$FRR_GROUP" "$1"
+ [ -n "$FRR_CONFIG_MODE" ] && chmod "$FRR_CONFIG_MODE" "$1"
}
vtysh_b () {
diff --git a/tools/subdir.am b/tools/subdir.am
index c637db6eb1..723a87d100 100644
--- a/tools/subdir.am
+++ b/tools/subdir.am
@@ -8,6 +8,10 @@ noinst_PROGRAMS += \
tools/gen_yang_deviations \
# end
+EXTRA_PROGRAMS += \
+ tools/frr-llvm-cg \
+ # end
+
sbin_PROGRAMS += tools/ssd
sbin_SCRIPTS += \
tools/frr-reload \
@@ -31,6 +35,14 @@ tools_gen_yang_deviations_LDADD = lib/libfrr.la $(LIBYANG_LIBS)
tools_ssd_SOURCES = tools/start-stop-daemon.c
+# don't bother autoconf'ing these for a simple optional tool
+llvm_version = $(shell echo __clang_major__ | $(CC) -xc -P -E -)
+tools_frr_llvm_cg_CFLAGS = $(AM_CFLAGS) `llvm-config-$(llvm_version) --cflags`
+tools_frr_llvm_cg_LDFLAGS = `llvm-config-$(llvm_version) --ldflags --libs`
+tools_frr_llvm_cg_SOURCES = \
+ tools/frr-llvm-cg.c \
+ # end
+
EXTRA_DIST += \
tools/etc \
tools/frr-reload \
diff --git a/yang/LICENSE b/yang/LICENSE
new file mode 100644
index 0000000000..a7cec5de91
--- /dev/null
+++ b/yang/LICENSE
@@ -0,0 +1,22 @@
+Copyright 2020 FRRouting
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/yang/frr-bfdd.yang b/yang/frr-bfdd.yang
index b870bfd0c8..a6d537f01c 100644
--- a/yang/frr-bfdd.yang
+++ b/yang/frr-bfdd.yang
@@ -21,7 +21,32 @@ module frr-bfdd {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR bfdd daemon.";
+ "This module defines a model for managing FRR bfdd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-05-09 {
description "Initial revision.";
diff --git a/yang/frr-eigrpd.yang b/yang/frr-eigrpd.yang
index 092b714045..3d1bf3baa5 100644
--- a/yang/frr-eigrpd.yang
+++ b/yang/frr-eigrpd.yang
@@ -21,7 +21,32 @@ module frr-eigrpd {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR eigrpd daemon.";
+ "This module defines a model for managing FRR eigrpd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-09-09 {
description
diff --git a/yang/frr-filter.yang b/yang/frr-filter.yang
index 61ffa51552..c55af34161 100644
--- a/yang/frr-filter.yang
+++ b/yang/frr-filter.yang
@@ -14,7 +14,33 @@ module frr-filter {
contact
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
- description "This module defines filter settings";
+ description
+ "This module defines filter settings
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-07-04 {
description "Initial revision";
diff --git a/yang/frr-igmp.yang b/yang/frr-igmp.yang
index 534fd78480..b63d0f97ec 100644
--- a/yang/frr-igmp.yang
+++ b/yang/frr-igmp.yang
@@ -28,7 +28,32 @@ module frr-igmp {
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR pimd daemon.";
+ "This module defines a model for managing FRR pimd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-11-06 {
description
diff --git a/yang/frr-interface.yang b/yang/frr-interface.yang
index 7ebba935a4..dade9f97fe 100644
--- a/yang/frr-interface.yang
+++ b/yang/frr-interface.yang
@@ -21,7 +21,32 @@ module frr-interface {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR interfaces.";
+ "This module defines a model for managing FRR interfaces.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2020-02-05 {
description
diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang
index fc0110c24a..5882f0522f 100644
--- a/yang/frr-isisd.yang
+++ b/yang/frr-isisd.yang
@@ -25,7 +25,32 @@ module frr-isisd {
"FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development
List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR isisd daemon.";
+ "This module defines a model for managing FRR isisd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2020-04-06 {
description
diff --git a/yang/frr-module-translator.yang b/yang/frr-module-translator.yang
index 6713eae76e..90d3cc8601 100644
--- a/yang/frr-module-translator.yang
+++ b/yang/frr-module-translator.yang
@@ -9,7 +9,32 @@ module frr-module-translator {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "A model for FRR YANG module translators.";
+ "A model for FRR YANG module translators.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2018-07-31 {
description
diff --git a/yang/frr-nexthop.yang b/yang/frr-nexthop.yang
index 07e15eb774..ce6f21a663 100644
--- a/yang/frr-nexthop.yang
+++ b/yang/frr-nexthop.yang
@@ -25,7 +25,32 @@ module frr-nexthop {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR nexthop information.";
+ "This module defines a model for managing FRR nexthop information.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-08-15 {
description
diff --git a/yang/frr-pim-rp.yang b/yang/frr-pim-rp.yang
index fb0a5aa209..a2eca5100a 100644
--- a/yang/frr-pim-rp.yang
+++ b/yang/frr-pim-rp.yang
@@ -29,7 +29,32 @@ module frr-pim-rp {
description
"The module defines a collection of YANG definitions common for
- all PIM (Protocol Independent Multicast) RP (Rendezvous Point) model.";
+ all PIM (Protocol Independent Multicast) RP (Rendezvous Point) model.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2017-03-09 {
description
diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang
index 3b2ebdf6d3..2135d22f67 100644
--- a/yang/frr-pim.yang
+++ b/yang/frr-pim.yang
@@ -29,7 +29,32 @@ module frr-pim {
description
"The module defines a collection of YANG definitions common for
- PIM (Protocol Independent Multicast) model.";
+ PIM (Protocol Independent Multicast) model.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2017-03-09 {
description
diff --git a/yang/frr-ripd.yang b/yang/frr-ripd.yang
index 12c72b39b5..4e5795b8f7 100644
--- a/yang/frr-ripd.yang
+++ b/yang/frr-ripd.yang
@@ -22,7 +22,32 @@ module frr-ripd {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR ripd daemon.";
+ "This module defines a model for managing FRR ripd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-09-09 {
description
diff --git a/yang/frr-ripngd.yang b/yang/frr-ripngd.yang
index c58962f5cd..47ae67b238 100644
--- a/yang/frr-ripngd.yang
+++ b/yang/frr-ripngd.yang
@@ -22,7 +22,32 @@ module frr-ripngd {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR ripngd daemon.";
+ "This module defines a model for managing FRR ripngd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-09-09 {
description
diff --git a/yang/frr-route-map.yang b/yang/frr-route-map.yang
index 106593d9d3..f35a2976d1 100644
--- a/yang/frr-route-map.yang
+++ b/yang/frr-route-map.yang
@@ -17,7 +17,33 @@ module frr-route-map {
contact
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
- description "This module defines route map settings";
+ description
+ "This module defines route map settings
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-07-01 {
description "Initial revision";
diff --git a/yang/frr-route-types.yang b/yang/frr-route-types.yang
index 8fdd10121e..057c32a7e7 100644
--- a/yang/frr-route-types.yang
+++ b/yang/frr-route-types.yang
@@ -9,7 +9,32 @@ module frr-route-types {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines typedefs for route types.";
+ "This module defines typedefs for route types.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2018-03-28 {
description
diff --git a/yang/frr-routing.yang b/yang/frr-routing.yang
index ff84954aaf..5a06e597e5 100644
--- a/yang/frr-routing.yang
+++ b/yang/frr-routing.yang
@@ -20,7 +20,32 @@ module frr-routing {
description
"This YANG module defines essential components for the management
- of a routing subsystem.";
+ of a routing subsystem.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-08-15 {
description
diff --git a/yang/frr-staticd.yang b/yang/frr-staticd.yang
index ce8d641de1..f59158a0fd 100644
--- a/yang/frr-staticd.yang
+++ b/yang/frr-staticd.yang
@@ -26,7 +26,32 @@ module frr-staticd {
description
"This module defines a model for managing FRR staticd information.
This YANG module augments the ietf-routing with additional
- nexthop information";
+ nexthop information
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-12-03 {
description
diff --git a/yang/frr-test-module.yang b/yang/frr-test-module.yang
index d85b12ea06..61915b1349 100644
--- a/yang/frr-test-module.yang
+++ b/yang/frr-test-module.yang
@@ -13,6 +13,34 @@ module frr-test-module {
prefix frr-interface;
}
+ description
+ "FRRouting internal testing module.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
+
revision 2018-11-26 {
description
"Initial revision.";
diff --git a/yang/frr-vrf.yang b/yang/frr-vrf.yang
index 4924a86e89..bb17bfaddb 100644
--- a/yang/frr-vrf.yang
+++ b/yang/frr-vrf.yang
@@ -9,7 +9,32 @@ module frr-vrf {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR VRF.";
+ "This module defines a model for managing FRR VRF.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-12-06 {
description
diff --git a/yang/frr-vrrpd.yang b/yang/frr-vrrpd.yang
index 145387c4b4..c99d6d9877 100644
--- a/yang/frr-vrrpd.yang
+++ b/yang/frr-vrrpd.yang
@@ -21,7 +21,32 @@ module frr-vrrpd {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing FRR vrrpd daemon.";
+ "This module defines a model for managing FRR vrrpd daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-09-09 {
description
diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang
index 26e30b5fa9..4aeba14129 100644
--- a/yang/frr-zebra.yang
+++ b/yang/frr-zebra.yang
@@ -45,7 +45,32 @@ module frr-zebra {
"FRR Users List: <mailto:frog@lists.frrouting.org>
FRR Development List: <mailto:dev@lists.frrouting.org>";
description
- "This module defines a model for managing the FRR zebra daemon.";
+ "This module defines a model for managing the FRR zebra daemon.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
revision 2019-06-01 {
description
@@ -607,45 +632,44 @@ module frr-zebra {
"RIBs supported by FRR.";
list rib {
key "afi-safi-name table-id";
- leaf table-id {
- type uint32;
- description
- "Routing Table id (default id - 254).";
- }
-
leaf afi-safi-name {
type identityref {
base afi-safi-type;
}
- mandatory true;
description
"AFI, SAFI name.";
}
+ leaf table-id {
+ type uint32;
+ description
+ "Routing Table id (default id - 254).";
+ }
+
list route {
key "prefix";
config false;
leaf prefix {
- type inet:ip-prefix;
- description
- "The route's prefix.";
+ type inet:ip-prefix;
+ description
+ "The route's prefix.";
}
+
list route-entry {
key "protocol";
leaf protocol {
- type frr-route-types:frr-route-types-v4;
- //TODO: Use unified route types done in PR 5183 when it is merged.
- //type frr-route-types:frr-route-types;
- description
- "The protocol owning the route.";
+ type frr-route-types:frr-route-types;
+ description
+ "The protocol owning the route.";
}
leaf instance {
type uint16;
must "../protocol = \"ospf\"";
description
- "Retrieve routes from a specific OSPF instance.";
+ "Retrieve routes from a specific OSPF instance.";
}
+
uses route-common;
}
}
@@ -2045,10 +2069,12 @@ module frr-zebra {
augment "/frr-vrf:lib/frr-vrf:vrf" {
description
"Extends VRF model with Zebra-related parameters.";
- uses ribs;
+ container zebra {
+ uses ribs;
+ }
}
- augment "/frr-vrf:lib/frr-vrf:vrf/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop" {
+ augment "/frr-vrf:lib/frr-vrf:vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop" {
uses frr-nh:frr-nexthop-operational;
}
diff --git a/zebra/connected.c b/zebra/connected.c
index 5c713aa970..a982ac9b46 100644
--- a/zebra/connected.c
+++ b/zebra/connected.c
@@ -566,5 +566,5 @@ int connected_is_unnumbered(struct interface *ifp)
return CHECK_FLAG(connected->flags,
ZEBRA_IFA_UNNUMBERED);
}
- return 1;
+ return 0;
}
diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c
index 17b6edfed0..429bb968a5 100644
--- a/zebra/if_netlink.c
+++ b/zebra/if_netlink.c
@@ -310,22 +310,25 @@ static void netlink_vrf_change(struct nlmsghdr *h, struct rtattr *tb,
nl_table_id = *(uint32_t *)RTA_DATA(attr[IFLA_VRF_TABLE]);
if (h->nlmsg_type == RTM_NEWLINK) {
- vrf_id_t exist_id;
-
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("RTM_NEWLINK for VRF %s(%u) table %u", name,
ifi->ifi_index, nl_table_id);
- exist_id = vrf_lookup_by_table(nl_table_id, ns_id);
- if (exist_id != VRF_DEFAULT) {
- vrf = vrf_lookup_by_id(exist_id);
+ if (!vrf_lookup_by_id((vrf_id_t)ifi->ifi_index)) {
+ vrf_id_t exist_id;
- flog_err(
- EC_ZEBRA_VRF_MISCONFIGURED,
- "VRF %s id %u table id overlaps existing vrf %s, misconfiguration exiting",
- name, ifi->ifi_index, vrf->name);
- exit(-1);
+ exist_id = vrf_lookup_by_table(nl_table_id, ns_id);
+ if (exist_id != VRF_DEFAULT) {
+ vrf = vrf_lookup_by_id(exist_id);
+
+ flog_err(
+ EC_ZEBRA_VRF_MISCONFIGURED,
+ "VRF %s id %u table id overlaps existing vrf %s, misconfiguration exiting",
+ name, ifi->ifi_index, vrf->name);
+ exit(-1);
+ }
}
+
/*
* vrf_get is implied creation if it does not exist
*/
diff --git a/zebra/rib.h b/zebra/rib.h
index 3717a12814..a024b6dfaa 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -41,7 +41,7 @@
extern "C" {
#endif
-typedef enum { RNH_NEXTHOP_TYPE, RNH_IMPORT_CHECK_TYPE } rnh_type_t;
+enum rnh_type { RNH_NEXTHOP_TYPE, RNH_IMPORT_CHECK_TYPE };
PREDECL_LIST(rnh_list)
@@ -58,7 +58,7 @@ struct rnh {
afi_t afi;
- rnh_type_t type;
+ enum rnh_type type;
uint32_t seqno;
@@ -251,12 +251,22 @@ DECLARE_LIST(re_list, struct route_entry, next);
(re) && ((next) = re_list_next(&((dest)->routes), (re)), 1); \
(re) = (next))
+#define RE_DEST_FIRST_ROUTE(dest, re) \
+ ((re) = (dest) ? re_list_first(&((dest)->routes)) : NULL)
+
+#define RE_DEST_NEXT_ROUTE(dest, re) \
+ ((re) = (dest) ? re_list_next(&((dest)->routes), (re)) : NULL)
+
#define RNODE_FOREACH_RE(rn, re) \
RE_DEST_FOREACH_ROUTE (rib_dest_from_rnode(rn), re)
#define RNODE_FOREACH_RE_SAFE(rn, re, next) \
RE_DEST_FOREACH_ROUTE_SAFE (rib_dest_from_rnode(rn), re, next)
+#define RNODE_FIRST_RE(rn, re) RE_DEST_FIRST_ROUTE(rib_dest_from_rnode(rn), re)
+
+#define RNODE_NEXT_RE(rn, re) RE_DEST_NEXT_ROUTE(rib_dest_from_rnode(rn), re)
+
#if defined(HAVE_RTADV)
/* Structure which hold status of router advertisement. */
struct rtadv {
@@ -276,7 +286,7 @@ struct rtadv {
* Structure that is hung off of a route_table that holds information about
* the table.
*/
-typedef struct rib_table_info_t_ {
+struct rib_table_info {
/*
* Back pointer to zebra_vrf.
@@ -284,14 +294,13 @@ typedef struct rib_table_info_t_ {
struct zebra_vrf *zvrf;
afi_t afi;
safi_t safi;
+};
-} rib_table_info_t;
-
-typedef enum {
+enum rib_tables_iter_state {
RIB_TABLES_ITER_S_INIT,
RIB_TABLES_ITER_S_ITERATING,
RIB_TABLES_ITER_S_DONE
-} rib_tables_iter_state_t;
+};
/*
* Structure that holds state for iterating over all tables in the
@@ -301,20 +310,21 @@ typedef struct rib_tables_iter_t_ {
vrf_id_t vrf_id;
int afi_safi_ix;
- rib_tables_iter_state_t state;
+ enum rib_tables_iter_state state;
} rib_tables_iter_t;
/* Events/reasons triggering a RIB update. */
-typedef enum {
+enum rib_update_event {
RIB_UPDATE_KERNEL,
RIB_UPDATE_RMAP_CHANGE,
RIB_UPDATE_OTHER,
RIB_UPDATE_MAX
-} rib_update_event_t;
+};
extern void route_entry_copy_nexthops(struct route_entry *re,
struct nexthop *nh);
-int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new);
+int route_entry_update_nhe(struct route_entry *re,
+ struct nhg_hash_entry *new_nhghe);
#define route_entry_dump(prefix, src, re) _route_entry_dump(__func__, prefix, src, re)
extern void _route_entry_dump(const char *func, union prefixconstptr pp,
@@ -374,10 +384,10 @@ extern struct route_entry *rib_match_ipv4_multicast(vrf_id_t vrf_id,
extern struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p,
vrf_id_t vrf_id);
-extern void rib_update(rib_update_event_t event);
-extern void rib_update_vrf(vrf_id_t vrf_id, rib_update_event_t event);
+extern void rib_update(enum rib_update_event event);
+extern void rib_update_vrf(vrf_id_t vrf_id, enum rib_update_event event);
extern void rib_update_table(struct route_table *table,
- rib_update_event_t event);
+ enum rib_update_event event);
extern int rib_sweep_route(struct thread *t);
extern void rib_sweep_table(struct route_table *table);
extern void rib_close_table(struct route_table *table);
@@ -412,9 +422,9 @@ extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq);
/*
* rib_table_info
*/
-static inline rib_table_info_t *rib_table_info(struct route_table *table)
+static inline struct rib_table_info *rib_table_info(struct route_table *table)
{
- return (rib_table_info_t *)route_table_get_info(table);
+ return (struct rib_table_info *)route_table_get_info(table);
}
/*
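
The new RE_DEST_FIRST_ROUTE/RE_DEST_NEXT_ROUTE (and RNODE_FIRST_RE/RNODE_NEXT_RE)
macros expose an explicit first/next stepping interface alongside the existing
FOREACH-style walks, which is useful when a traversal has to pause and resume.
A minimal, self-contained sketch of that pattern follows; the list type, macro
names and main() are illustrative only, not zebra's actual definitions.

    #include <stdio.h>

    struct entry {
        int metric;
        struct entry *next;
    };

    /* NULL-safe "first"/"next" accessors, mirroring the stepping macros above. */
    #define LIST_FIRST_ENTRY(head, e) ((e) = (head))
    #define LIST_NEXT_ENTRY(e)        ((e) = (e) ? (e)->next : NULL)

    int main(void)
    {
        struct entry c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
        struct entry *e;

        LIST_FIRST_ENTRY(&a, e);
        while (e) {
            printf("metric %d\n", e->metric);   /* prints 10, 20, 30 */
            LIST_NEXT_ENTRY(e);
        }
        return 0;
    }
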
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 4630b8a8e9..466e985494 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -90,10 +90,23 @@ struct gw_family_t {
static const char ipv4_ll_buf[16] = "169.254.0.1";
static struct in_addr ipv4_ll;
+/* Is this an IPv4 over IPv6 route? */
+static bool is_route_v4_over_v6(unsigned char rtm_family,
+ enum nexthop_types_t nexthop_type)
+{
+ if (rtm_family == AF_INET
+ && (nexthop_type == NEXTHOP_TYPE_IPV6
+ || nexthop_type == NEXTHOP_TYPE_IPV6_IFINDEX))
+ return true;
+
+ return false;
+}
+
/* Helper to control use of kernel-level nexthop ids */
static bool kernel_nexthops_supported(void)
{
- return (supports_nh && zebra_nhg_kernel_nexthops_enabled());
+ return (supports_nh && !vrf_is_backend_netns()
+ && zebra_nhg_kernel_nexthops_enabled());
}
/*
@@ -1164,9 +1177,7 @@ static void _netlink_route_build_singlepath(const struct prefix *p,
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
rtmsg->rtm_flags |= RTNH_F_ONLINK;
- if (rtmsg->rtm_family == AF_INET
- && (nexthop->type == NEXTHOP_TYPE_IPV6
- || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX)) {
+ if (is_route_v4_over_v6(rtmsg->rtm_family, nexthop->type)) {
rtmsg->rtm_flags |= RTNH_F_ONLINK;
addattr_l(nlmsg, req_size, RTA_GATEWAY, &ipv4_ll, 4);
addattr32(nlmsg, req_size, RTA_OIF, nexthop->ifindex);
@@ -1341,9 +1352,7 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc,
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
rtnh->rtnh_flags |= RTNH_F_ONLINK;
- if (rtmsg->rtm_family == AF_INET
- && (nexthop->type == NEXTHOP_TYPE_IPV6
- || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX)) {
+ if (is_route_v4_over_v6(rtmsg->rtm_family, nexthop->type)) {
bytelen = 4;
rtnh->rtnh_flags |= RTNH_F_ONLINK;
rta_addattr_l(rta, NL_PKT_BUF_SIZE, RTA_GATEWAY, &ipv4_ll,
@@ -2091,8 +2100,8 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx)
nexthop_done:
if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: ID (%u): %pNHv vrf %s(%u) %s ",
- __func__, id, nh,
+ zlog_debug("%s: ID (%u): %pNHv(%d) vrf %s(%u) %s ",
+ __func__, id, nh, nh->ifindex,
vrf_id_to_name(nh->vrf_id),
nh->vrf_id, label_buf);
}
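
The hunk above only factors the existing "IPv4 route with an IPv6 nexthop"
test out into is_route_v4_over_v6() (the RFC 5549-style case handled via the
169.254.0.1 onlink gateway), and makes kernel_nexthops_supported() return
false when VRFs use the netns backend. A standalone sketch of the predicate
is below; the enum and main() are illustrative stand-ins, not zebra's real
nexthop types.

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/socket.h>   /* AF_INET, AF_INET6 */

    enum nh_kind { NH_IPV4, NH_IPV6, NH_IPV6_IFINDEX };

    /* True when an AF_INET route resolves over an IPv6 nexthop. */
    static bool route_is_v4_over_v6(unsigned char family, enum nh_kind nh)
    {
        return family == AF_INET && (nh == NH_IPV6 || nh == NH_IPV6_IFINDEX);
    }

    int main(void)
    {
        printf("%d\n", route_is_v4_over_v6(AF_INET, NH_IPV6));    /* 1 */
        printf("%d\n", route_is_v4_over_v6(AF_INET6, NH_IPV6));   /* 0 */
        printf("%d\n", route_is_v4_over_v6(AF_INET, NH_IPV4));    /* 0 */
        return 0;
    }
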
diff --git a/zebra/rt_socket.c b/zebra/rt_socket.c
index 6909bcb137..64fd7fa491 100644
--- a/zebra/rt_socket.c
+++ b/zebra/rt_socket.c
@@ -180,6 +180,7 @@ static int kernel_rtm(int cmd, const struct prefix *p,
switch (p->family) {
case AF_INET: {
struct in_addr loopback;
+
loopback.s_addr = htonl(INADDR_LOOPBACK);
sin_gate.sin.sin_addr = loopback;
#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
@@ -187,11 +188,21 @@ static int kernel_rtm(int cmd, const struct prefix *p,
sizeof(struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
gate = true;
- }
- break;
- case AF_INET6:
- zlog_warn("v6 blackhole routes have not been programmed yet");
- break;
+ } break;
+ case AF_INET6: {
+ struct in6_addr loopback;
+
+ inet_pton(AF_INET6, "::1", &loopback);
+
+ sin_gate.sin6.sin6_addr = loopback;
+ sin_gate.sin6.sin6_family = AF_INET6;
+
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ sin_gate.sin6.sin6_len =
+ sizeof(struct sockaddr_in6);
+#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
+ gate = true;
+ } break;
}
}
diff --git a/zebra/rtadv.c b/zebra/rtadv.c
index 0fb3390410..013eb5819c 100644
--- a/zebra/rtadv.c
+++ b/zebra/rtadv.c
@@ -171,7 +171,7 @@ static int rtadv_recv_packet(struct zebra_vrf *zvrf, int sock, uint8_t *buf,
/* Send router advertisement packet. */
static void rtadv_send_packet(int sock, struct interface *ifp,
- ipv6_nd_suppress_ra_status stop)
+ enum ipv6_nd_suppress_ra_status stop)
{
struct msghdr msg;
struct iovec iov;
@@ -1003,7 +1003,7 @@ void rtadv_delete_prefix(struct zebra_if *zif, const struct prefix *p)
}
static void ipv6_nd_suppress_ra_set(struct interface *ifp,
- ipv6_nd_suppress_ra_status status)
+ enum ipv6_nd_suppress_ra_status status)
{
struct zebra_if *zif;
struct zebra_vrf *zvrf;
diff --git a/zebra/rtadv.h b/zebra/rtadv.h
index 68a5bbcdbe..d7a1ccfb29 100644
--- a/zebra/rtadv.h
+++ b/zebra/rtadv.h
@@ -147,10 +147,10 @@ enum ipv6_nd_prefix_source {
PREFIX_SRC_BOTH,
};
-typedef enum {
+enum ipv6_nd_suppress_ra_status {
RA_ENABLE = 0,
RA_SUPPRESS,
-} ipv6_nd_suppress_ra_status;
+};
extern void rtadv_init(struct zebra_vrf *zvrf);
extern void rtadv_vrf_terminate(struct zebra_vrf *zvrf);
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 143354b166..cc8cab1ff5 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -1530,7 +1530,7 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
{
int ret = EINVAL;
const struct route_table *table = NULL;
- const rib_table_info_t *info;
+ const struct rib_table_info *info;
const struct prefix *p, *src_p;
struct zebra_ns *zns;
struct zebra_vrf *zvrf;
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index 8f97c8cf47..47b4965396 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -76,15 +76,15 @@ static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args);
* Structure that holds state for iterating over all route_node
* structures that are candidates for being communicated to the FPM.
*/
-typedef struct zfpm_rnodes_iter_t_ {
+struct zfpm_rnodes_iter {
rib_tables_iter_t tables_iter;
route_table_iter_t iter;
-} zfpm_rnodes_iter_t;
+};
/*
* Statistics.
*/
-typedef struct zfpm_stats_t_ {
+struct zfpm_stats {
unsigned long connect_calls;
unsigned long connect_no_sock;
@@ -115,13 +115,12 @@ typedef struct zfpm_stats_t_ {
unsigned long t_conn_up_yields;
unsigned long t_conn_up_aborts;
unsigned long t_conn_up_finishes;
-
-} zfpm_stats_t;
+};
/*
* States for the FPM state machine.
*/
-typedef enum {
+enum zfpm_state {
/*
* In this state we are not yet ready to connect to the FPM. This
@@ -147,20 +146,21 @@ typedef enum {
*/
ZFPM_STATE_ESTABLISHED
-} zfpm_state_t;
+};
/*
* Message format to be used to communicate with the FPM.
*/
-typedef enum {
+enum zfpm_msg_format {
ZFPM_MSG_FORMAT_NONE,
ZFPM_MSG_FORMAT_NETLINK,
ZFPM_MSG_FORMAT_PROTOBUF,
-} zfpm_msg_format_e;
+};
+
/*
* Globals.
*/
-typedef struct zfpm_glob_t_ {
+struct zfpm_glob {
/*
* True if the FPM module has been enabled.
@@ -170,11 +170,11 @@ typedef struct zfpm_glob_t_ {
/*
* Message format to be used to communicate with the fpm.
*/
- zfpm_msg_format_e message_format;
+ enum zfpm_msg_format message_format;
struct thread_master *master;
- zfpm_state_t state;
+ enum zfpm_state state;
in_addr_t fpm_server;
/*
@@ -231,7 +231,7 @@ typedef struct zfpm_glob_t_ {
struct thread *t_conn_down;
struct {
- zfpm_rnodes_iter_t iter;
+ struct zfpm_rnodes_iter iter;
} t_conn_down_state;
/*
@@ -241,7 +241,7 @@ typedef struct zfpm_glob_t_ {
struct thread *t_conn_up;
struct {
- zfpm_rnodes_iter_t iter;
+ struct zfpm_rnodes_iter iter;
} t_conn_up_state;
unsigned long connect_calls;
@@ -251,18 +251,18 @@ typedef struct zfpm_glob_t_ {
* Stats from the start of the current statistics interval up to
* now. These are the counters we typically update in the code.
*/
- zfpm_stats_t stats;
+ struct zfpm_stats stats;
/*
* Statistics that were gathered in the last collection interval.
*/
- zfpm_stats_t last_ivl_stats;
+ struct zfpm_stats last_ivl_stats;
/*
* Cumulative stats from the last clear to the start of the current
* statistics interval.
*/
- zfpm_stats_t cumulative_stats;
+ struct zfpm_stats cumulative_stats;
/*
* Stats interval timer.
@@ -273,18 +273,17 @@ typedef struct zfpm_glob_t_ {
* If non-zero, the last time when statistics were cleared.
*/
time_t last_stats_clear_time;
+};
-} zfpm_glob_t;
-
-static zfpm_glob_t zfpm_glob_space;
-static zfpm_glob_t *zfpm_g = &zfpm_glob_space;
+static struct zfpm_glob zfpm_glob_space;
+static struct zfpm_glob *zfpm_g = &zfpm_glob_space;
static int zfpm_trigger_update(struct route_node *rn, const char *reason);
static int zfpm_read_cb(struct thread *thread);
static int zfpm_write_cb(struct thread *thread);
-static void zfpm_set_state(zfpm_state_t state, const char *reason);
+static void zfpm_set_state(enum zfpm_state state, const char *reason);
static void zfpm_start_connect_timer(const char *reason);
static void zfpm_start_stats_timer(void);
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
@@ -300,7 +299,7 @@ static inline int zfpm_thread_should_yield(struct thread *t)
/*
* zfpm_state_to_str
*/
-static const char *zfpm_state_to_str(zfpm_state_t state)
+static const char *zfpm_state_to_str(enum zfpm_state state)
{
switch (state) {
@@ -343,7 +342,7 @@ static time_t zfpm_get_elapsed_time(time_t reference)
/*
* zfpm_rnodes_iter_init
*/
-static inline void zfpm_rnodes_iter_init(zfpm_rnodes_iter_t *iter)
+static inline void zfpm_rnodes_iter_init(struct zfpm_rnodes_iter *iter)
{
memset(iter, 0, sizeof(*iter));
rib_tables_iter_init(&iter->tables_iter);
@@ -360,7 +359,8 @@ static inline void zfpm_rnodes_iter_init(zfpm_rnodes_iter_t *iter)
/*
* zfpm_rnodes_iter_next
*/
-static inline struct route_node *zfpm_rnodes_iter_next(zfpm_rnodes_iter_t *iter)
+static inline struct route_node *
+zfpm_rnodes_iter_next(struct zfpm_rnodes_iter *iter)
{
struct route_node *rn;
struct route_table *table;
@@ -389,7 +389,7 @@ static inline struct route_node *zfpm_rnodes_iter_next(zfpm_rnodes_iter_t *iter)
/*
* zfpm_rnodes_iter_pause
*/
-static inline void zfpm_rnodes_iter_pause(zfpm_rnodes_iter_t *iter)
+static inline void zfpm_rnodes_iter_pause(struct zfpm_rnodes_iter *iter)
{
route_table_iter_pause(&iter->iter);
}
@@ -397,7 +397,7 @@ static inline void zfpm_rnodes_iter_pause(zfpm_rnodes_iter_t *iter)
/*
* zfpm_rnodes_iter_cleanup
*/
-static inline void zfpm_rnodes_iter_cleanup(zfpm_rnodes_iter_t *iter)
+static inline void zfpm_rnodes_iter_cleanup(struct zfpm_rnodes_iter *iter)
{
route_table_iter_cleanup(&iter->iter);
rib_tables_iter_cleanup(&iter->tables_iter);
@@ -408,7 +408,7 @@ static inline void zfpm_rnodes_iter_cleanup(zfpm_rnodes_iter_t *iter)
*
* Initialize a statistics block.
*/
-static inline void zfpm_stats_init(zfpm_stats_t *stats)
+static inline void zfpm_stats_init(struct zfpm_stats *stats)
{
memset(stats, 0, sizeof(*stats));
}
@@ -416,7 +416,7 @@ static inline void zfpm_stats_init(zfpm_stats_t *stats)
/*
* zfpm_stats_reset
*/
-static inline void zfpm_stats_reset(zfpm_stats_t *stats)
+static inline void zfpm_stats_reset(struct zfpm_stats *stats)
{
zfpm_stats_init(stats);
}
@@ -424,7 +424,8 @@ static inline void zfpm_stats_reset(zfpm_stats_t *stats)
/*
* zfpm_stats_copy
*/
-static inline void zfpm_stats_copy(const zfpm_stats_t *src, zfpm_stats_t *dest)
+static inline void zfpm_stats_copy(const struct zfpm_stats *src,
+ struct zfpm_stats *dest)
{
memcpy(dest, src, sizeof(*dest));
}
@@ -440,8 +441,9 @@ static inline void zfpm_stats_copy(const zfpm_stats_t *src, zfpm_stats_t *dest)
* structure is composed entirely of counters. This can easily be
* changed when necessary.
*/
-static void zfpm_stats_compose(const zfpm_stats_t *s1, const zfpm_stats_t *s2,
- zfpm_stats_t *result)
+static void zfpm_stats_compose(const struct zfpm_stats *s1,
+ const struct zfpm_stats *s2,
+ struct zfpm_stats *result)
{
const unsigned long *p1, *p2;
unsigned long *result_p;
@@ -451,7 +453,7 @@ static void zfpm_stats_compose(const zfpm_stats_t *s1, const zfpm_stats_t *s2,
p2 = (const unsigned long *)s2;
result_p = (unsigned long *)result;
- num_counters = (sizeof(zfpm_stats_t) / sizeof(unsigned long));
+ num_counters = (sizeof(struct zfpm_stats) / sizeof(unsigned long));
for (i = 0; i < num_counters; i++) {
result_p[i] = p1[i] + p2[i];
@@ -512,7 +514,7 @@ static inline void zfpm_connect_off(void)
static int zfpm_conn_up_thread_cb(struct thread *thread)
{
struct route_node *rnode;
- zfpm_rnodes_iter_t *iter;
+ struct zfpm_rnodes_iter *iter;
rib_dest_t *dest;
zfpm_g->t_conn_up = NULL;
@@ -626,7 +628,7 @@ static void zfpm_connect_check(void)
static int zfpm_conn_down_thread_cb(struct thread *thread)
{
struct route_node *rnode;
- zfpm_rnodes_iter_t *iter;
+ struct zfpm_rnodes_iter *iter;
rib_dest_t *dest;
struct fpm_mac_info_t *mac = NULL;
@@ -1308,9 +1310,9 @@ static int zfpm_connect_cb(struct thread *t)
*
* Move state machine into the given state.
*/
-static void zfpm_set_state(zfpm_state_t state, const char *reason)
+static void zfpm_set_state(enum zfpm_state state, const char *reason)
{
- zfpm_state_t cur_state = zfpm_g->state;
+ enum zfpm_state cur_state = zfpm_g->state;
if (!reason)
reason = "Unknown";
@@ -1649,7 +1651,7 @@ static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args)
}
/*
- * zfpm_stats_timer_cb
+ * zfpm_stats_timer_cb
*/
static int zfpm_stats_timer_cb(struct thread *t)
{
@@ -1714,7 +1716,7 @@ void zfpm_start_stats_timer(void)
*/
static void zfpm_show_stats(struct vty *vty)
{
- zfpm_stats_t total_stats;
+ struct zfpm_stats total_stats;
time_t elapsed;
vty_out(vty, "\n%-40s %10s Last %2d secs\n\n", "Counter", "Total",
diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c
index 00909df1db..c580fe40d5 100644
--- a/zebra/zebra_fpm_netlink.c
+++ b/zebra/zebra_fpm_netlink.c
@@ -143,13 +143,13 @@ struct fpm_nh_encap_info_t {
};
/*
- * netlink_nh_info_t
+ * netlink_nh_info
*
* Holds information about a single nexthop for netlink. These info
* structures are transient and may contain pointers into rib
* data structures for convenience.
*/
-typedef struct netlink_nh_info_t_ {
+struct netlink_nh_info {
uint32_t if_index;
union g_addr *gateway;
@@ -160,14 +160,14 @@ typedef struct netlink_nh_info_t_ {
int recursive;
enum nexthop_types_t type;
struct fpm_nh_encap_info_t encap_info;
-} netlink_nh_info_t;
+};
/*
- * netlink_route_info_t
+ * netlink_route_info
*
* A structure for holding information for a netlink route message.
*/
-typedef struct netlink_route_info_t_ {
+struct netlink_route_info {
uint16_t nlmsg_type;
uint8_t rtm_type;
uint32_t rtm_table;
@@ -180,9 +180,9 @@ typedef struct netlink_route_info_t_ {
/*
* Nexthop structures
*/
- netlink_nh_info_t nhs[MULTIPATH_NUM];
+ struct netlink_nh_info nhs[MULTIPATH_NUM];
union g_addr *pref_src;
-} netlink_route_info_t;
+};
/*
* netlink_route_info_add_nh
@@ -192,11 +192,11 @@ typedef struct netlink_route_info_t_ {
*
* Returns true if a nexthop was added, false otherwise.
*/
-static int netlink_route_info_add_nh(netlink_route_info_t *ri,
+static int netlink_route_info_add_nh(struct netlink_route_info *ri,
struct nexthop *nexthop,
struct route_entry *re)
{
- netlink_nh_info_t nhi;
+ struct netlink_nh_info nhi;
union g_addr *src;
zebra_l3vni_t *zl3vni = NULL;
@@ -275,7 +275,7 @@ static uint8_t netlink_proto_from_route_type(int type)
*
* Returns true on success and false on failure.
*/
-static int netlink_route_info_fill(netlink_route_info_t *ri, int cmd,
+static int netlink_route_info_fill(struct netlink_route_info *ri, int cmd,
rib_dest_t *dest, struct route_entry *re)
{
struct nexthop *nexthop;
@@ -353,13 +353,13 @@ static int netlink_route_info_fill(netlink_route_info_t *ri, int cmd,
* Returns the number of bytes written to the buffer. 0 or a negative
* value indicates an error.
*/
-static int netlink_route_info_encode(netlink_route_info_t *ri, char *in_buf,
- size_t in_buf_len)
+static int netlink_route_info_encode(struct netlink_route_info *ri,
+ char *in_buf, size_t in_buf_len)
{
size_t bytelen;
unsigned int nexthop_num = 0;
size_t buf_offset;
- netlink_nh_info_t *nhi;
+ struct netlink_nh_info *nhi;
enum fpm_nh_encap_type_t encap;
struct rtattr *nest;
struct vxlan_encap_info_t *vxlan;
@@ -520,9 +520,10 @@ done:
*
* Helper function to log the information in a route_info structure.
*/
-static void zfpm_log_route_info(netlink_route_info_t *ri, const char *label)
+static void zfpm_log_route_info(struct netlink_route_info *ri,
+ const char *label)
{
- netlink_nh_info_t *nhi;
+ struct netlink_nh_info *nhi;
unsigned int i;
zfpm_debug("%s : %s %s/%d, Proto: %s, Metric: %u", label,
@@ -554,7 +555,7 @@ static void zfpm_log_route_info(netlink_route_info_t *ri, const char *label)
int zfpm_netlink_encode_route(int cmd, rib_dest_t *dest, struct route_entry *re,
char *in_buf, size_t in_buf_len)
{
- netlink_route_info_t ri_space, *ri;
+ struct netlink_route_info ri_space, *ri;
ri = &ri_space;
diff --git a/zebra/zebra_gr.c b/zebra/zebra_gr.c
index 19a280c0ca..a4f32dbf39 100644
--- a/zebra/zebra_gr.c
+++ b/zebra/zebra_gr.c
@@ -133,7 +133,7 @@ static void zebra_gr_client_info_delte(struct zserv *client,
zebra_route_string(client->proto));
/* Delete all the stale routes. */
- info->delete = true;
+ info->do_delete = true;
zebra_gr_delete_stale_routes(info);
XFREE(MTYPE_TMP, info);
@@ -456,7 +456,7 @@ static int32_t zebra_gr_route_stale_delete_timer_expiry(struct thread *thread)
/* Set the flag to indicate all stale route deletion */
if (thread->u.val == 1)
- info->delete = true;
+ info->do_delete = true;
cnt = zebra_gr_delete_stale_routes(info);
@@ -581,7 +581,7 @@ static int32_t zebra_gr_delete_stale_route(struct client_gr_info *info,
* Store the current prefix and afi
*/
if ((n >= ZEBRA_MAX_STALE_ROUTE_COUNT)
- && (info->delete == false)) {
+ && (info->do_delete == false)) {
info->current_afi = afi;
info->current_prefix = XCALLOC(
MTYPE_TMP,
diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c
index 1f3468d6dc..25b8b44ec9 100644
--- a/zebra/zebra_nb.c
+++ b/zebra/zebra_nb.c
@@ -22,6 +22,40 @@
#include "libfrr.h"
#include "zebra_nb.h"
+const char *zebra_afi_safi_value2identity(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "ipv4-unicast";
+ if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "ipv6-unicast";
+ if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "ipv4-multicast";
+ if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "ipv6-multicast";
+
+ return " ";
+}
+
+void zebra_afi_safi_identity2value(const char *key, afi_t *afi, safi_t *safi)
+{
+ if (strmatch(key, "frr-zebra:ipv4-unicast")) {
+ *afi = AFI_IP;
+ *safi = SAFI_UNICAST;
+ } else if (strmatch(key, "frr-zebra:ipv6-unicast")) {
+ *afi = AFI_IP6;
+ *safi = SAFI_UNICAST;
+ } else if (strmatch(key, "frr-zebra:ipv4-multicast")) {
+ *afi = AFI_IP;
+ *safi = SAFI_MULTICAST;
+ } else if (strmatch(key, "frr-zebra:ipv6-multicast")) {
+ *afi = AFI_IP6;
+ *safi = SAFI_MULTICAST;
+ } else {
+ *afi = AFI_UNSPEC;
+ *safi = SAFI_UNSPEC;
+ }
+}
+
/* clang-format off */
const struct frr_yang_module_info frr_zebra_info = {
.name = "frr-zebra",
@@ -420,221 +454,221 @@ const struct frr_yang_module_info frr_zebra_info = {
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib",
.cbs = {
- .create = lib_vrf_ribs_rib_create,
- .destroy = lib_vrf_ribs_rib_destroy,
- .get_next = lib_vrf_ribs_rib_get_next,
- .get_keys = lib_vrf_ribs_rib_get_keys,
- .lookup_entry = lib_vrf_ribs_rib_lookup_entry,
+ .create = lib_vrf_zebra_ribs_rib_create,
+ .destroy = lib_vrf_zebra_ribs_rib_destroy,
+ .get_next = lib_vrf_zebra_ribs_rib_get_next,
+ .get_keys = lib_vrf_zebra_ribs_rib_get_keys,
+ .lookup_entry = lib_vrf_zebra_ribs_rib_lookup_entry,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route",
.cbs = {
- .get_next = lib_vrf_ribs_rib_route_get_next,
- .get_keys = lib_vrf_ribs_rib_route_get_keys,
- .lookup_entry = lib_vrf_ribs_rib_route_lookup_entry,
+ .get_next = lib_vrf_zebra_ribs_rib_route_get_next,
+ .get_keys = lib_vrf_zebra_ribs_rib_route_get_keys,
+ .lookup_entry = lib_vrf_zebra_ribs_rib_route_lookup_entry,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/prefix",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/prefix",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_prefix_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_prefix_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry",
.cbs = {
- .get_next = lib_vrf_ribs_rib_route_route_entry_get_next,
- .get_keys = lib_vrf_ribs_rib_route_route_entry_get_keys,
- .lookup_entry = lib_vrf_ribs_rib_route_route_entry_lookup_entry,
+ .get_next = lib_vrf_zebra_ribs_rib_route_route_entry_get_next,
+ .get_keys = lib_vrf_zebra_ribs_rib_route_route_entry_get_keys,
+ .lookup_entry = lib_vrf_zebra_ribs_rib_route_route_entry_lookup_entry,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/protocol",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/protocol",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_protocol_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_protocol_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/instance",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/instance",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_instance_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_instance_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/distance",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/distance",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_distance_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_distance_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/metric",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/metric",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_metric_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_metric_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/tag",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/tag",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_tag_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_tag_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/selected",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/selected",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_selected_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_selected_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/installed",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/installed",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_installed_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_installed_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/failed",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/failed",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_failed_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_failed_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/queued",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/queued",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_queued_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_queued_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-flags",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/internal-flags",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_internal_flags_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_internal_flags_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-status",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/internal-status",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_internal_status_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_internal_status_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/uptime",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/uptime",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_uptime_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_uptime_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group",
.cbs = {
- .get_next = lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_next,
- .get_keys = lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_keys,
- .lookup_entry = lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry,
+ .get_next = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_get_next,
+ .get_keys = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_get_keys,
+ .lookup_entry = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_lookup_entry,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/name",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/name",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_name_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_name_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop",
.cbs = {
- .get_next = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next,
- .get_keys = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys,
- .lookup_entry = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry,
+ .get_next = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next,
+ .get_keys = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys,
+ .lookup_entry = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/nh-type",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/nh-type",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/vrf",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/vrf",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/gateway",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/gateway",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/interface",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/interface",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/bh-type",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/bh-type",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/onlink",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/onlink",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry",
.cbs = {
- .get_next = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next,
- .get_keys = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys,
- .lookup_entry = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry,
+ .get_next = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next,
+ .get_keys = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys,
+ .lookup_entry = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/id",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/id",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/label",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/label",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/ttl",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/ttl",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/duplicate",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/duplicate",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/recursive",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/recursive",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/active",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/active",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem,
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/fib",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/fib",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem,
}
},
{
@@ -680,9 +714,9 @@ const struct frr_yang_module_info frr_zebra_info = {
}
},
{
- .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/weight",
+ .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/weight",
.cbs = {
- .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem,
+ .get_elem = lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem,
}
},
{
diff --git a/zebra/zebra_nb.h b/zebra/zebra_nb.h
index 09a9edff99..15350eb53b 100644
--- a/zebra/zebra_nb.h
+++ b/zebra/zebra_nb.h
@@ -26,6 +26,10 @@ extern "C" {
extern const struct frr_yang_module_info frr_zebra_info;
+/* helper functions */
+const char *zebra_afi_safi_value2identity(afi_t afi, safi_t safi);
+void zebra_afi_safi_identity2value(const char *key, afi_t *afi, safi_t *safi);
+
/* prototypes */
int get_route_information_rpc(struct nb_cb_rpc_args *args);
int get_v6_mroute_info_rpc(struct nb_cb_rpc_args *args);
@@ -155,165 +159,172 @@ struct yang_data *lib_interface_zebra_state_remote_vtep_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *lib_interface_zebra_state_mcast_group_get_elem(
struct nb_cb_get_elem_args *args);
-int lib_vrf_ribs_rib_create(struct nb_cb_create_args *args);
-int lib_vrf_ribs_rib_destroy(struct nb_cb_destroy_args *args);
-const void *lib_vrf_ribs_rib_get_next(struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_get_keys(struct nb_cb_get_keys_args *args);
-const void *lib_vrf_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args);
-const void *lib_vrf_ribs_rib_route_get_next(struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args);
+int lib_vrf_zebra_ribs_rib_create(struct nb_cb_create_args *args);
+int lib_vrf_zebra_ribs_rib_destroy(struct nb_cb_destroy_args *args);
+const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args);
+int lib_vrf_zebra_ribs_rib_get_keys(struct nb_cb_get_keys_args *args);
const void *
-lib_vrf_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args);
-struct yang_data *
-lib_vrf_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args);
-struct yang_data *
-lib_vrf_ribs_rib_route_protocol_get_elem(struct nb_cb_get_elem_args *args);
-struct yang_data *
-lib_vrf_ribs_rib_route_protocol_v6_get_elem(struct nb_cb_get_elem_args *args);
-struct yang_data *
-lib_vrf_ribs_rib_route_vrf_get_elem(struct nb_cb_get_elem_args *args);
-struct yang_data *
-lib_vrf_ribs_rib_route_distance_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args);
+const void *
+lib_vrf_zebra_ribs_rib_route_get_next(struct nb_cb_get_next_args *args);
+int lib_vrf_zebra_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args);
+const void *
+lib_vrf_zebra_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_metric_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args);
+struct yang_data *lib_vrf_zebra_ribs_rib_route_protocol_get_elem(
+ struct nb_cb_get_elem_args *args);
+struct yang_data *lib_vrf_zebra_ribs_rib_route_protocol_v6_get_elem(
+ struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_tag_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_route_vrf_get_elem(struct nb_cb_get_elem_args *args);
+struct yang_data *lib_vrf_zebra_ribs_rib_route_distance_get_elem(
+ struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_selected_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_route_metric_get_elem(struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_installed_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_route_tag_get_elem(struct nb_cb_get_elem_args *args);
+struct yang_data *lib_vrf_zebra_ribs_rib_route_selected_get_elem(
+ struct nb_cb_get_elem_args *args);
+struct yang_data *lib_vrf_zebra_ribs_rib_route_installed_get_elem(
+ struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_failed_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_route_failed_get_elem(struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_queued_get_elem(struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_internal_flags_get_elem(
+lib_vrf_zebra_ribs_rib_route_queued_get_elem(struct nb_cb_get_elem_args *args);
+struct yang_data *lib_vrf_zebra_ribs_rib_route_internal_flags_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_internal_status_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_internal_status_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_uptime_get_elem(struct nb_cb_get_elem_args *args);
-const void *
-lib_vrf_ribs_rib_route_nexthop_group_get_next(struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_nexthop_group_get_keys(
+lib_vrf_zebra_ribs_rib_route_uptime_get_elem(struct nb_cb_get_elem_args *args);
+const void *lib_vrf_zebra_ribs_rib_route_nexthop_group_get_next(
+ struct nb_cb_get_next_args *args);
+int lib_vrf_zebra_ribs_rib_route_nexthop_group_get_keys(
struct nb_cb_get_keys_args *args);
-const void *lib_vrf_ribs_rib_route_nexthop_group_lookup_entry(
+const void *lib_vrf_zebra_ribs_rib_route_nexthop_group_lookup_entry(
struct nb_cb_lookup_entry_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_nexthop_group_name_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_nexthop_group_name_get_elem(
struct nb_cb_get_elem_args *args);
-const void *lib_vrf_ribs_rib_route_nexthop_group_frr_nexthops_nexthop_get_next(
+const void *
+lib_vrf_zebra_ribs_rib_route_nexthop_group_frr_nexthops_nexthop_get_next(
struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_nexthop_group_frr_nexthops_nexthop_get_keys(
+int lib_vrf_zebra_ribs_rib_route_nexthop_group_frr_nexthops_nexthop_get_keys(
struct nb_cb_get_keys_args *args);
-int lib_vrf_ribs_rib_create(struct nb_cb_create_args *args);
-int lib_vrf_ribs_rib_destroy(struct nb_cb_destroy_args *args);
-const void *lib_vrf_ribs_rib_get_next(struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_get_keys(struct nb_cb_get_keys_args *args);
-const void *lib_vrf_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args);
-const void *lib_vrf_ribs_rib_route_get_next(struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args);
+int lib_vrf_zebra_ribs_rib_create(struct nb_cb_create_args *args);
+int lib_vrf_zebra_ribs_rib_destroy(struct nb_cb_destroy_args *args);
+const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args);
+int lib_vrf_zebra_ribs_rib_get_keys(struct nb_cb_get_keys_args *args);
const void *
-lib_vrf_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args);
-struct yang_data *
-lib_vrf_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args);
+lib_vrf_zebra_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args);
const void *
-lib_vrf_ribs_rib_route_route_entry_get_next(struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_route_entry_get_keys(
+lib_vrf_zebra_ribs_rib_route_get_next(struct nb_cb_get_next_args *args);
+int lib_vrf_zebra_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args);
+const void *
+lib_vrf_zebra_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args);
+struct yang_data *
+lib_vrf_zebra_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args);
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_get_next(
+ struct nb_cb_get_next_args *args);
+int lib_vrf_zebra_ribs_rib_route_route_entry_get_keys(
struct nb_cb_get_keys_args *args);
-const void *lib_vrf_ribs_rib_route_route_entry_lookup_entry(
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_lookup_entry(
struct nb_cb_lookup_entry_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_protocol_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_protocol_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_instance_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_instance_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_distance_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_distance_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_metric_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_metric_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_tag_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_tag_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_selected_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_selected_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_installed_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_installed_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_failed_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_failed_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_queued_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_queued_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_flags_get_elem(
+struct yang_data *
+lib_vrf_zebra_ribs_rib_route_route_entry_internal_flags_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_status_get_elem(
+struct yang_data *
+lib_vrf_zebra_ribs_rib_route_route_entry_internal_status_get_elem(
struct nb_cb_get_elem_args *args);
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_uptime_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_uptime_get_elem(
struct nb_cb_get_elem_args *args);
-const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_next(
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_get_next(
struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_get_keys(
struct nb_cb_get_keys_args *args);
-const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry(
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_lookup_entry(
struct nb_cb_lookup_entry_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_name_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_name_get_elem(
struct nb_cb_get_elem_args *args);
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next(
struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys(
struct nb_cb_get_keys_args *args);
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry(
struct nb_cb_lookup_entry_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem(
struct nb_cb_get_elem_args *args);
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next(
struct nb_cb_get_next_args *args);
-int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys(
struct nb_cb_get_keys_args *args);
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry(
struct nb_cb_lookup_entry_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem(
struct nb_cb_get_elem_args *args);
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem(
struct nb_cb_get_elem_args *args);
#ifdef __cplusplus
diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c
index dbe265da8c..5b87a18a06 100644
--- a/zebra/zebra_nb_config.c
+++ b/zebra/zebra_nb_config.c
@@ -29,6 +29,8 @@
#include "zebra_nb.h"
#include "zebra/interface.h"
#include "zebra/connected.h"
+#include "zebra/zebra_router.h"
+#include "zebra/debug.h"
/*
* XPath: /frr-zebra:zebra/mcast-rpf-lookup
@@ -1216,32 +1218,56 @@ int lib_interface_zebra_bandwidth_destroy(struct nb_cb_destroy_args *args)
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib
*/
-int lib_vrf_ribs_rib_create(struct nb_cb_create_args *args)
+int lib_vrf_zebra_ribs_rib_create(struct nb_cb_create_args *args)
{
+ struct vrf *vrf;
+ afi_t afi;
+ safi_t safi;
+ struct zebra_vrf *zvrf;
+ struct zebra_router_table *zrt;
+ uint32_t table_id;
+ const char *afi_safi_name;
+
+ vrf = nb_running_get_entry(args->dnode, NULL, false);
+ zvrf = vrf_info_lookup(vrf->vrf_id);
+ table_id = yang_dnode_get_uint32(args->dnode, "./table-id");
+ if (!table_id)
+ table_id = zvrf->table_id;
+
+ afi_safi_name = yang_dnode_get_string(args->dnode, "./afi-safi-name");
+ zebra_afi_safi_identity2value(afi_safi_name, &afi, &safi);
+
+ zrt = zebra_router_find_zrt(zvrf, table_id, afi, safi);
+
switch (args->event) {
case NB_EV_VALIDATE:
+ if (!zrt) {
+			zlog_debug("%s: vrf %s table not found.", __func__,
+				   vrf->name);
+ return NB_ERR_VALIDATION;
+ }
+ break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ break;
case NB_EV_APPLY:
- /* TODO: implement me. */
+
+ nb_running_set_entry(args->dnode, zrt);
+
break;
}
return NB_OK;
}
-int lib_vrf_ribs_rib_destroy(struct nb_cb_destroy_args *args)
+int lib_vrf_zebra_ribs_rib_destroy(struct nb_cb_destroy_args *args)
{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
- break;
- }
+ if (args->event != NB_EV_APPLY)
+ return NB_OK;
+
+ nb_running_unset_entry(args->dnode);
return NB_OK;
}
diff --git a/zebra/zebra_nb_state.c b/zebra/zebra_nb_state.c
index 09c76e6022..44a2d172f1 100644
--- a/zebra/zebra_nb_state.c
+++ b/zebra/zebra_nb_state.c
@@ -22,6 +22,9 @@
#include "libfrr.h"
#include "zebra_nb.h"
#include "zebra/interface.h"
+#include "zebra/zebra_router.h"
+#include "zebra/debug.h"
+#include "printfrr.h"
/*
* XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/up-count
@@ -147,218 +150,366 @@ lib_interface_zebra_state_mcast_group_get_elem(struct nb_cb_get_elem_args *args)
return yang_data_new_ipv4(args->xpath, &vxlan_info->mcast_grp);
}
-const void *lib_vrf_ribs_rib_get_next(struct nb_cb_get_next_args *args)
+const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct vrf *vrf = (struct vrf *)args->parent_list_entry;
+ struct zebra_router_table *zrt =
+ (struct zebra_router_table *)args->list_entry;
+
+ struct zebra_vrf *zvrf;
+ afi_t afi;
+ safi_t safi;
+
+ zvrf = zebra_vrf_lookup_by_id(vrf->vrf_id);
+
+ if (args->list_entry == NULL) {
+ afi = AFI_IP;
+ safi = SAFI_UNICAST;
+
+ zrt = zebra_router_find_zrt(zvrf, zvrf->table_id, afi, safi);
+ if (zrt == NULL)
+ return NULL;
+ } else {
+ zrt = RB_NEXT(zebra_router_table_head, zrt);
+ /* vrf_id/ns_id do not match, only walk for the given VRF */
+ while (zrt && zrt->ns_id != zvrf->zns->ns_id)
+ zrt = RB_NEXT(zebra_router_table_head, zrt);
+ }
+
+ return zrt;
}
-int lib_vrf_ribs_rib_get_keys(struct nb_cb_get_keys_args *args)
+int lib_vrf_zebra_ribs_rib_get_keys(struct nb_cb_get_keys_args *args)
{
- /* TODO: implement me. */
+ const struct zebra_router_table *zrt = args->list_entry;
+
+ args->keys->num = 2;
+
+ snprintfrr(args->keys->key[0], sizeof(args->keys->key[0]), "%s:%s",
+ "frr-zebra",
+ zebra_afi_safi_value2identity(zrt->afi, zrt->safi));
+ snprintfrr(args->keys->key[1], sizeof(args->keys->key[1]), "%" PRIu32,
+ zrt->tableid);
+
return NB_OK;
}
-const void *lib_vrf_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args)
+const void *
+lib_vrf_zebra_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct vrf *vrf = (struct vrf *)args->parent_list_entry;
+ struct zebra_vrf *zvrf;
+ afi_t afi;
+ safi_t safi;
+ uint32_t table_id = 0;
+
+ zvrf = zebra_vrf_lookup_by_id(vrf->vrf_id);
+
+ zebra_afi_safi_identity2value(args->keys->key[0], &afi, &safi);
+ table_id = yang_str2uint32(args->keys->key[1]);
+	/* A table_id of 0 means use the vrf's table_id. */
+ if (!table_id)
+ table_id = zvrf->table_id;
+
+ return zebra_router_find_zrt(zvrf, table_id, afi, safi);
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route
*/
-const void *lib_vrf_ribs_rib_route_get_next(struct nb_cb_get_next_args *args)
+const void *
+lib_vrf_zebra_ribs_rib_route_get_next(struct nb_cb_get_next_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ const struct zebra_router_table *zrt = args->parent_list_entry;
+ const struct route_node *rn = args->list_entry;
+
+ if (args->list_entry == NULL)
+ rn = route_top(zrt->table);
+ else
+ rn = srcdest_route_next((struct route_node *)rn);
+
+ return rn;
}
-int lib_vrf_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args)
+int lib_vrf_zebra_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args)
{
- /* TODO: implement me. */
+ const struct route_node *rn = args->list_entry;
+
+ args->keys->num = 1;
+ prefix2str(&rn->p, args->keys->key[0], sizeof(args->keys->key[0]));
+
return NB_OK;
}
const void *
-lib_vrf_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args)
+lib_vrf_zebra_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ const struct zebra_router_table *zrt = args->parent_list_entry;
+ struct prefix p;
+ struct route_node *rn;
+
+ yang_str2prefix(args->keys->key[0], &p);
+
+ rn = route_node_lookup(zrt->table, &p);
+
+ if (!rn)
+ return NULL;
+
+ route_unlock_node(rn);
+
+ return rn;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/prefix
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/prefix
*/
struct yang_data *
-lib_vrf_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args)
+lib_vrf_zebra_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ const struct route_node *rn = args->list_entry;
+
+ return yang_data_new_prefix(args->xpath, &rn->p);
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry
*/
-const void *
-lib_vrf_ribs_rib_route_route_entry_get_next(struct nb_cb_get_next_args *args)
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_get_next(
+ struct nb_cb_get_next_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+ struct route_node *rn = (struct route_node *)args->parent_list_entry;
+
+ if (args->list_entry == NULL)
+ RNODE_FIRST_RE(rn, re);
+ else
+ RNODE_NEXT_RE(rn, re);
+
+ return re;
}
-int lib_vrf_ribs_rib_route_route_entry_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_get_keys(
struct nb_cb_get_keys_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ args->keys->num = 1;
+
+ strlcpy(args->keys->key[0], zebra_route_string(re->type),
+ sizeof(args->keys->key[0]));
+
return NB_OK;
}
-const void *lib_vrf_ribs_rib_route_route_entry_lookup_entry(
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_lookup_entry(
struct nb_cb_lookup_entry_args *args)
{
- /* TODO: implement me. */
+ struct route_node *rn = (struct route_node *)args->parent_list_entry;
+ struct route_entry *re = NULL;
+ int proto_type = 0;
+ afi_t afi;
+
+ afi = family2afi(rn->p.family);
+ proto_type = proto_redistnum(afi, args->keys->key[0]);
+
+ RNODE_FOREACH_RE (rn, re) {
+ if (proto_type == re->type)
+ return re;
+ }
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/protocol
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/protocol
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_protocol_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_protocol_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ return yang_data_new_enum(args->xpath, re->type);
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/instance
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/instance
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_instance_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_instance_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (re->instance)
+ return yang_data_new_uint16(args->xpath, re->instance);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/distance
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/distance
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_distance_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_distance_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ return yang_data_new_uint8(args->xpath, re->distance);
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/metric
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/metric
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_metric_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_metric_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ return yang_data_new_uint32(args->xpath, re->metric);
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/tag
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/tag
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_tag_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_tag_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (re->tag)
+ return yang_data_new_uint32(args->xpath, re->tag);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/selected
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/selected
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_selected_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_selected_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/installed
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/installed
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_installed_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_installed_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/failed
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/failed
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_failed_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_failed_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_FAILED))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/queued
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/queued
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_queued_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_queued_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_QUEUED))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-flags
+ * XPath:
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/internal-flags
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_flags_get_elem(
+struct yang_data *
+lib_vrf_zebra_ribs_rib_route_route_entry_internal_flags_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (re->flags)
+ return yang_data_new_int32(args->xpath, re->flags);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-status
+ * XPath:
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/internal-status
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_status_get_elem(
+struct yang_data *
+lib_vrf_zebra_ribs_rib_route_route_entry_internal_status_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ if (re->status)
+ return yang_data_new_int32(args->xpath, re->status);
+
return NULL;
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/uptime
+ * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/uptime
*/
-struct yang_data *lib_vrf_ribs_rib_route_route_entry_uptime_get_elem(
+struct yang_data *lib_vrf_zebra_ribs_rib_route_route_entry_uptime_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct route_entry *re = (struct route_entry *)args->list_entry;
+
+ return yang_data_new_date_and_time(args->xpath, re->uptime);
}
/*
- * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group
+ * XPath:
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group
*/
-const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_next(
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_get_next(
struct nb_cb_get_next_args *args)
{
- /* TODO: implement me. */
+ struct route_entry *re = (struct route_entry *)args->parent_list_entry;
+ struct nhg_hash_entry *nhe = (struct nhg_hash_entry *)args->list_entry;
+
+ if (nhe == NULL) {
+ nhe = re->nhe;
+ return nhe;
+ }
return NULL;
}
-int lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_get_keys(
struct nb_cb_get_keys_args *args)
{
- /* TODO: implement me. */
+ struct nhg_hash_entry *nhe = (struct nhg_hash_entry *)args->list_entry;
+
+ args->keys->num = 1;
+ snprintfrr(args->keys->key[0], sizeof(args->keys->key[0]), "%" PRIu32,
+ nhe->id);
+
return NB_OK;
}
-const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry(
+const void *lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_lookup_entry(
struct nb_cb_lookup_entry_args *args)
{
/* TODO: implement me. */
@@ -367,37 +518,103 @@ const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry(
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/name
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/name
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_name_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_name_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct nhg_hash_entry *nhe = (struct nhg_hash_entry *)args->list_entry;
+ char name[20] = {'\0'};
+
+ snprintfrr(name, sizeof(name), "%" PRIu32, nhe->id);
+
+ return yang_data_new_string(args->xpath, name);
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop
*/
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next(
struct nb_cb_get_next_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+ struct nhg_hash_entry *nhe =
+ (struct nhg_hash_entry *)args->parent_list_entry;
+
+ if (args->list_entry == NULL)
+ nexthop = nhe->nhg.nexthop;
+ else
+ nexthop = nexthop_next(nexthop);
+
+ return nexthop;
}
-int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys(
struct nb_cb_get_keys_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ args->keys->num = 3;
+
+ strlcpy(args->keys->key[0], yang_nexthop_type2str(nexthop->type),
+ sizeof(args->keys->key[0]));
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ snprintfrr(args->keys->key[1], sizeof(args->keys->key[1]),
+ "%pI4", &nexthop->gate.ipv4);
+ if (nexthop->ifindex)
+ strlcpy(args->keys->key[2],
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id),
+ sizeof(args->keys->key[2]));
+ else
+ /* no ifindex */
+ strlcpy(args->keys->key[2], " ",
+ sizeof(args->keys->key[2]));
+
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ snprintfrr(args->keys->key[1], sizeof(args->keys->key[1]),
+ "%pI6", &nexthop->gate.ipv6);
+
+ if (nexthop->ifindex)
+ strlcpy(args->keys->key[2],
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id),
+ sizeof(args->keys->key[2]));
+ else
+ /* no ifindex */
+ strlcpy(args->keys->key[2], " ",
+ sizeof(args->keys->key[2]));
+
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ strlcpy(args->keys->key[1], "", sizeof(args->keys->key[1]));
+ strlcpy(args->keys->key[2],
+ ifindex2ifname(nexthop->ifindex, nexthop->vrf_id),
+ sizeof(args->keys->key[2]));
+
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* Gateway IP */
+ strlcpy(args->keys->key[1], "", sizeof(args->keys->key[1]));
+ strlcpy(args->keys->key[2], " ", sizeof(args->keys->key[2]));
+ break;
+ default:
+ break;
+ }
+
return NB_OK;
}
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry(
struct nb_cb_lookup_entry_args *args)
{
/* TODO: implement me. */
@@ -406,89 +623,166 @@ lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_ent
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/nh-type
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/nh-type
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ return yang_data_new_string(args->xpath, "ifindex");
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ return yang_data_new_string(args->xpath, "ip4");
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ return yang_data_new_string(args->xpath, "ip4-ifindex");
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ return yang_data_new_string(args->xpath, "ip6");
+ break;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ return yang_data_new_string(args->xpath, "ip6-ifindex");
+ break;
+ default:
+ break;
+ }
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/vrf
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/vrf
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ return yang_data_new_string(args->xpath,
+ vrf_id_to_name(nexthop->vrf_id));
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/gateway
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/gateway
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+ struct ipaddr addr;
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ addr.ipa_type = IPADDR_V4;
+ memcpy(&addr.ipaddr_v4, &(nexthop->gate.ipv4),
+ sizeof(struct in_addr));
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ addr.ipa_type = IPADDR_V6;
+ memcpy(&addr.ipaddr_v6, &(nexthop->gate.ipv6),
+ sizeof(struct in6_addr));
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ case NEXTHOP_TYPE_IFINDEX:
+ /* No addr here */
+ return yang_data_new_string(args->xpath, "");
+ break;
+ default:
+ break;
+ }
+
+ return yang_data_new_ip(args->xpath, &addr);
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/interface
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/interface
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+	if (nexthop->ifindex)
+		return yang_data_new_string(
+			args->xpath,
+			ifindex2ifname(nexthop->ifindex, nexthop->vrf_id));
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/bh-type
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/bh-type
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
- return NULL;
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+ const char *type_str = "";
+
+ if (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)
+ return NULL;
+
+ switch (nexthop->bh_type) {
+ case BLACKHOLE_NULL:
+ type_str = "null";
+ break;
+ case BLACKHOLE_REJECT:
+ type_str = "reject";
+ break;
+ case BLACKHOLE_ADMINPROHIB:
+ type_str = "prohibited";
+ break;
+ case BLACKHOLE_UNSPEC:
+ type_str = "unspec";
+ break;
+ }
+
+ return yang_data_new_string(args->xpath, type_str);
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/onlink
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/onlink
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
+ return yang_data_new_bool(args->xpath, true);
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry
*/
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next(
struct nb_cb_get_next_args *args)
{
/* TODO: implement me. */
return NULL;
}
-int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys(
+int lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys(
struct nb_cb_get_keys_args *args)
{
/* TODO: implement me. */
@@ -496,7 +790,7 @@ int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_l
}
const void *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry(
struct nb_cb_lookup_entry_args *args)
{
/* TODO: implement me. */
@@ -505,10 +799,10 @@ lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/id
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/id
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem(
struct nb_cb_get_elem_args *args)
{
/* TODO: implement me. */
@@ -517,10 +811,10 @@ lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/label
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/label
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem(
struct nb_cb_get_elem_args *args)
{
/* TODO: implement me. */
@@ -529,10 +823,10 @@ lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/ttl
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/ttl
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem(
struct nb_cb_get_elem_args *args)
{
/* TODO: implement me. */
@@ -541,10 +835,10 @@ lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem(
struct nb_cb_get_elem_args *args)
{
/* TODO: implement me. */
@@ -553,60 +847,80 @@ lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/duplicate
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/duplicate
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/recursive
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/recursive
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/active
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/active
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/fib
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/fib
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ return yang_data_new_empty(args->xpath);
+
return NULL;
}
/*
* XPath:
- * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/weight
+ * /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/weight
*/
struct yang_data *
-lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem(
+lib_vrf_zebra_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem(
struct nb_cb_get_elem_args *args)
{
- /* TODO: implement me. */
+ struct nexthop *nexthop = (struct nexthop *)args->list_entry;
+
+ if (nexthop->weight)
+ return yang_data_new_uint8(args->xpath, nexthop->weight);
+
return NULL;
}
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index de044c0ea0..f24552c80b 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1793,23 +1793,16 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
nexthop->vrf_id);
return 0;
}
- if (connected_is_unnumbered(ifp)) {
- if (if_is_operative(ifp))
- return 1;
+ if (if_is_operative(ifp))
+ return 1;
+ else {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
" %s: Onlink and interface %s is not operative",
__func__, ifp->name);
return 0;
}
- if (!if_is_operative(ifp)) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- " %s: Interface %s is not unnumbered",
- __func__, ifp->name);
- return 0;
- }
}
if ((top->p.family == AF_INET && top->p.prefixlen == 32
@@ -2099,7 +2092,7 @@ static unsigned nexthop_active_check(struct route_node *rn,
* in every case.
*/
if (!family) {
- rib_table_info_t *info;
+ struct rib_table_info *info;
info = srcdest_rnode_table_info(rn);
family = info->afi;
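Because the first zebra_nhg.c hunk interleaves removed and surviving lines, the resulting onlink check is easier to read with the diff markers stripped (sketch of the post-patch code; surrounding context unchanged):

	if (if_is_operative(ifp))
		return 1;
	else {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug(
				" %s: Onlink and interface %s is not operative",
				__func__, ifp->name);
		return 0;
	}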
diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c
index c26b7a6157..273843baa2 100644
--- a/zebra/zebra_pw.c
+++ b/zebra/zebra_pw.c
@@ -24,6 +24,8 @@
#include "thread.h"
#include "command.h"
#include "vrf.h"
+#include "lib/json.h"
+#include "printfrr.h"
#include "zebra/debug.h"
#include "zebra/rib.h"
@@ -506,6 +508,155 @@ DEFUN (show_pseudowires,
return CMD_SUCCESS;
}
+static void vty_show_mpls_pseudowire_detail(struct vty *vty)
+{
+ struct zebra_vrf *zvrf;
+ struct zebra_pw *pw;
+ struct route_entry *re;
+ struct nexthop *nexthop;
+
+ zvrf = vrf_info_lookup(VRF_DEFAULT);
+ if (!zvrf)
+ return;
+
+ RB_FOREACH (pw, zebra_pw_head, &zvrf->pseudowires) {
+ char buf_nbr[INET6_ADDRSTRLEN];
+ char buf_nh[100];
+
+ vty_out(vty, "Interface: %s\n", pw->ifname);
+ inet_ntop(pw->af, &pw->nexthop, buf_nbr, sizeof(buf_nbr));
+ vty_out(vty, " Neighbor: %s\n",
+ (pw->af != AF_UNSPEC) ? buf_nbr : "-");
+ if (pw->local_label != MPLS_NO_LABEL)
+ vty_out(vty, " Local Label: %u\n", pw->local_label);
+ else
+ vty_out(vty, " Local Label: %s\n", "-");
+ if (pw->remote_label != MPLS_NO_LABEL)
+ vty_out(vty, " Remote Label: %u\n", pw->remote_label);
+ else
+ vty_out(vty, " Remote Label: %s\n", "-");
+ vty_out(vty, " Protocol: %s\n",
+ zebra_route_string(pw->protocol));
+ if (pw->protocol == ZEBRA_ROUTE_LDP)
+ vty_out(vty, " VC-ID: %u\n", pw->data.ldp.pwid);
+ vty_out(vty, " Status: %s \n",
+ (zebra_pw_enabled(pw) && pw->status == PW_STATUS_UP)
+ ? "Up"
+ : "Down");
+ re = rib_match(family2afi(pw->af), SAFI_UNICAST, pw->vrf_id,
+ &pw->nexthop, NULL);
+ if (re) {
+ for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) {
+ snprintfrr(buf_nh, sizeof(buf_nh), "%pNHv",
+ nexthop);
+ vty_out(vty, " Next Hop: %s\n", buf_nh);
+ if (nexthop->nh_label)
+ vty_out(vty, " Next Hop label: %u\n",
+ nexthop->nh_label->label[0]);
+ else
+ vty_out(vty, " Next Hop label: %s\n",
+ "-");
+ }
+ }
+ }
+}
+
+static void vty_show_mpls_pseudowire(struct zebra_pw *pw, json_object *json_pws)
+{
+ struct route_entry *re;
+ struct nexthop *nexthop;
+ char buf_nbr[INET6_ADDRSTRLEN];
+ char buf_nh[100];
+ json_object *json_pw = NULL;
+ json_object *json_nexthop = NULL;
+ json_object *json_nexthops = NULL;
+
+ json_nexthops = json_object_new_array();
+ json_pw = json_object_new_object();
+
+ json_object_string_add(json_pw, "interface", pw->ifname);
+ if (pw->af == AF_UNSPEC)
+ json_object_string_add(json_pw, "neighbor", "-");
+ else {
+ inet_ntop(pw->af, &pw->nexthop, buf_nbr, sizeof(buf_nbr));
+ json_object_string_add(json_pw, "neighbor", buf_nbr);
+ }
+ if (pw->local_label != MPLS_NO_LABEL)
+ json_object_int_add(json_pw, "localLabel", pw->local_label);
+ else
+ json_object_string_add(json_pw, "localLabel", "-");
+ if (pw->remote_label != MPLS_NO_LABEL)
+ json_object_int_add(json_pw, "remoteLabel", pw->remote_label);
+ else
+ json_object_string_add(json_pw, "remoteLabel", "-");
+ json_object_string_add(json_pw, "protocol",
+ zebra_route_string(pw->protocol));
+ if (pw->protocol == ZEBRA_ROUTE_LDP)
+ json_object_int_add(json_pw, "vcId", pw->data.ldp.pwid);
+ json_object_string_add(
+ json_pw, "Status",
+ (zebra_pw_enabled(pw) && pw->status == PW_STATUS_UP) ? "Up"
+ : "Down");
+ re = rib_match(family2afi(pw->af), SAFI_UNICAST, pw->vrf_id,
+ &pw->nexthop, NULL);
+ if (re) {
+ for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) {
+ json_nexthop = json_object_new_object();
+ snprintfrr(buf_nh, sizeof(buf_nh), "%pNHv", nexthop);
+ json_object_string_add(json_nexthop, "nexthop", buf_nh);
+ if (nexthop->nh_label)
+ json_object_int_add(
+ json_nexthop, "nhLabel",
+ nexthop->nh_label->label[0]);
+ else
+ json_object_string_add(json_nexthop, "nhLabel",
+ "-");
+
+ json_object_array_add(json_nexthops, json_nexthop);
+ }
+ json_object_object_add(json_pw, "nexthops", json_nexthops);
+ }
+ json_object_array_add(json_pws, json_pw);
+}
+
+static void vty_show_mpls_pseudowire_detail_json(struct vty *vty)
+{
+ json_object *json = NULL;
+ json_object *json_pws = NULL;
+ struct zebra_vrf *zvrf;
+ struct zebra_pw *pw;
+
+ zvrf = vrf_info_lookup(VRF_DEFAULT);
+ if (!zvrf)
+ return;
+
+ json = json_object_new_object();
+ json_pws = json_object_new_array();
+ RB_FOREACH (pw, zebra_pw_head, &zvrf->pseudowires) {
+ vty_show_mpls_pseudowire(pw, json_pws);
+ }
+ json_object_object_add(json, "pw", json_pws);
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+}
+
+DEFUN(show_pseudowires_detail, show_pseudowires_detail_cmd,
+ "show mpls pseudowires detail [json]$json",
+ SHOW_STR MPLS_STR
+ "Pseudowires\n"
+ "Detailed output\n" JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+
+ if (uj)
+ vty_show_mpls_pseudowire_detail_json(vty);
+ else
+ vty_show_mpls_pseudowire_detail(vty);
+
+ return CMD_SUCCESS;
+}
+
/* Pseudowire configuration write function. */
static int zebra_pw_config(struct vty *vty)
{
@@ -568,4 +719,5 @@ void zebra_pw_vty_init(void)
install_element(PW_NODE, &pseudowire_control_word_cmd);
install_element(VIEW_NODE, &show_pseudowires_cmd);
+ install_element(VIEW_NODE, &show_pseudowires_detail_cmd);
}
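Going only by the keys added in vty_show_mpls_pseudowire() above, the JSON variant of the new "show mpls pseudowires detail json" command should come out shaped roughly as below; the angle-bracket values are placeholders, not captured output, and vcId is only present for LDP pseudowires:

{
  "pw": [
    {
      "interface": "<ifname>",
      "neighbor": "<address, or - when the AF is unspecified>",
      "localLabel": <label, or "-" if unset>,
      "remoteLabel": <label, or "-" if unset>,
      "protocol": "<owning protocol>",
      "vcId": <pwid>,
      "Status": "Up" or "Down",
      "nexthops": [
        { "nexthop": "<resolved nexthop>", "nhLabel": <label, or "-"> }
      ]
    }
  ]
}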
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index f89656201c..d491982d62 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -125,7 +125,7 @@ _rnode_zlog(const char *_func, vrf_id_t vrf_id, struct route_node *rn,
va_end(ap);
if (rn) {
- rib_table_info_t *info = srcdest_rnode_table_info(rn);
+ struct rib_table_info *info = srcdest_rnode_table_info(rn);
srcdest_rnode2str(rn, buf, sizeof(buf));
if (info->safi == SAFI_MULTICAST)
@@ -211,28 +211,29 @@ static void route_entry_attach_ref(struct route_entry *re,
zebra_nhg_increment_ref(new);
}
-int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new)
+int route_entry_update_nhe(struct route_entry *re,
+ struct nhg_hash_entry *new_nhghe)
{
struct nhg_hash_entry *old;
int ret = 0;
- if (new == NULL) {
+ if (new_nhghe == NULL) {
if (re->nhe)
zebra_nhg_decrement_ref(re->nhe);
re->nhe = NULL;
goto done;
}
- if ((re->nhe_id != 0) && (re->nhe_id != new->id)) {
+ if ((re->nhe_id != 0) && (re->nhe_id != new_nhghe->id)) {
old = re->nhe;
- route_entry_attach_ref(re, new);
+ route_entry_attach_ref(re, new_nhghe);
if (old)
zebra_nhg_decrement_ref(old);
} else if (!re->nhe)
/* This is the first time it's being attached */
- route_entry_attach_ref(re, new);
+ route_entry_attach_ref(re, new_nhghe);
done:
return ret;
@@ -420,7 +421,7 @@ void rib_install_kernel(struct route_node *rn, struct route_entry *re,
struct route_entry *old)
{
struct nexthop *nexthop;
- rib_table_info_t *info = srcdest_rnode_table_info(rn);
+ struct rib_table_info *info = srcdest_rnode_table_info(rn);
struct zebra_vrf *zvrf = vrf_info_lookup(re->vrf_id);
const struct prefix *p, *src_p;
enum zebra_dplane_result ret;
@@ -503,7 +504,7 @@ void rib_install_kernel(struct route_node *rn, struct route_entry *re,
void rib_uninstall_kernel(struct route_node *rn, struct route_entry *re)
{
struct nexthop *nexthop;
- rib_table_info_t *info = srcdest_rnode_table_info(rn);
+ struct rib_table_info *info = srcdest_rnode_table_info(rn);
struct zebra_vrf *zvrf = vrf_info_lookup(re->vrf_id);
if (info->safi != SAFI_UNICAST) {
@@ -546,7 +547,7 @@ void rib_uninstall_kernel(struct route_node *rn, struct route_entry *re)
/* Uninstall the route from kernel. */
static void rib_uninstall(struct route_node *rn, struct route_entry *re)
{
- rib_table_info_t *info = srcdest_rnode_table_info(rn);
+ struct rib_table_info *info = srcdest_rnode_table_info(rn);
rib_dest_t *dest = rib_dest_from_rnode(rn);
struct nexthop *nexthop;
@@ -3085,7 +3086,7 @@ int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
return rib_add_multipath(afi, safi, p, src_p, re, ng);
}
-static const char *rib_update_event2str(rib_update_event_t event)
+static const char *rib_update_event2str(enum rib_update_event event)
{
const char *ret = "UNKNOWN";
@@ -3125,7 +3126,7 @@ static void rib_update_route_node(struct route_node *rn, int type)
}
/* Schedule routes of a particular table (address-family) based on event. */
-void rib_update_table(struct route_table *table, rib_update_event_t event)
+void rib_update_table(struct route_table *table, enum rib_update_event event)
{
struct route_node *rn;
@@ -3133,13 +3134,14 @@ void rib_update_table(struct route_table *table, rib_update_event_t event)
struct zebra_vrf *zvrf;
struct vrf *vrf;
- zvrf = table->info ? ((rib_table_info_t *)table->info)->zvrf
- : NULL;
+ zvrf = table->info
+ ? ((struct rib_table_info *)table->info)->zvrf
+ : NULL;
vrf = zvrf ? zvrf->vrf : NULL;
zlog_debug("%s: %s VRF %s Table %u event %s", __func__,
table->info ? afi2str(
- ((rib_table_info_t *)table->info)->afi)
+ ((struct rib_table_info *)table->info)->afi)
: "Unknown",
VRF_LOGNAME(vrf), zvrf ? zvrf->table_id : 0,
rib_update_event2str(event));
@@ -3173,7 +3175,7 @@ void rib_update_table(struct route_table *table, rib_update_event_t event)
}
}
-static void rib_update_handle_vrf(vrf_id_t vrf_id, rib_update_event_t event)
+static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event)
{
struct route_table *table;
@@ -3191,7 +3193,7 @@ static void rib_update_handle_vrf(vrf_id_t vrf_id, rib_update_event_t event)
rib_update_table(table, event);
}
-static void rib_update_handle_vrf_all(rib_update_event_t event)
+static void rib_update_handle_vrf_all(enum rib_update_event event)
{
struct zebra_router_table *zrt;
@@ -3205,13 +3207,13 @@ static void rib_update_handle_vrf_all(rib_update_event_t event)
}
struct rib_update_ctx {
- rib_update_event_t event;
+ enum rib_update_event event;
bool vrf_all;
vrf_id_t vrf_id;
};
static struct rib_update_ctx *rib_update_ctx_init(vrf_id_t vrf_id,
- rib_update_event_t event)
+ enum rib_update_event event)
{
struct rib_update_ctx *ctx;
@@ -3251,7 +3253,7 @@ static int rib_update_handler(struct thread *thread)
static struct thread *t_rib_update_threads[RIB_UPDATE_MAX];
/* Schedule a RIB update event for specific vrf */
-void rib_update_vrf(vrf_id_t vrf_id, rib_update_event_t event)
+void rib_update_vrf(vrf_id_t vrf_id, enum rib_update_event event)
{
struct rib_update_ctx *ctx;
@@ -3271,7 +3273,7 @@ void rib_update_vrf(vrf_id_t vrf_id, rib_update_event_t event)
}
/* Schedule a RIB update event for all vrfs */
-void rib_update(rib_update_event_t event)
+void rib_update(enum rib_update_event event)
{
struct rib_update_ctx *ctx;
@@ -3425,7 +3427,7 @@ unsigned long rib_score_proto(uint8_t proto, unsigned short instance)
void rib_close_table(struct route_table *table)
{
struct route_node *rn;
- rib_table_info_t *info;
+ struct rib_table_info *info;
rib_dest_t *dest;
if (!table)
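The rib_table_info_t and rib_update_event_t edits repeated through this file are all the same mechanical conversion: drop the _t typedef and spell out the tagged type at each use. In sketch form, with a hypothetical "foo" type standing in for the real ones:

/* before:
 *   typedef enum { FOO_FIRST, FOO_SECOND } foo_event_t;
 *   const char *foo_event2str(foo_event_t event);
 */

/* after: the enum keeps its tag and every caller names it explicitly */
enum foo_event { FOO_FIRST, FOO_SECOND };

const char *foo_event2str(enum foo_event event);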
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index f9c74c7462..ad2e00b1ec 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -57,8 +57,8 @@ static void free_state(vrf_id_t vrf_id, struct route_entry *re,
static void copy_state(struct rnh *rnh, const struct route_entry *re,
struct route_node *rn);
static int compare_state(struct route_entry *r1, struct route_entry *r2);
-static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type,
- vrf_id_t vrf_id);
+static int send_client(struct rnh *rnh, struct zserv *client,
+ enum rnh_type type, vrf_id_t vrf_id);
static void print_rnh(struct route_node *rn, struct vty *vty);
static int zebra_client_cleanup_rnh(struct zserv *client);
@@ -68,7 +68,7 @@ void zebra_rnh_init(void)
}
static inline struct route_table *get_rnh_table(vrf_id_t vrfid, afi_t afi,
- rnh_type_t type)
+ enum rnh_type type)
{
struct zebra_vrf *zvrf;
struct route_table *t = NULL;
@@ -148,7 +148,7 @@ static void zebra_rnh_store_in_routing_table(struct rnh *rnh)
route_unlock_node(rn);
}
-struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, rnh_type_t type,
+struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, enum rnh_type type,
bool *exists)
{
struct route_table *table;
@@ -207,7 +207,8 @@ struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, rnh_type_t type,
return (rn->info);
}
-struct rnh *zebra_lookup_rnh(struct prefix *p, vrf_id_t vrfid, rnh_type_t type)
+struct rnh *zebra_lookup_rnh(struct prefix *p, vrf_id_t vrfid,
+ enum rnh_type type)
{
struct route_table *table;
struct route_node *rn;
@@ -258,7 +259,7 @@ void zebra_free_rnh(struct rnh *rnh)
XFREE(MTYPE_RNH, rnh);
}
-static void zebra_delete_rnh(struct rnh *rnh, rnh_type_t type)
+static void zebra_delete_rnh(struct rnh *rnh, enum rnh_type type)
{
struct route_node *rn;
@@ -289,7 +290,7 @@ static void zebra_delete_rnh(struct rnh *rnh, rnh_type_t type)
* and as such it will have a resolved rnh.
*/
void zebra_add_rnh_client(struct rnh *rnh, struct zserv *client,
- rnh_type_t type, vrf_id_t vrf_id)
+ enum rnh_type type, vrf_id_t vrf_id)
{
if (IS_ZEBRA_DEBUG_NHT) {
char buf[PREFIX2STR_BUFFER];
@@ -308,7 +309,7 @@ void zebra_add_rnh_client(struct rnh *rnh, struct zserv *client,
}
void zebra_remove_rnh_client(struct rnh *rnh, struct zserv *client,
- rnh_type_t type)
+ enum rnh_type type)
{
if (IS_ZEBRA_DEBUG_NHT) {
char buf[PREFIX2STR_BUFFER];
@@ -804,7 +805,7 @@ static void zebra_rnh_eval_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi,
/* Evaluate one tracked entry */
static void zebra_rnh_evaluate_entry(struct zebra_vrf *zvrf, afi_t afi,
- int force, rnh_type_t type,
+ int force, enum rnh_type type,
struct route_node *nrn)
{
struct rnh *rnh;
@@ -851,7 +852,7 @@ static void zebra_rnh_evaluate_entry(struct zebra_vrf *zvrf, afi_t afi,
* covers multiple nexthops we are interested in.
*/
static void zebra_rnh_clear_nhc_flag(struct zebra_vrf *zvrf, afi_t afi,
- rnh_type_t type, struct route_node *nrn)
+ enum rnh_type type, struct route_node *nrn)
{
struct rnh *rnh;
struct route_entry *re;
@@ -875,7 +876,7 @@ static void zebra_rnh_clear_nhc_flag(struct zebra_vrf *zvrf, afi_t afi,
* of a particular VRF and address-family or a specific prefix.
*/
void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
- rnh_type_t type, struct prefix *p)
+ enum rnh_type type, struct prefix *p)
{
struct route_table *rnh_table;
struct route_node *nrn;
@@ -911,7 +912,7 @@ void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
}
void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
- rnh_type_t type, struct prefix *p)
+ enum rnh_type type, struct prefix *p)
{
struct route_table *table;
struct route_node *rn;
@@ -997,8 +998,8 @@ static int compare_state(struct route_entry *r1, struct route_entry *r2)
return 0;
}
-static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type,
- vrf_id_t vrf_id)
+static int send_client(struct rnh *rnh, struct zserv *client,
+ enum rnh_type type, vrf_id_t vrf_id)
{
struct stream *s;
struct route_entry *re;
@@ -1134,7 +1135,7 @@ static void print_rnh(struct route_node *rn, struct vty *vty)
}
static int zebra_cleanup_rnh_client(vrf_id_t vrf_id, afi_t afi,
- struct zserv *client, rnh_type_t type)
+ struct zserv *client, enum rnh_type type)
{
struct route_table *ntable;
struct route_node *nrn;
diff --git a/zebra/zebra_rnh.h b/zebra/zebra_rnh.h
index 6e2dab8d9f..f07e5bc791 100644
--- a/zebra/zebra_rnh.h
+++ b/zebra/zebra_rnh.h
@@ -31,7 +31,7 @@ extern "C" {
extern void zebra_rnh_init(void);
-static inline const char *rnh_type2str(rnh_type_t type)
+static inline const char *rnh_type2str(enum rnh_type type)
{
switch (type) {
case RNH_NEXTHOP_TYPE:
@@ -44,20 +44,20 @@ static inline const char *rnh_type2str(rnh_type_t type)
}
extern struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid,
- rnh_type_t type, bool *exists);
+ enum rnh_type type, bool *exists);
extern struct rnh *zebra_lookup_rnh(struct prefix *p, vrf_id_t vrfid,
- rnh_type_t type);
+ enum rnh_type type);
extern void zebra_free_rnh(struct rnh *rnh);
extern void zebra_add_rnh_client(struct rnh *rnh, struct zserv *client,
- rnh_type_t type, vrf_id_t vrfid);
+ enum rnh_type type, vrf_id_t vrfid);
extern void zebra_register_rnh_pseudowire(vrf_id_t, struct zebra_pw *);
extern void zebra_deregister_rnh_pseudowire(vrf_id_t, struct zebra_pw *);
extern void zebra_remove_rnh_client(struct rnh *rnh, struct zserv *client,
- rnh_type_t type);
+ enum rnh_type type);
extern void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
- rnh_type_t type, struct prefix *p);
+ enum rnh_type type, struct prefix *p);
extern void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
- rnh_type_t type, struct prefix *p);
+ enum rnh_type type, struct prefix *p);
extern char *rnh_str(struct rnh *rnh, char *buf, int size);
extern int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family);
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index ea2b6752b3..61fef8779f 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -66,6 +66,22 @@ zebra_router_table_entry_compare(const struct zebra_router_table *e1,
return (e1->safi - e2->safi);
}
+struct zebra_router_table *zebra_router_find_zrt(struct zebra_vrf *zvrf,
+ uint32_t tableid, afi_t afi,
+ safi_t safi)
+{
+ struct zebra_router_table finder;
+ struct zebra_router_table *zrt;
+
+ memset(&finder, 0, sizeof(finder));
+ finder.afi = afi;
+ finder.safi = safi;
+ finder.tableid = tableid;
+ finder.ns_id = zvrf->zns->ns_id;
+ zrt = RB_FIND(zebra_router_table_head, &zrouter.tables, &finder);
+
+ return zrt;
+}
struct route_table *zebra_router_find_table(struct zebra_vrf *zvrf,
uint32_t tableid, afi_t afi,
@@ -93,7 +109,7 @@ struct route_table *zebra_router_get_table(struct zebra_vrf *zvrf,
{
struct zebra_router_table finder;
struct zebra_router_table *zrt;
- rib_table_info_t *info;
+ struct rib_table_info *info;
memset(&finder, 0, sizeof(finder));
finder.afi = afi;
@@ -133,7 +149,7 @@ void zebra_router_show_table_summary(struct vty *vty)
vty_out(vty,
"---------------------------------------------------------------------------\n");
RB_FOREACH (zrt, zebra_router_table_head, &zrouter.tables) {
- rib_table_info_t *info = route_table_get_info(zrt->table);
+ struct rib_table_info *info = route_table_get_info(zrt->table);
vty_out(vty, "%-16s%5d %9d %7s %15s %8d %10lu\n", info->zvrf->vrf->name,
zrt->ns_id, info->zvrf->vrf->vrf_id,
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index 773e5a6415..863c5fa71c 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -186,6 +186,9 @@ extern void zebra_router_init(void);
extern void zebra_router_cleanup(void);
extern void zebra_router_terminate(void);
+extern struct zebra_router_table *zebra_router_find_zrt(struct zebra_vrf *zvrf,
+ uint32_t tableid,
+ afi_t afi, safi_t safi);
extern struct route_table *zebra_router_find_table(struct zebra_vrf *zvrf,
uint32_t tableid, afi_t afi,
safi_t safi);
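zebra_router_find_zrt() hands back the whole struct zebra_router_table, so a caller keeps the afi/safi/tableid metadata alongside the route table instead of the bare table pointer that zebra_router_find_table() returns. A hypothetical caller (not part of the patch, and assuming zvrf is a fully initialised zebra VRF with zvrf->zns set, since the lookup keys on its ns_id):

static struct zebra_router_table *
example_ipv4_unicast_zrt(struct zebra_vrf *zvrf)
{
	if (zvrf == NULL)
		return NULL;

	/* Look up the IPv4 unicast table belonging to this VRF. */
	return zebra_router_find_zrt(zvrf, zvrf->table_id, AFI_IP,
				     SAFI_UNICAST);
}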
diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h
index 268ee12a65..d262faa070 100644
--- a/zebra/zebra_vrf.h
+++ b/zebra/zebra_vrf.h
@@ -33,10 +33,10 @@ extern "C" {
#endif
/* MPLS (Segment Routing) global block */
-typedef struct mpls_srgb_t_ {
+struct mpls_srgb {
uint32_t start_label;
uint32_t end_label;
-} mpls_srgb_t;
+};
struct zebra_rmap {
char *name;
@@ -111,7 +111,7 @@ struct zebra_vrf {
struct route_table *fec_table[AFI_MAX];
/* MPLS Segment Routing Global block */
- mpls_srgb_t mpls_srgb;
+ struct mpls_srgb mpls_srgb;
/* Pseudowires. */
struct zebra_pw_head pseudowires;
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 8024db4ca7..9718b40d9d 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -357,7 +357,8 @@ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn,
const char *mcast_info = "";
if (mcast) {
- rib_table_info_t *info = srcdest_rnode_table_info(rn);
+ struct rib_table_info *info =
+ srcdest_rnode_table_info(rn);
mcast_info = (info->safi == SAFI_MULTICAST)
? " using Multicast RIB"
: " using Unicast RIB";
@@ -978,7 +979,7 @@ static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf,
unsigned short ospf_instance_id)
{
struct zebra_router_table *zrt;
- rib_table_info_t *info;
+ struct rib_table_info *info;
RB_FOREACH (zrt, zebra_router_table_head,
&zrouter.tables) {
@@ -1059,7 +1060,7 @@ DEFPY (show_ip_nht,
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
vrf_id_t vrf_id = VRF_DEFAULT;
struct prefix prefix, *p = NULL;
- rnh_type_t rtype;
+ enum rnh_type rtype;
if (strcmp(type, "nht") == 0)
rtype = RNH_NEXTHOP_TYPE;
@@ -1349,8 +1350,9 @@ DEFPY (show_nexthop_group,
else if (v6)
afi = AFI_IP6;
- if (vrf_is_backend_netns() && (vrf_name || vrf_all)) {
- vty_out(vty, "VRF subcommand does not make any sense in l3mdev based vrf's");
+ if (!vrf_is_backend_netns() && (vrf_name || vrf_all)) {
+ vty_out(vty,
+ "VRF subcommand does not make any sense in l3mdev based vrf's\n");
return CMD_WARNING;
}
@@ -1832,8 +1834,8 @@ static void vty_show_ip_route_summary(struct vty *vty,
if (!use_json)
vty_out(vty, "%-20s %-20s %s (vrf %s)\n", "Route Source",
"Routes", "FIB",
- zvrf_name(((rib_table_info_t *)route_table_get_info(
- table))
+ zvrf_name(((struct rib_table_info *)
+ route_table_get_info(table))
->zvrf));
for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
@@ -1980,8 +1982,8 @@ static void vty_show_ip_route_summary_prefix(struct vty *vty,
if (!use_json)
vty_out(vty, "%-20s %-20s %s (vrf %s)\n", "Route Source",
"Prefix Routes", "FIB",
- zvrf_name(((rib_table_info_t *)route_table_get_info(
- table))
+ zvrf_name(((struct rib_table_info *)
+ route_table_get_info(table))
->zvrf));
for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 5506c4299d..9d442899f1 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -72,7 +72,7 @@ struct client_gr_info {
enum zserv_client_capabilities capabilities;
/* GR commands */
- bool delete;
+ bool do_delete;
bool gr_enable;
bool stale_client;