summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/bgp_attr.c2
-rw-r--r--bgpd/bgp_clist.c31
-rw-r--r--bgpd/bgp_dump.c8
-rw-r--r--bgpd/bgp_dump.h1
-rw-r--r--bgpd/bgp_fsm.c8
-rw-r--r--bgpd/bgp_label.c15
-rw-r--r--bgpd/bgp_labelpool.c51
-rw-r--r--bgpd/bgp_mplsvpn.c221
-rw-r--r--bgpd/bgp_mplsvpn.h2
-rw-r--r--bgpd/bgp_nht.c2
-rw-r--r--bgpd/bgp_packet.c20
-rw-r--r--bgpd/bgp_packet.h12
-rw-r--r--bgpd/bgp_pbr.c21
-rw-r--r--bgpd/bgp_pbr.h1
-rw-r--r--bgpd/bgp_route.c26
-rw-r--r--bgpd/bgp_route.h9
-rw-r--r--bgpd/bgp_routemap.c87
-rw-r--r--bgpd/bgp_updgrp.c12
-rw-r--r--bgpd/bgp_vty.c56
-rw-r--r--bgpd/bgp_zebra.c1
-rw-r--r--bgpd/bgpd.c62
-rw-r--r--bgpd/bgpd.h18
-rwxr-xr-xconfigure.ac2
-rw-r--r--debian/frr.install1
-rw-r--r--doc/developer/building-frr-for-omnios.rst2
-rw-r--r--doc/developer/topotests-jsontopo.rst475
-rw-r--r--doc/developer/topotests.rst2
-rw-r--r--doc/user/basic.rst28
-rw-r--r--eigrpd/eigrp_const.h2
-rw-r--r--fpm/fpm.h4
-rw-r--r--isisd/isis_misc.c4
-rw-r--r--lib/command.c1
-rw-r--r--lib/command.h8
-rw-r--r--lib/lib_errors.c6
-rw-r--r--lib/lib_errors.h1
-rw-r--r--lib/libfrr.c2
-rw-r--r--lib/linklist.h2
-rw-r--r--lib/log.c195
-rw-r--r--lib/log.h9
-rw-r--r--lib/log_vty.c97
-rw-r--r--lib/log_vty.h24
-rw-r--r--lib/memory_vty.c3
-rw-r--r--lib/nexthop.c26
-rw-r--r--lib/nexthop.h6
-rw-r--r--lib/nexthop_group.c70
-rw-r--r--lib/nexthop_group.h4
-rw-r--r--lib/nexthop_group_private.h45
-rw-r--r--lib/northbound.c7
-rw-r--r--lib/pbr.h1
-rw-r--r--lib/resolver.c (renamed from nhrpd/resolver.c)112
-rw-r--r--lib/resolver.h25
-rw-r--r--lib/subdir.am20
-rw-r--r--lib/table.h4
-rw-r--r--nhrpd/nhrp_errors.c6
-rw-r--r--nhrpd/nhrp_errors.h1
-rw-r--r--nhrpd/nhrp_main.c2
-rw-r--r--nhrpd/nhrpd.h10
-rw-r--r--nhrpd/subdir.am4
-rw-r--r--ospfd/ospf_spf.c4
-rw-r--r--ospfd/ospf_vty.c48
-rw-r--r--ospfd/ospfd.c8
-rw-r--r--pbrd/pbr_nht.c7
-rw-r--r--pbrd/pbr_vty.c3
-rw-r--r--pimd/pim_assert.c4
-rw-r--r--pimd/pim_bsm.c8
-rw-r--r--pimd/pim_cmd.c24
-rw-r--r--pimd/pim_iface.c2
-rw-r--r--pimd/pim_ifchannel.c8
-rw-r--r--pimd/pim_macro.c22
-rw-r--r--pimd/pim_register.c12
-rw-r--r--pimd/pim_rpf.c2
-rw-r--r--pimd/pim_upstream.c14
-rw-r--r--pimd/pim_vxlan.c2
-rw-r--r--python/clidef.py4
-rw-r--r--ripd/rip_cli.c8
-rw-r--r--tests/topotests/bgp-basic-functionality-topo1/__init__.py0
-rw-r--r--tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json172
-rwxr-xr-xtests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py595
-rw-r--r--tests/topotests/bgp-path-attributes-topo1/__init__.py0
-rw-r--r--tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json220
-rwxr-xr-xtests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py1078
-rw-r--r--tests/topotests/bgp-prefix-list-topo1/__init__.py0
-rw-r--r--tests/topotests/bgp-prefix-list-topo1/prefix_lists.json123
-rwxr-xr-xtests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py1450
-rw-r--r--tests/topotests/bgp_multiview_topo1/README.md2
-rwxr-xr-xtests/topotests/example-topojson-test/__init__.py0
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py0
-rw-r--r--tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json152
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py194
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py0
-rw-r--r--tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json153
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py190
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py0
-rw-r--r--tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json161
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py205
-rw-r--r--tests/topotests/lib/bgp.py1521
-rw-r--r--tests/topotests/lib/common_config.py1391
-rw-r--r--tests/topotests/lib/topojson.py193
-rw-r--r--tests/topotests/lib/topotest.py2
-rw-r--r--tests/topotests/ospf6-topo1/README.md2
-rw-r--r--tests/topotests/pytest.ini7
-rw-r--r--vrrpd/vrrp_packet.c4
-rw-r--r--vrrpd/vrrp_zebra.c2
-rw-r--r--vtysh/vtysh.c186
-rw-r--r--vtysh/vtysh_config.c5
-rw-r--r--zebra/interface.h18
-rw-r--r--zebra/irdp.h3
-rw-r--r--zebra/irdp_interface.c6
-rw-r--r--zebra/irdp_packet.c12
-rw-r--r--zebra/label_manager.c13
-rw-r--r--zebra/redistribute.c148
-rw-r--r--zebra/redistribute.h14
-rw-r--r--zebra/rib.h2
-rw-r--r--zebra/rt_netlink.c3
-rw-r--r--zebra/zapi_msg.c1
-rw-r--r--zebra/zebra_errors.c8
-rw-r--r--zebra/zebra_errors.h1
-rw-r--r--zebra/zebra_fpm.c10
-rw-r--r--zebra/zebra_fpm_netlink.c4
-rw-r--r--zebra/zebra_nhg.c3
-rw-r--r--zebra/zebra_pbr.c7
-rw-r--r--zebra/zebra_pbr.h1
-rw-r--r--zebra/zebra_rib.c32
-rw-r--r--zebra/zebra_vty.c9
-rw-r--r--zebra/zebra_vxlan.c19
125 files changed, 9906 insertions, 473 deletions
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index d46623c9d2..35946444dd 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -2974,6 +2974,8 @@ size_t bgp_packet_mpattr_prefix_size(afi_t afi, safi_t safi, struct prefix *p)
int size = PSIZE(p->prefixlen);
if (safi == SAFI_MPLS_VPN)
size += 88;
+ else if (safi == SAFI_LABELED_UNICAST)
+ size += BGP_LABEL_BYTES;
else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
size += 232; // TODO: Maximum possible for type-2, type-3 and
// type-5
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c
index ce617fe6b5..ff2ea6f7cd 100644
--- a/bgpd/bgp_clist.c
+++ b/bgpd/bgp_clist.c
@@ -27,6 +27,7 @@
#include "filter.h"
#include "stream.h"
#include "jhash.h"
+#include "frrstr.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_community.h"
@@ -1026,6 +1027,33 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom,
return lcom;
}
+/* Helper to check that every octet does not exceed UINT_MAX */
+static int lcommunity_list_valid(const char *community)
+{
+ int octets = 0;
+ char **splits;
+ int num;
+
+ frrstr_split(community, ":", &splits, &num);
+
+ for (int i = 0; i < num; i++) {
+ if (strtoul(splits[i], NULL, 10) > UINT_MAX)
+ return 0;
+
+ if (strlen(splits[i]) == 0)
+ return 0;
+
+ octets++;
+ XFREE(MTYPE_TMP, splits[i]);
+ }
+ XFREE(MTYPE_TMP, splits);
+
+ if (octets < 3)
+ return 0;
+
+ return 1;
+}
+
/* Set lcommunity-list. */
int lcommunity_list_set(struct community_list_handler *ch, const char *name,
const char *str, int direct, int style)
@@ -1054,6 +1082,9 @@ int lcommunity_list_set(struct community_list_handler *ch, const char *name,
}
if (str) {
+ if (!lcommunity_list_valid(str))
+ return COMMUNITY_LIST_ERR_MALFORMED_VAL;
+
if (style == LARGE_COMMUNITY_LIST_STANDARD)
lcom = lcommunity_str2com(str);
else
diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c
index 751140850a..7ea6ae586b 100644
--- a/bgpd/bgp_dump.c
+++ b/bgpd/bgp_dump.c
@@ -37,6 +37,7 @@
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_dump.h"
#include "bgpd/bgp_errors.h"
+#include "bgpd/bgp_packet.h"
enum bgp_dump_type {
BGP_DUMP_ALL,
@@ -555,7 +556,8 @@ static void bgp_dump_packet_func(struct bgp_dump *bgp_dump, struct peer *peer,
}
/* Called from bgp_packet.c when BGP packet is received. */
-void bgp_dump_packet(struct peer *peer, int type, struct stream *packet)
+static int bgp_dump_packet(struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *packet)
{
/* bgp_dump_all. */
bgp_dump_packet_func(&bgp_dump_all, peer, packet);
@@ -563,6 +565,7 @@ void bgp_dump_packet(struct peer *peer, int type, struct stream *packet)
/* bgp_dump_updates. */
if (type == BGP_MSG_UPDATE)
bgp_dump_packet_func(&bgp_dump_updates, peer, packet);
+ return 0;
}
static unsigned int bgp_dump_parse_time(const char *str)
@@ -862,6 +865,8 @@ void bgp_dump_init(void)
install_element(CONFIG_NODE, &dump_bgp_all_cmd);
install_element(CONFIG_NODE, &no_dump_bgp_all_cmd);
+
+ hook_register(bgp_packet_dump, bgp_dump_packet);
}
void bgp_dump_finish(void)
@@ -872,4 +877,5 @@ void bgp_dump_finish(void)
stream_free(bgp_dump_obuf);
bgp_dump_obuf = NULL;
+ hook_unregister(bgp_packet_dump, bgp_dump_packet);
}
diff --git a/bgpd/bgp_dump.h b/bgpd/bgp_dump.h
index f73081b2e2..5ec0561b05 100644
--- a/bgpd/bgp_dump.h
+++ b/bgpd/bgp_dump.h
@@ -52,6 +52,5 @@
extern void bgp_dump_init(void);
extern void bgp_dump_finish(void);
extern void bgp_dump_state(struct peer *, int, int);
-extern void bgp_dump_packet(struct peer *, int, struct stream *);
#endif /* _QUAGGA_BGP_DUMP_H */
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index dd765731dc..4348e6b240 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -184,9 +184,11 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
EC_BGP_PKT_PROCESS,
"[%s] Dropping pending packet on connection transfer:",
peer->host);
- uint16_t type = stream_getc_from(peer->curr,
- BGP_MARKER_SIZE + 2);
- bgp_dump_packet(peer, type, peer->curr);
+ /* there used to be a bgp_packet_dump call here, but
+ * that's extremely confusing since there's no way to
+ * identify the packet in MRT dumps or BMP as dropped
+ * due to connection transfer.
+ */
stream_free(peer->curr);
peer->curr = NULL;
}
diff --git a/bgpd/bgp_label.c b/bgpd/bgp_label.c
index 9511650842..489ac6ea9f 100644
--- a/bgpd/bgp_label.c
+++ b/bgpd/bgp_label.c
@@ -130,10 +130,21 @@ mpls_label_t bgp_adv_label(struct bgp_node *rn, struct bgp_path_info *pi,
int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid,
bool allocated)
{
- struct bgp_path_info *pi = (struct bgp_path_info *)labelid;
- struct bgp_node *rn = (struct bgp_node *)pi->net;
+ struct bgp_path_info *pi;
+ struct bgp_node *rn;
char addr[PREFIX_STRLEN];
+ pi = labelid;
+ /* Is this path still valid? */
+ if (!bgp_path_info_unlock(pi)) {
+ if (BGP_DEBUG(labelpool, LABELPOOL))
+ zlog_debug(
+ "%s: bgp_path_info is no longer valid, ignoring",
+ __func__);
+ return -1;
+ }
+
+ rn = pi->net;
prefix2str(&rn->p, addr, PREFIX_STRLEN);
if (BGP_DEBUG(labelpool, LABELPOOL))
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 71c0c8c7c6..7518f02acf 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -34,6 +34,7 @@
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
+#include "bgpd/bgp_route.h"
/*
* Definitions and external declarations.
@@ -180,9 +181,24 @@ void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
lp->callback_q->spec.max_retries = 0;
}
+/* check if a label callback was for a BGP LU path, and if so, unlock it */
+static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
+{
+ if (lcb->type == LP_TYPE_BGP_LU)
+ bgp_path_info_unlock(lcb->labelid);
+}
+
+/* check if a label callback was for a BGP LU path, and if so, lock it */
+static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
+{
+ if (lcb->type == LP_TYPE_BGP_LU)
+ bgp_path_info_lock(lcb->labelid);
+}
+
void bgp_lp_finish(void)
{
struct lp_fifo *lf;
+ struct work_queue_item *item, *titem;
if (!lp)
return;
@@ -195,10 +211,21 @@ void bgp_lp_finish(void)
list_delete(&lp->chunks);
- while ((lf = lp_fifo_pop(&lp->requests)))
+ while ((lf = lp_fifo_pop(&lp->requests))) {
+ check_bgp_lu_cb_unlock(&lf->lcb);
XFREE(MTYPE_BGP_LABEL_FIFO, lf);
+ }
lp_fifo_fini(&lp->requests);
+ /* we must unlock path infos for LU callbacks; but we cannot do that
+ * in the deletion callback of the workqueue, as that is also called
+ * to remove an element from the queue after it has been run, resulting
+ * in a double unlock. Hence we need to iterate over our queues and
+ * lists and manually perform the unlocking (ugh)
+ */
+ STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
+ check_bgp_lu_cb_unlock(item->data);
+
work_queue_free_and_null(&lp->callback_q);
lp = NULL;
@@ -328,6 +355,9 @@ void bgp_lp_get(
q->labelid = lcb->labelid;
q->allocated = true;
+ /* if this is a LU request, lock path info before queueing */
+ check_bgp_lu_cb_lock(lcb);
+
work_queue_add(lp->callback_q, q);
return;
@@ -353,13 +383,16 @@ void bgp_lp_get(
sizeof(struct lp_fifo));
lf->lcb = *lcb;
+ /* if this is a LU request, lock path info before queueing */
+ check_bgp_lu_cb_lock(lcb);
+
lp_fifo_add_tail(&lp->requests, lf);
if (lp_fifo_count(&lp->requests) > lp->pending_count) {
- if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
- lp->pending_count += LP_CHUNK_SIZE;
+ if (!zclient || zclient->sock < 0)
return;
- }
+ if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE))
+ lp->pending_count += LP_CHUNK_SIZE;
}
}
@@ -436,6 +469,10 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
__func__, labelid,
lcb->label, lcb->label, lcb);
}
+ /* if this was a BGP_LU request, unlock path info node
+ */
+ check_bgp_lu_cb_unlock(lcb);
+
goto finishedrequest;
}
@@ -510,8 +547,10 @@ void bgp_lp_event_zebra_up(void)
lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;
- if (!lm_init_ok)
+ if (!lm_init_ok) {
zlog_err("%s: label manager connection error", __func__);
+ return;
+ }
zclient_send_get_label_chunk(zclient, 0, labels_needed);
lp->pending_count = labels_needed;
@@ -544,6 +583,7 @@ void bgp_lp_event_zebra_up(void)
q->label = lcb->label;
q->labelid = lcb->labelid;
q->allocated = false;
+ check_bgp_lu_cb_lock(lcb);
work_queue_add(lp->callback_q, q);
lcb->label = MPLS_LABEL_NONE;
@@ -556,6 +596,7 @@ void bgp_lp_event_zebra_up(void)
sizeof(struct lp_fifo));
lf->lcb = *lcb;
+ check_bgp_lu_cb_lock(lcb);
lp_fifo_add_tail(&lp->requests, lf);
}
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 355bc93320..1156810510 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1614,11 +1614,13 @@ void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
{
const char *export_name;
vpn_policy_direction_t idir, edir;
- char *vname;
- char buf[1000];
+ char *vname, *tmp_name;
+ char buf[RD_ADDRSTRLEN];
struct ecommunity *ecom;
bool first_export = false;
int debug;
+ struct listnode *node;
+ bool is_inst_match = false;
export_name = to_bgp->name ? to_bgp->name : VRF_DEFAULT_NAME;
idir = BGP_VPN_POLICY_DIR_FROMVPN;
@@ -1634,13 +1636,41 @@ void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
vname = (from_bgp->name ? XSTRDUP(MTYPE_TMP, from_bgp->name)
: XSTRDUP(MTYPE_TMP, VRF_DEFAULT_NAME));
- listnode_add(to_bgp->vpn_policy[afi].import_vrf, vname);
+ /* Check the import_vrf list of destination vrf for the source vrf name,
+ * insert otherwise.
+ */
+ for (ALL_LIST_ELEMENTS_RO(to_bgp->vpn_policy[afi].import_vrf,
+ node, tmp_name)) {
+ if (strcmp(vname, tmp_name) == 0) {
+ is_inst_match = true;
+ break;
+ }
+ }
+ if (!is_inst_match)
+ listnode_add(to_bgp->vpn_policy[afi].import_vrf,
+ vname);
- if (!listcount(from_bgp->vpn_policy[afi].export_vrf))
- first_export = true;
+ /* Check if the source vrf already exports to any vrf,
+ * first time export requires to setup auto derived RD/RT values.
+ * Add the destination vrf name to export vrf list if it is
+ * not present.
+ */
+ is_inst_match = false;
vname = XSTRDUP(MTYPE_TMP, export_name);
- listnode_add(from_bgp->vpn_policy[afi].export_vrf, vname);
-
+ if (!listcount(from_bgp->vpn_policy[afi].export_vrf)) {
+ first_export = true;
+ } else {
+ for (ALL_LIST_ELEMENTS_RO(from_bgp->vpn_policy[afi].export_vrf,
+ node, tmp_name)) {
+ if (strcmp(vname, tmp_name) == 0) {
+ is_inst_match = true;
+ break;
+ }
+ }
+ }
+ if (!is_inst_match)
+ listnode_add(from_bgp->vpn_policy[afi].export_vrf,
+ vname);
/* Update import RT for current VRF using export RT of the VRF we're
* importing from. First though, make sure "import_vrf" has that
* set.
@@ -1702,7 +1732,7 @@ void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
const char *export_name, *tmp_name;
vpn_policy_direction_t idir, edir;
char *vname;
- struct ecommunity *ecom;
+ struct ecommunity *ecom = NULL;
struct listnode *node;
int debug;
@@ -1747,10 +1777,12 @@ void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
if (to_bgp->vpn_policy[afi].import_vrf->count == 0) {
UNSET_FLAG(to_bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT);
- ecommunity_free(&to_bgp->vpn_policy[afi].rtlist[idir]);
+ if (to_bgp->vpn_policy[afi].rtlist[idir])
+ ecommunity_free(&to_bgp->vpn_policy[afi].rtlist[idir]);
} else {
ecom = from_bgp->vpn_policy[afi].rtlist[edir];
- ecommunity_del_val(to_bgp->vpn_policy[afi].rtlist[idir],
+ if (ecom)
+ ecommunity_del_val(to_bgp->vpn_policy[afi].rtlist[idir],
(struct ecommunity_val *)ecom->val);
vpn_leak_postchange(idir, afi, bgp_get_default(), to_bgp);
}
@@ -1783,8 +1815,11 @@ void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
*
* import_vrf and export_vrf must match in having
* the in/out names as appropriate.
+ * export_vrf list could have been cleaned up
+ * as part of no router bgp source instance.
*/
- assert(vname);
+ if (!vname)
+ return;
listnode_delete(from_bgp->vpn_policy[afi].export_vrf, vname);
XFREE(MTYPE_TMP, vname);
@@ -2471,3 +2506,167 @@ void vpn_leak_postchange_all(void)
bgp);
}
}
+
+/* When a bgp vrf instance is unconfigured, remove its routes
+ * from the VPN table and this vrf could be importing routes from other
+ * bgp vrf instances, unimport them.
+ * VRF X and VRF Y are exporting routes to each other.
+ * When VRF X is deleted, unimport its routes from all target vrfs,
+ * also VRF Y should unimport its routes from VRF X table.
+ * This will ensure VPN table is cleaned up appropriately.
+ */
+int bgp_vpn_leak_unimport(struct bgp *from_bgp, struct vty *vty)
+{
+ struct bgp *to_bgp;
+ const char *tmp_name;
+ char *vname;
+ struct listnode *node, *next;
+ safi_t safi = SAFI_UNICAST;
+ afi_t afi;
+ bool is_vrf_leak_bind;
+ int debug;
+
+ if (from_bgp->inst_type != BGP_INSTANCE_TYPE_VRF)
+ return 0;
+
+ debug = (BGP_DEBUG(vpn, VPN_LEAK_TO_VRF) |
+ BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF));
+
+ tmp_name = from_bgp->name ? from_bgp->name : VRF_DEFAULT_NAME;
+
+ for (afi = 0; afi < AFI_MAX; ++afi) {
+ /* vrf leak is for IPv4 and IPv6 Unicast only */
+ if (afi != AFI_IP && afi != AFI_IP6)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, next, to_bgp)) {
+ if (from_bgp == to_bgp)
+ continue;
+
+ /* Unimport and remove source vrf from the
+ * other vrfs import list.
+ */
+ struct vpn_policy *to_vpolicy;
+
+ is_vrf_leak_bind = false;
+ to_vpolicy = &(to_bgp->vpn_policy[afi]);
+ for (ALL_LIST_ELEMENTS_RO(to_vpolicy->import_vrf, node,
+ vname)) {
+ if (strcmp(vname, tmp_name) == 0) {
+ is_vrf_leak_bind = true;
+ break;
+ }
+ }
+ /* skip this bgp instance as there is no leak to this
+ * vrf instance.
+ */
+ if (!is_vrf_leak_bind)
+ continue;
+
+ if (debug)
+ zlog_debug("%s: unimport routes from %s to_bgp %s afi %s import vrfs count %u",
+ __func__, from_bgp->name_pretty,
+ to_bgp->name_pretty, afi2str(afi),
+ to_vpolicy->import_vrf->count);
+
+ vrf_unimport_from_vrf(to_bgp, from_bgp, afi, safi);
+
+ /* readd vrf name as unimport removes import vrf name
+ * from the destination vrf's import list where the
+ * `import vrf` configuration still exist.
+ */
+ vname = XSTRDUP(MTYPE_TMP, tmp_name);
+ listnode_add(to_bgp->vpn_policy[afi].import_vrf,
+ vname);
+ SET_FLAG(to_bgp->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT);
+
+ /* If to_bgp exports its routes to the bgp vrf
+ * which is being deleted, un-import the
+ * to_bgp routes from VPN.
+ */
+ for (ALL_LIST_ELEMENTS_RO(to_bgp->vpn_policy[afi]
+ .export_vrf, node,
+ vname)) {
+ if (strcmp(vname, tmp_name) == 0) {
+ vrf_unimport_from_vrf(from_bgp, to_bgp,
+ afi, safi);
+ break;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/* When a router bgp is configured, there could be a bgp vrf
+ * instance importing routes from this newly configured
+ * bgp vrf instance. Export routes from configured
+ * bgp vrf to VPN.
+ * VRF Y has import from bgp vrf x,
+ * when a bgp vrf x instance is created, export its routes
+ * to VRF Y instance.
+ */
+void bgp_vpn_leak_export(struct bgp *from_bgp)
+{
+ afi_t afi;
+ const char *export_name;
+ char *vname;
+ struct listnode *node, *next;
+ struct ecommunity *ecom;
+ vpn_policy_direction_t idir, edir;
+ safi_t safi = SAFI_UNICAST;
+ struct bgp *to_bgp;
+ int debug;
+
+ debug = (BGP_DEBUG(vpn, VPN_LEAK_TO_VRF) |
+ BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF));
+
+ idir = BGP_VPN_POLICY_DIR_FROMVPN;
+ edir = BGP_VPN_POLICY_DIR_TOVPN;
+
+ export_name = (from_bgp->name ? XSTRDUP(MTYPE_TMP, from_bgp->name)
+ : XSTRDUP(MTYPE_TMP, VRF_DEFAULT_NAME));
+
+ for (afi = 0; afi < AFI_MAX; ++afi) {
+ /* vrf leak is for IPv4 and IPv6 Unicast only */
+ if (afi != AFI_IP && afi != AFI_IP6)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, next, to_bgp)) {
+ if (from_bgp == to_bgp)
+ continue;
+
+ /* bgp instance has import list, check to see if newly
+ * configured bgp instance is in the list.
+ */
+ struct vpn_policy *to_vpolicy;
+
+ to_vpolicy = &(to_bgp->vpn_policy[afi]);
+ for (ALL_LIST_ELEMENTS_RO(to_vpolicy->import_vrf,
+ node, vname)) {
+ if (strcmp(vname, export_name) != 0)
+ continue;
+
+ if (debug)
+ zlog_debug("%s: found from_bgp %s in to_bgp %s import list, import routes.",
+ __func__,
+ export_name, to_bgp->name_pretty);
+
+ ecom = from_bgp->vpn_policy[afi].rtlist[edir];
+ /* remove import rt, it will be readded
+ * as part of import from vrf.
+ */
+ if (ecom)
+ ecommunity_del_val(
+ to_vpolicy->rtlist[idir],
+ (struct ecommunity_val *)
+ ecom->val);
+ vrf_import_from_vrf(to_bgp, from_bgp,
+ afi, safi);
+ break;
+
+ }
+ }
+ }
+}
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 2a6c0e1708..3234f7fc9d 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -266,5 +266,7 @@ extern vrf_id_t get_first_vrf_for_redirect_with_rt(struct ecommunity *eckey);
extern void vpn_leak_postchange_all(void);
extern void vpn_handle_router_id_update(struct bgp *bgp, bool withdraw,
bool is_config);
+extern int bgp_vpn_leak_unimport(struct bgp *from_bgp, struct vty *vty);
+extern void bgp_vpn_leak_export(struct bgp *from_bgp);
#endif /* _QUAGGA_BGP_MPLSVPN_H */
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index fdfa15b445..74c45ed447 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -163,7 +163,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
afi = BGP_ATTR_NEXTHOP_AFI_IP6(pi->attr) ? AFI_IP6
: AFI_IP;
- /* This will return TRUE if the global IPv6 NH is a link local
+ /* This will return true if the global IPv6 NH is a link local
* addr */
if (make_prefix(afi, pi, &p) < 0)
return 1;
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 5654fe5329..99522a6522 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -63,6 +63,16 @@
#include "bgpd/bgp_keepalives.h"
#include "bgpd/bgp_flowspec.h"
+DEFINE_HOOK(bgp_packet_dump,
+ (struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *s),
+ (peer, type, size, s))
+
+DEFINE_HOOK(bgp_packet_send,
+ (struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *s),
+ (peer, type, size, s))
+
/**
* Sets marker and type fields for a BGP message.
*
@@ -542,6 +552,7 @@ void bgp_open_send(struct peer *peer)
/* Dump packet if debug option is set. */
/* bgp_packet_dump (s); */
+ hook_call(bgp_packet_send, peer, BGP_MSG_OPEN, stream_get_endp(s), s);
/* Add packet to the peer. */
bgp_packet_add(peer, s);
@@ -681,9 +692,9 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
* in place because we are sometimes called with a doppelganger peer,
* who tends to have a plethora of fields nulled out.
*/
- if (peer->curr && peer->last_reset_cause_size) {
+ if (peer->curr) {
size_t packetsize = stream_get_endp(peer->curr);
- assert(packetsize <= peer->last_reset_cause_size);
+ assert(packetsize <= sizeof(peer->last_reset_cause));
memcpy(peer->last_reset_cause, peer->curr->data, packetsize);
peer->last_reset_cause_size = packetsize;
}
@@ -1518,6 +1529,8 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
|| BGP_DEBUG(update, UPDATE_PREFIX)) {
ret = bgp_dump_attr(&attr, peer->rcvd_attr_str, BUFSIZ);
+ peer->stat_upd_7606++;
+
if (attr_parse_ret == BGP_ATTR_PARSE_WITHDRAW)
flog_err(
EC_BGP_UPDATE_RCV,
@@ -2240,8 +2253,7 @@ int bgp_process_packet(struct thread *thread)
size = stream_getw(peer->curr);
type = stream_getc(peer->curr);
- /* BGP packet dump function. */
- bgp_dump_packet(peer, type, peer->curr);
+ hook_call(bgp_packet_dump, peer, type, size, peer->curr);
/* adjust size to exclude the marker + length + type */
size -= BGP_HEADER_SIZE;
diff --git a/bgpd/bgp_packet.h b/bgpd/bgp_packet.h
index 06a190585b..e8eacee589 100644
--- a/bgpd/bgp_packet.h
+++ b/bgpd/bgp_packet.h
@@ -21,6 +21,18 @@
#ifndef _QUAGGA_BGP_PACKET_H
#define _QUAGGA_BGP_PACKET_H
+#include "hook.h"
+
+DECLARE_HOOK(bgp_packet_dump,
+ (struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *s),
+ (peer, type, size, s))
+
+DECLARE_HOOK(bgp_packet_send,
+ (struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *s),
+ (peer, type, size, s))
+
#define BGP_NLRI_LENGTH 1U
#define BGP_TOTAL_ATTR_LEN 2U
#define BGP_UNFEASIBLE_LEN 2U
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index 5eef6ac6cc..2d50d1c9ea 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -698,6 +698,7 @@ int bgp_pbr_build_and_validate_entry(struct prefix *p,
int valid_prefix = 0;
afi_t afi = AFI_IP;
struct bgp_pbr_entry_action *api_action_redirect_ip = NULL;
+ bool discard_action_found = false;
/* extract match from flowspec entries */
ret = bgp_flowspec_match_rules_fill((uint8_t *)p->u.prefix_flowspec.ptr,
@@ -805,10 +806,22 @@ int bgp_pbr_build_and_validate_entry(struct prefix *p,
api_action);
if (ret != 0)
continue;
+ if ((api_action->action == ACTION_TRAFFICRATE) &&
+ api->actions[i].u.r.rate == 0)
+ discard_action_found = true;
}
api->action_num++;
}
}
+ /* if ECOMMUNITY_TRAFFIC_RATE = 0 as action
+ * then reduce the API action list to that action
+ */
+ if (api->action_num > 1 && discard_action_found) {
+ api->action_num = 1;
+ memset(&api->actions[0], 0,
+ sizeof(struct bgp_pbr_entry_action));
+ api->actions[0].action = ACTION_TRAFFICRATE;
+ }
/* validate if incoming matc/action is compatible
* with our policy routing engine
@@ -977,6 +990,7 @@ uint32_t bgp_pbr_match_hash_key(const void *arg)
key = jhash(&pbm->tcp_mask_flags, 2, key);
key = jhash(&pbm->dscp_value, 1, key);
key = jhash(&pbm->fragment, 1, key);
+ key = jhash(&pbm->protocol, 1, key);
return jhash_1word(pbm->type, key);
}
@@ -1016,6 +1030,9 @@ bool bgp_pbr_match_hash_equal(const void *arg1, const void *arg2)
if (r1->fragment != r2->fragment)
return false;
+
+ if (r1->protocol != r2->protocol)
+ return false;
return true;
}
@@ -2162,6 +2179,10 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp,
temp.flags |= MATCH_FRAGMENT_INVERSE_SET;
temp.fragment = bpf->fragment->val;
}
+ if (bpf->protocol) {
+ temp.protocol = bpf->protocol;
+ temp.flags |= MATCH_PROTOCOL_SET;
+ }
temp.action = bpa;
bpm = hash_get(bgp->pbr_match_hash, &temp,
bgp_pbr_match_alloc_intern);
diff --git a/bgpd/bgp_pbr.h b/bgpd/bgp_pbr.h
index b368d8892d..393b08da48 100644
--- a/bgpd/bgp_pbr.h
+++ b/bgpd/bgp_pbr.h
@@ -186,6 +186,7 @@ struct bgp_pbr_match {
uint16_t tcp_mask_flags;
uint8_t dscp_value;
uint8_t fragment;
+ uint8_t protocol;
vrf_id_t vrf_id;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index a3aba447b5..aa02cc3c63 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -107,6 +107,12 @@ static const struct message bgp_pmsi_tnltype_str[] = {
#define VRFID_NONE_STR "-"
+DEFINE_HOOK(bgp_process,
+ (struct bgp *bgp, afi_t afi, safi_t safi,
+ struct bgp_node *bn, struct peer *peer, bool withdraw),
+ (bgp, afi, safi, bn, peer, withdraw))
+
+
struct bgp_node *bgp_afi_node_get(struct bgp_table *table, afi_t afi,
safi_t safi, struct prefix *p,
struct prefix_rd *prd)
@@ -2819,6 +2825,8 @@ void bgp_rib_remove(struct bgp_node *rn, struct bgp_path_info *pi,
if (!CHECK_FLAG(pi->flags, BGP_PATH_HISTORY))
bgp_path_info_delete(rn, pi); /* keep historical info */
+ hook_call(bgp_process, peer->bgp, afi, safi, rn, peer, true);
+
bgp_process(peer->bgp, rn, afi, safi);
}
@@ -3068,6 +3076,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
if (aspath_loop_check(attr->aspath, peer->change_local_as)
> aspath_loop_count) {
+ peer->stat_pfx_aspath_loop++;
reason = "as-path contains our own AS;";
goto filtered;
}
@@ -3088,6 +3097,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
|| (CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION)
&& aspath_loop_check(attr->aspath, bgp->confed_id)
> peer->allowas_in[afi][safi])) {
+ peer->stat_pfx_aspath_loop++;
reason = "as-path contains our own AS;";
goto filtered;
}
@@ -3096,18 +3106,21 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
/* Route reflector originator ID check. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID)
&& IPV4_ADDR_SAME(&bgp->router_id, &attr->originator_id)) {
+ peer->stat_pfx_originator_loop++;
reason = "originator is us;";
goto filtered;
}
/* Route reflector cluster ID check. */
if (bgp_cluster_filter(peer, attr)) {
+ peer->stat_pfx_cluster_loop++;
reason = "reflected from the same cluster;";
goto filtered;
}
/* Apply incoming filter. */
if (bgp_input_filter(peer, p, attr, afi, safi) == FILTER_DENY) {
+ peer->stat_pfx_filter++;
reason = "filter;";
goto filtered;
}
@@ -3138,6 +3151,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
* the attr (which takes over the memory references) */
if (bgp_input_modifier(peer, p, &new_attr, afi, safi, NULL)
== RMAP_DENY) {
+ peer->stat_pfx_filter++;
reason = "route-map;";
bgp_attr_flush(&new_attr);
goto filtered;
@@ -3163,12 +3177,14 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
/* next hop check. */
if (!CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)
&& bgp_update_martian_nexthop(bgp, afi, safi, &new_attr)) {
+ peer->stat_pfx_nh_invalid++;
reason = "martian or self next-hop;";
bgp_attr_flush(&new_attr);
goto filtered;
}
if (bgp_mac_entry_exists(p) || bgp_mac_exist(&attr->rmac)) {
+ peer->stat_pfx_nh_invalid++;
reason = "self mac;";
goto filtered;
}
@@ -3180,6 +3196,8 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
pi->uptime = bgp_clock();
same_attr = attrhash_cmp(pi->attr, attr_new);
+ hook_call(bgp_process, bgp, afi, safi, rn, peer, true);
+
/* Same attribute comes in. */
if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)
&& attrhash_cmp(pi->attr, attr_new)
@@ -3608,6 +3626,8 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
if (safi == SAFI_EVPN)
bgp_evpn_import_route(bgp, afi, safi, p, new);
+ hook_call(bgp_process, bgp, afi, safi, rn, peer, false);
+
/* Process change. */
bgp_process(bgp, rn, afi, safi);
@@ -3639,6 +3659,8 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id,
/* This BGP update is filtered. Log the reason then update BGP
entry. */
filtered:
+ hook_call(bgp_process, bgp, afi, safi, rn, peer, true);
+
if (bgp_debug_update(peer, p, NULL, 1)) {
if (!peer->rcvd_attr_printed) {
zlog_debug("%s rcvd UPDATE w/ attr: %s", peer->host,
@@ -3727,6 +3749,8 @@ int bgp_withdraw(struct peer *peer, struct prefix *p, uint32_t addpath_id,
if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG)
&& peer != bgp->peer_self)
if (!bgp_adj_in_unset(rn, peer, addpath_id)) {
+ peer->stat_pfx_dup_withdraw++;
+
if (bgp_debug_update(peer, p, NULL, 1)) {
bgp_debug_rdpfxpath2str(
afi, safi, prd, p, label, num_labels,
@@ -10992,7 +11016,7 @@ DEFUN (show_ip_bgp_instance_neighbor_prefix_counts,
if (!peer)
return CMD_WARNING;
- return bgp_peer_counts(vty, peer, AFI_IP, SAFI_UNICAST, uj);
+ return bgp_peer_counts(vty, peer, afi, safi, uj);
}
#ifdef KEEP_OLD_VPN_COMMANDS
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 0f2363dc6f..704cd39710 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -21,6 +21,9 @@
#ifndef _QUAGGA_BGP_ROUTE_H
#define _QUAGGA_BGP_ROUTE_H
+#include <stdbool.h>
+
+#include "hook.h"
#include "queue.h"
#include "nexthop.h"
#include "bgp_table.h"
@@ -447,6 +450,12 @@ static inline bool is_pi_family_matching(struct bgp_path_info *pi,
return false;
}
+/* called before bgp_process() */
+DECLARE_HOOK(bgp_process,
+ (struct bgp *bgp, afi_t afi, safi_t safi,
+ struct bgp_node *bn, struct peer *peer, bool withdraw),
+ (bgp, afi, safi, bn, peer, withdraw))
+
/* Prototypes. */
extern void bgp_rib_remove(struct bgp_node *rn, struct bgp_path_info *pi,
struct peer *peer, afi_t afi, safi_t safi);
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index a212523b19..24c8cc4424 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -60,6 +60,8 @@
#include "bgpd/bgp_evpn_private.h"
#include "bgpd/bgp_evpn_vty.h"
#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_pbr.h"
+#include "bgpd/bgp_flowspec_util.h"
#if ENABLE_BGP_VNC
#include "bgpd/rfapi/bgp_rfapi_cfg.h"
@@ -569,24 +571,67 @@ struct route_map_rule_cmd route_match_ip_route_source_cmd = {
"ip route-source", route_match_ip_route_source,
route_match_ip_route_source_compile, route_match_ip_route_source_free};
-/* `match ip address prefix-list PREFIX_LIST' */
+static route_map_result_t route_match_prefix_list_flowspec(afi_t afi,
+ struct prefix_list *plist,
+ const struct prefix *p)
+{
+ int ret;
+ struct bgp_pbr_entry_main api;
+
+ memset(&api, 0, sizeof(api));
+
+ /* extract match from flowspec entries */
+ ret = bgp_flowspec_match_rules_fill(
+ (uint8_t *)p->u.prefix_flowspec.ptr,
+ p->u.prefix_flowspec.prefixlen, &api);
+ if (ret < 0)
+ return RMAP_NOMATCH;
+ if (api.match_bitmask & PREFIX_DST_PRESENT ||
+ api.match_bitmask_iprule & PREFIX_DST_PRESENT) {
+ if (family2afi((&api.dst_prefix)->family) != afi)
+ return RMAP_NOMATCH;
+ return prefix_list_apply(plist, &api.dst_prefix) == PREFIX_DENY
+ ? RMAP_NOMATCH
+ : RMAP_MATCH;
+ } else if (api.match_bitmask & PREFIX_SRC_PRESENT ||
+ api.match_bitmask_iprule & PREFIX_SRC_PRESENT) {
+ if (family2afi((&api.src_prefix)->family) != afi)
+ return RMAP_NOMATCH;
+ return (prefix_list_apply(plist, &api.src_prefix) == PREFIX_DENY
+ ? RMAP_NOMATCH
+ : RMAP_MATCH);
+ }
+ return RMAP_NOMATCH;
+}
+/* `match ip address prefix-list PREFIX_LIST' */
static route_map_result_t
-route_match_ip_address_prefix_list(void *rule, const struct prefix *prefix,
- route_map_object_t type, void *object)
+route_match_address_prefix_list(void *rule, afi_t afi,
+ const struct prefix *prefix,
+ route_map_object_t type, void *object)
{
struct prefix_list *plist;
- if (type == RMAP_BGP && prefix->family == AF_INET) {
- plist = prefix_list_lookup(AFI_IP, (char *)rule);
- if (plist == NULL)
- return RMAP_NOMATCH;
+ if (type != RMAP_BGP)
+ return RMAP_NOMATCH;
- return (prefix_list_apply(plist, prefix) == PREFIX_DENY
- ? RMAP_NOMATCH
- : RMAP_MATCH);
- }
- return RMAP_NOMATCH;
+ plist = prefix_list_lookup(afi, (char *)rule);
+ if (plist == NULL)
+ return RMAP_NOMATCH;
+
+ if (prefix->family == AF_FLOWSPEC)
+ return route_match_prefix_list_flowspec(afi, plist,
+ prefix);
+ return (prefix_list_apply(plist, prefix) == PREFIX_DENY ? RMAP_NOMATCH
+ : RMAP_MATCH);
+}
+
+static route_map_result_t
+route_match_ip_address_prefix_list(void *rule, const struct prefix *prefix,
+ route_map_object_t type, void *object)
+{
+ return route_match_address_prefix_list(rule, AFI_IP, prefix, type,
+ object);
}
static void *route_match_ip_address_prefix_list_compile(const char *arg)
@@ -2540,18 +2585,8 @@ static route_map_result_t
route_match_ipv6_address_prefix_list(void *rule, const struct prefix *prefix,
route_map_object_t type, void *object)
{
- struct prefix_list *plist;
-
- if (type == RMAP_BGP && prefix->family == AF_INET6) {
- plist = prefix_list_lookup(AFI_IP6, (char *)rule);
- if (plist == NULL)
- return RMAP_NOMATCH;
-
- return (prefix_list_apply(plist, prefix) == PREFIX_DENY
- ? RMAP_NOMATCH
- : RMAP_MATCH);
- }
- return RMAP_NOMATCH;
+ return route_match_address_prefix_list(rule, AFI_IP6, prefix, type,
+ object);
}
static void *route_match_ipv6_address_prefix_list_compile(const char *arg)
@@ -2695,11 +2730,11 @@ route_set_ipv6_nexthop_prefer_global(void *rule, const struct prefix *prefix,
&& peer->su_remote
&& sockunion_family(peer->su_remote) == AF_INET6) {
/* Set next hop preference to global */
- path->attr->mp_nexthop_prefer_global = TRUE;
+ path->attr->mp_nexthop_prefer_global = true;
SET_FLAG(path->attr->rmap_change_flags,
BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED);
} else {
- path->attr->mp_nexthop_prefer_global = FALSE;
+ path->attr->mp_nexthop_prefer_global = false;
SET_FLAG(path->attr->rmap_change_flags,
BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED);
}
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index 82df1905ba..de7b05bdd9 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -829,7 +829,7 @@ void update_subgroup_inherit_info(struct update_subgroup *to,
*
* Delete a subgroup if it is ready to be deleted.
*
- * Returns TRUE if the subgroup was deleted.
+ * Returns true if the subgroup was deleted.
*/
static int update_subgroup_check_delete(struct update_subgroup *subgrp)
{
@@ -979,7 +979,7 @@ static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
/*
* update_subgroup_ready_for_merge
*
- * Returns TRUE if this subgroup is in a state that allows it to be
+ * Returns true if this subgroup is in a state that allows it to be
* merged into another subgroup.
*/
static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
@@ -1012,7 +1012,7 @@ static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
/*
* update_subgrp_can_merge_into
*
- * Returns TRUE if the first subgroup can merge into the second
+ * Returns true if the first subgroup can merge into the second
* subgroup.
*/
static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
@@ -1092,7 +1092,7 @@ static void update_subgroup_merge(struct update_subgroup *subgrp,
*
* Merge this subgroup into another subgroup if possible.
*
- * Returns TRUE if the subgroup has been merged. The subgroup pointer
+ * Returns true if the subgroup has been merged. The subgroup pointer
* should not be accessed in this case.
*/
int update_subgroup_check_merge(struct update_subgroup *subgrp,
@@ -1141,7 +1141,7 @@ static int update_subgroup_merge_check_thread_cb(struct thread *thread)
* @param force If true, the merge check will be triggered even if the
* subgroup doesn't currently look ready for a merge.
*
- * Returns TRUE if a merge check will be performed shortly.
+ * Returns true if a merge check will be performed shortly.
*/
int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
int force)
@@ -1788,7 +1788,7 @@ int update_group_refresh_default_originate_route_map(struct thread *thread)
*
* Refreshes routes out to a peer_af immediately.
*
- * If the combine parameter is TRUE, then this function will try to
+ * If the combine parameter is true, then this function will try to
* gather other peers in the subgroup for which a route announcement
* is pending and efficently announce routes to all of them.
*
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 9c3bf84213..d7f6b65384 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -925,7 +925,7 @@ DEFUN (no_auto_summary,
/* "router bgp" commands. */
DEFUN_NOSH (router_bgp,
router_bgp_cmd,
- "router bgp [(1-4294967295) [<view|vrf> VIEWVRFNAME]]",
+ "router bgp [(1-4294967295)$instasn [<view|vrf> VIEWVRFNAME]]",
ROUTER_STR
BGP_STR
AS_STR
@@ -999,6 +999,8 @@ DEFUN_NOSH (router_bgp,
if (is_new_bgp && inst_type == BGP_INSTANCE_TYPE_DEFAULT)
vpn_leak_postchange_all();
+ if (inst_type == BGP_INSTANCE_TYPE_VRF)
+ bgp_vpn_leak_export(bgp);
/* Pending: handle when user tries to change a view to vrf n vv.
*/
}
@@ -1013,7 +1015,7 @@ DEFUN_NOSH (router_bgp,
/* "no router bgp" commands. */
DEFUN (no_router_bgp,
no_router_bgp_cmd,
- "no router bgp [(1-4294967295) [<view|vrf> VIEWVRFNAME]]",
+ "no router bgp [(1-4294967295)$instasn [<view|vrf> VIEWVRFNAME]]",
NO_STR
ROUTER_STR
BGP_STR
@@ -1081,6 +1083,9 @@ DEFUN (no_router_bgp,
}
}
+ if (bgp_vpn_leak_unimport(bgp, vty))
+ return CMD_WARNING_CONFIG_FAILED;
+
bgp_delete(bgp);
return CMD_SUCCESS;
@@ -11313,15 +11318,19 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
json_object_array_add(json_import_vrfs,
json_object_new_string(vname));
+ json_object_object_add(json, "importFromVrfs",
+ json_import_vrfs);
dir = BGP_VPN_POLICY_DIR_FROMVPN;
- ecom_str = ecommunity_ecom2str(
+ if (bgp->vpn_policy[afi].rtlist[dir]) {
+ ecom_str = ecommunity_ecom2str(
bgp->vpn_policy[afi].rtlist[dir],
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
- json_object_object_add(json, "importFromVrfs",
- json_import_vrfs);
- json_object_string_add(json, "importRts", ecom_str);
-
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ json_object_string_add(json, "importRts",
+ ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ } else
+ json_object_string_add(json, "importRts",
+ "none");
}
if (!CHECK_FLAG(bgp->af_flags[afi][safi],
@@ -11345,12 +11354,16 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
buf1, RD_ADDRSTRLEN));
dir = BGP_VPN_POLICY_DIR_TOVPN;
- ecom_str = ecommunity_ecom2str(
+ if (bgp->vpn_policy[afi].rtlist[dir]) {
+ ecom_str = ecommunity_ecom2str(
bgp->vpn_policy[afi].rtlist[dir],
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
- json_object_string_add(json, "exportRts", ecom_str);
-
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ json_object_string_add(json, "exportRts",
+ ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ } else
+ json_object_string_add(json, "exportRts",
+ "none");
}
if (use_json) {
@@ -11383,12 +11396,16 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
vty_out(vty, " %s\n", vname);
dir = BGP_VPN_POLICY_DIR_FROMVPN;
- ecom_str = ecommunity_ecom2str(
+ ecom_str = NULL;
+ if (bgp->vpn_policy[afi].rtlist[dir]) {
+ ecom_str = ecommunity_ecom2str(
bgp->vpn_policy[afi].rtlist[dir],
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
- vty_out(vty, "Import RT(s): %s\n", ecom_str);
+ vty_out(vty, "Import RT(s): %s\n", ecom_str);
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ } else
+ vty_out(vty, "Import RT(s):\n");
}
if (!CHECK_FLAG(bgp->af_flags[afi][safi],
@@ -11411,11 +11428,14 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
buf1, RD_ADDRSTRLEN));
dir = BGP_VPN_POLICY_DIR_TOVPN;
- ecom_str = ecommunity_ecom2str(
+ if (bgp->vpn_policy[afi].rtlist[dir]) {
+ ecom_str = ecommunity_ecom2str(
bgp->vpn_policy[afi].rtlist[dir],
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
- vty_out(vty, "Export RT: %s\n", ecom_str);
- XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ vty_out(vty, "Export RT: %s\n", ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ } else
+				vty_out(vty, "Export RT:\n");
}
}
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index c0f2dfca17..71f7f6d0e3 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -2436,6 +2436,7 @@ static void bgp_encode_pbr_iptable_match(struct stream *s,
stream_putw(s, pbm->tcp_mask_flags);
stream_putc(s, pbm->dscp_value);
stream_putc(s, pbm->fragment);
+ stream_putc(s, pbm->protocol);
}
/* BGP has established connection with Zebra. */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 3ca209676f..d79a68dcab 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -94,6 +94,10 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_EVPN_INFO, "BGP EVPN instance information");
DEFINE_QOBJ_TYPE(bgp_master)
DEFINE_QOBJ_TYPE(bgp)
DEFINE_QOBJ_TYPE(peer)
+DEFINE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp))
+DEFINE_HOOK(bgp_inst_config_write,
+ (struct bgp *bgp, struct vty *vty),
+ (bgp, vty))
/* BGP process wide configuration. */
static struct bgp_master bgp_master;
@@ -248,9 +252,9 @@ static int bgp_router_id_set(struct bgp *bgp, const struct in_addr *id,
/* EVPN uses router id in RD, withdraw them */
if (is_evpn_enabled())
- bgp_evpn_handle_router_id_update(bgp, TRUE);
+ bgp_evpn_handle_router_id_update(bgp, true);
- vpn_handle_router_id_update(bgp, TRUE, is_config);
+ vpn_handle_router_id_update(bgp, true, is_config);
IPV4_ADDR_COPY(&bgp->router_id, id);
@@ -267,9 +271,9 @@ static int bgp_router_id_set(struct bgp *bgp, const struct in_addr *id,
/* EVPN uses router id in RD, update them */
if (is_evpn_enabled())
- bgp_evpn_handle_router_id_update(bgp, FALSE);
+ bgp_evpn_handle_router_id_update(bgp, false);
- vpn_handle_router_id_update(bgp, FALSE, is_config);
+ vpn_handle_router_id_update(bgp, false, is_config);
return 0;
}
@@ -303,7 +307,7 @@ void bgp_router_id_zebra_bump(vrf_id_t vrf_id, const struct prefix *router_id)
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("RID change : vrf %u, RTR ID %s",
bgp->vrf_id, inet_ntoa(*addr));
- bgp_router_id_set(bgp, addr, FALSE);
+ bgp_router_id_set(bgp, addr, false);
}
}
}
@@ -323,7 +327,7 @@ void bgp_router_id_zebra_bump(vrf_id_t vrf_id, const struct prefix *router_id)
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("RID change : vrf %u, RTR ID %s",
bgp->vrf_id, inet_ntoa(*addr));
- bgp_router_id_set(bgp, addr, FALSE);
+ bgp_router_id_set(bgp, addr, false);
}
}
@@ -335,7 +339,7 @@ int bgp_router_id_static_set(struct bgp *bgp, struct in_addr id)
{
bgp->router_id_static = id;
bgp_router_id_set(bgp, id.s_addr ? &id : &bgp->router_id_zebra,
- TRUE /* is config */);
+ true /* is config */);
return 0;
}
@@ -3133,7 +3137,7 @@ int bgp_handle_socket(struct bgp *bgp, struct vrf *vrf, vrf_id_t old_vrf_id,
/*
* suppress vrf socket
*/
- if (create == FALSE) {
+ if (create == false) {
bgp_close_vrf_socket(bgp);
return 0;
}
@@ -3189,7 +3193,7 @@ int bgp_get(struct bgp **bgp_val, as_t *as, const char *name,
bgp = bgp_create(as, name, inst_type);
if (bgp_option_check(BGP_OPT_NO_ZEBRA) && name)
bgp->vrf_id = vrf_generate_id();
- bgp_router_id_set(bgp, &bgp->router_id_zebra, TRUE);
+ bgp_router_id_set(bgp, &bgp->router_id_zebra, true);
bgp_address_init(bgp);
bgp_tip_hash_init(bgp);
bgp_scan_init(bgp);
@@ -3286,6 +3290,9 @@ int bgp_delete(struct bgp *bgp)
int i;
assert(bgp);
+
+ hook_call(bgp_inst_delete, bgp);
+
THREAD_OFF(bgp->t_startup);
THREAD_OFF(bgp->t_maxmed_onstartup);
THREAD_OFF(bgp->t_update_delay);
@@ -6922,8 +6929,8 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
struct peer *g_peer = NULL;
char buf[SU_ADDRSTRLEN];
char *addr;
- int if_pg_printed = FALSE;
- int if_ras_printed = FALSE;
+ int if_pg_printed = false;
+ int if_ras_printed = false;
/* Skip dynamic neighbors. */
if (peer_dynamic_neighbor(peer))
@@ -6945,16 +6952,16 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
if (peer_group_active(peer)) {
vty_out(vty, " peer-group %s", peer->group->name);
- if_pg_printed = TRUE;
+ if_pg_printed = true;
} else if (peer->as_type == AS_SPECIFIED) {
vty_out(vty, " remote-as %u", peer->as);
- if_ras_printed = TRUE;
+ if_ras_printed = true;
} else if (peer->as_type == AS_INTERNAL) {
vty_out(vty, " remote-as internal");
- if_ras_printed = TRUE;
+ if_ras_printed = true;
} else if (peer->as_type == AS_EXTERNAL) {
vty_out(vty, " remote-as external");
- if_ras_printed = TRUE;
+ if_ras_printed = true;
}
vty_out(vty, "\n");
@@ -7797,6 +7804,8 @@ int bgp_config_write(struct vty *vty)
/* EVPN configuration. */
bgp_config_write_family(vty, bgp, AFI_L2VPN, SAFI_EVPN);
+ hook_call(bgp_inst_config_write, bgp, vty);
+
#if ENABLE_BGP_VNC
bgp_rfapi_cfg_write(vty, bgp);
#endif
@@ -7878,8 +7887,31 @@ static void bgp_viewvrf_autocomplete(vector comps, struct cmd_token *token)
}
}
+static void bgp_instasn_autocomplete(vector comps, struct cmd_token *token)
+{
+ struct listnode *next, *next2;
+ struct bgp *bgp, *bgp2;
+ char buf[11];
+
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, next, bgp)) {
+ /* deduplicate */
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, next2, bgp2)) {
+ if (bgp2->as == bgp->as)
+ break;
+ if (bgp2 == bgp)
+ break;
+ }
+ if (bgp2 != bgp)
+ continue;
+
+ snprintf(buf, sizeof(buf), "%u", bgp->as);
+ vector_set(comps, XSTRDUP(MTYPE_COMPLETION, buf));
+ }
+}
+
static const struct cmd_variable_handler bgp_viewvrf_var_handlers[] = {
{.tokenname = "VIEWVRFNAME", .completions = bgp_viewvrf_autocomplete},
+ {.varname = "instasn", .completions = bgp_instasn_autocomplete},
{.completions = NULL},
};
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 4bce73898f..777db0ce22 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -24,6 +24,7 @@
#include "qobj.h"
#include <pthread.h>
+#include "hook.h"
#include "frr_pthread.h"
#include "lib/json.h"
#include "vrf.h"
@@ -572,6 +573,11 @@ struct bgp {
};
DECLARE_QOBJ_TYPE(bgp)
+DECLARE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp))
+DECLARE_HOOK(bgp_inst_config_write,
+ (struct bgp *bgp, struct vty *vty),
+ (bgp, vty))
+
#define BGP_ROUTE_ADV_HOLD(bgp) (bgp->main_peers_update_hold)
#define IS_BGP_INST_KNOWN_TO_ZEBRA(bgp) \
@@ -1077,6 +1083,14 @@ struct peer {
_Atomic uint32_t dynamic_cap_in; /* Dynamic Capability input count. */
_Atomic uint32_t dynamic_cap_out; /* Dynamic Capability output count. */
+ uint32_t stat_pfx_filter;
+ uint32_t stat_pfx_aspath_loop;
+ uint32_t stat_pfx_originator_loop;
+ uint32_t stat_pfx_cluster_loop;
+ uint32_t stat_pfx_nh_invalid;
+ uint32_t stat_pfx_dup_withdraw;
+ uint32_t stat_upd_7606; /* RFC7606: treat-as-withdraw */
+
/* BGP state count */
uint32_t established; /* Established */
uint32_t dropped; /* Dropped */
@@ -1153,7 +1167,7 @@ struct peer {
unsigned long weight[AFI_MAX][SAFI_MAX];
/* peer reset cause */
- char last_reset;
+ uint8_t last_reset;
#define PEER_DOWN_RID_CHANGE 1 /* bgp router-id command */
#define PEER_DOWN_REMOTE_AS_CHANGE 2 /* neighbor remote-as command */
#define PEER_DOWN_LOCAL_AS_CHANGE 3 /* neighbor local-as command */
@@ -1180,7 +1194,7 @@ struct peer {
#define PEER_DOWN_BFD_DOWN 24 /* BFD down */
#define PEER_DOWN_IF_DOWN 25 /* Interface down */
#define PEER_DOWN_NBR_ADDR_DEL 26 /* Peer address lost */
- unsigned long last_reset_cause_size;
+ size_t last_reset_cause_size;
uint8_t last_reset_cause[BGP_MAX_PACKET_SIZE];
/* The kind of route-map Flags.*/
diff --git a/configure.ac b/configure.ac
index 45cb85ab8d..961336fbd0 100755
--- a/configure.ac
+++ b/configure.ac
@@ -1553,7 +1553,7 @@ if test "${NHRPD}" != ""; then
AC_MSG_ERROR([trying to build nhrpd, but libcares not found. install c-ares and its -dev headers.])
])
fi
-
+AM_CONDITIONAL([CARES], [test "${NHRPD}" != ""])
dnl ------------------
dnl check Net-SNMP library
diff --git a/debian/frr.install b/debian/frr.install
index ebb87a0b3e..fe34b23d02 100644
--- a/debian/frr.install
+++ b/debian/frr.install
@@ -2,6 +2,7 @@ etc/
usr/bin/vtysh
usr/bin/mtracebis
usr/lib/*/frr/libfrr.*
+usr/lib/*/frr/libfrrcares.*
usr/lib/*/frr/libfrrospfapiclient.*
usr/lib/frr/*.sh
usr/lib/frr/*d
diff --git a/doc/developer/building-frr-for-omnios.rst b/doc/developer/building-frr-for-omnios.rst
index ffc7a078e5..3a69279b0c 100644
--- a/doc/developer/building-frr-for-omnios.rst
+++ b/doc/developer/building-frr-for-omnios.rst
@@ -60,7 +60,7 @@ Add pytest:
::
- pip install pytest
+ pip install "pytest<5"
Install Sphinx:::
diff --git a/doc/developer/topotests-jsontopo.rst b/doc/developer/topotests-jsontopo.rst
new file mode 100644
index 0000000000..65bdcbe9cf
--- /dev/null
+++ b/doc/developer/topotests-jsontopo.rst
@@ -0,0 +1,475 @@
+.. role:: raw-html-m2r(raw)
+ :format: html
+
+*************************************
+FRRouting Topology Tests with Mininet
+*************************************
+
+Overview
+########
+
+On top of current topotests framework following enhancements are done:
+
+
+#.
+  Creating the topology and assigning IPs to routers' interfaces dynamically.\ :raw-html-m2r:`<br>`
+ It is achieved by using json file, in which user specify the number of routers,
+ links to each router, interfaces for the routers and protocol configurations for
+ all routers.
+
+#.
+ Creating the configurations dynamically. It is achieved by using
+ /usr/lib/frr/frr-reload.py utility, which takes running configuration and the
+ newly created configuration for any particular router and creates a delta
+  file (diff file) and loads it to the router.
+
+
+Logging of test case executions
+###############################
+
+
+#. User can enable logging of testcases execution messages into log file by
+ adding "frrtest_log_dir = /tmp/topotests/" in pytest.ini file
+#. Router's current configuration can be displayed on console or sent to logs by
+ adding "show_router_config = True" in pytest.ini file
+
+Log file name will be displayed when we start execution:
+root@test:~/topotests/example-topojson-test/test_topo_json_single_link# python
+test_topo_json_single_link.py Logs will be sent to logfile:
+/tmp/topotests/test_topo_json_single_link_11:57:01.353797
+
+Note: directory "/tmp/topotests/" is created by topotests by default, making
+use of same directory to save execution logs.
+
+
+Guidelines
+##########
+
+Writing New Tests
+=================
+
+
+This section will guide you in all recommended steps to produce a standard topology test.
+
+This is the recommended test writing routine:
+
+
+* Create a JSON file, which will have routers and protocol configurations
+* Create topology from json
+* Create configuration from json
+* Write the tests
+* Create a Pull Request
+
+File Hierarchy
+==============
+
+Before starting to write any tests one must know the file hierarchy. The
+repository hierarchy looks like this:
+
+.. code-block::
+
+ $ cd path/to/topotests
+ $ find ./*
+ ...
+ ./example-topojson-test # the basic example test topology-1
+ ./example-topojson-test/test_example_topojson.json # input json file, having
+ topology, interfaces, bgp and other configuration
+ ./example-topojson-test/test_example_topojson.py # test script to write and
+ execute testcases
+ ...
+ ./lib # shared test/topology functions
+ ./lib/topojson.py # library to create topology and configurations dynamically
+ from json file
+ ./lib/common_config.py # library to create protocol's common configurations ex-
+ static_routes, prefix_lists, route_maps etc.
+ ./lib/bgp.py # library to create only bgp configurations
+
+Defining the Topology and initial configuration in JSON file
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The first step to write a new test is to define the topology and initial
+configuration. User has to define topology and initial configuration in JSON
+file. Here is an example of JSON file.
+
+.. code-block::
+
+    BGP neighborship with single phy-link, sample JSON file:
+ {
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "192.168.0.0", "v4mask": 30, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ...
+
+
+    BGP neighborship with loopback interface, sample JSON file:
+ {
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "192.168.0.0", "v4mask": 30, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback",
+ "add_static_route":"yes"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "1.0.2.17/32",
+                "next_hop": "192.168.0.1"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback",
+ "add_static_route":"yes"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "192.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "192.168.0.1",
+ "tag": 4001
+ }
+            ]
+ }
+ ...
+
+ BGP neighborship with Multiple phy-links, sample JSON file:
+ {
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "192.168.0.0", "v4mask": 30, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ...
+
+
+JSON file explained
+"""""""""""""""""""
+
+Mandatory keywords/options in JSON:
+
+
+* "ipv4base" : base ipv4 address to generate ips, ex - 192.168.0.0
+* "ipv4mask" : mask for ipv4 address, ex - 30
+* "ipv6base" : base ipv6 address to generate ips, ex - fd00:
+* "ipv6mask" : mask for ipv6 address, ex - 64
+* "link_ip_start" : physical interface base ipv4 and ipv6 address
+* "lo_prefix" : loopback interface base ipv4 and ipv6 address
+* "routers" : user can add number of routers as per topology, router's name
+ can be any logical name, ex- r1 or a0.
+* "r1" : name of the router
+* "lo" : loopback interface dict, ipv4 and/or ipv6 addresses generated automatically
+* "type" : type of interface, to identify loopback interface
+* "links" : physical interfaces dict, ipv4 and/or ipv6 addresses generated
+ automatically
+* "r2-link1" : it will be used when routers have multiple links. 'r2' is router
+ name, 'link' is any logical name, '1' is to identify link number,
+  router name and link must be separated by hyphen ("-"), ex- a0-peer1
+
+Optional keywords/options in JSON:
+
+* "bgp" : bgp configuration
+* "local_as" : Local AS number
+* "unicast" : All SAFI configuration
+* "neighbor": All neighbor details
+* "dest_link" : Destination link to which router will connect
+* "router_id" : bgp router-id
+* "source_link" : if user wants to establish bgp neighborship with loopback
+ interface, add "source_link": "lo"
+* "keepalivetimer" : Keep alive timer for BGP neighbor
+* "holddowntimer" : Hold down timer for BGP neighbor
+* "static_routes" : create static routes for routers
+* "redistribute" : redistribute static and/or connected routes
+* "prefix_lists" : create Prefix-lists for routers
+
+Building topology and configurations
+""""""""""""""""""""""""""""""""""""
+
+Topology and initial configuration will be created in setup_module(). Following
+is the sample code:
+
+.. code-block::
+
+ class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+ def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+        # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ def teardown_module(mod):
+ tgen = get_topogen()
+
+        # Stop topology and remove tmp files
+ stop_topology(tgen)
+
+
+* Note: Topology will be created in setup module but routers will not be
+ started until we load zebra.conf and bgpd.conf to routers. For all routers
+ dirs will be created in /tmp/topotests/<test_folder_name>/<router_name>
+  zebra.conf and bgpd.conf empty files will be created and loaded to routers.
+  All folders and files are deleted in the teardown module.
+
+Creating configuration files
+""""""""""""""""""""""""""""
+
+Router's configuration would be saved in config file frr_json.conf. Common
+configurations are like, static routes, prefixlists and route maps etc configs,
+these configs can be used by any other protocols as it is.
+BGP config will be specific to BGP protocol testing.
+
+* JSON file is passed to API build_config_from_json(), which looks for
+ configuration tags in JSON file.
+* If tag is found in JSON, configuration is created as per input and written
+ to file frr_json.conf
+* Once JSON parsing is over, frr_json.conf is loaded onto respective router.
+ Config loading is done using 'vtysh -f <file>'. Initial config at this point
+ is also saved frr_json_initial.conf. This file can be used to reset
+ configuration on router, during the course of execution.
+* Reset of configuration is done using frr "reload.py" utility, which
+ calculates the difference between router's running config and user's config
+ and loads delta file to router. API used - reset_config_on_router()
+
+Writing Tests
+"""""""""""""
+
+Test topologies should always be bootstrapped from the
+example-test/test_example.py, because it contains important boilerplate code
+that can't be avoided, like:
+
+imports: os, sys, pytest, topotest/topogen and mininet topology class
+
+The global variable CWD (Current Working directory): which is most likely going
+to be used to reference the routers configuration file location
+
+Example:
+
+
+* The topology class that inherits from Mininet Topo class
+
+.. code-block::
+
+ class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+ # topology build code
+
+
+* pytest setup_module() and teardown_module() to start the topology
+
+.. code-block::
+
+ def setup_module(_m):
+ tgen = Topogen(TemplateTopo)
+
+ # Starting topology, create tmp files which are loaded to routers
+        # to start daemons and then start routers
+ start_topology(tgen, CWD)
+
+ def teardown_module(_m):
+ tgen = get_topogen()
+
+        # Stop topology and remove tmp files
+ stop_topology(tgen, CWD)
+
+
+* __main__ initialization code (to support running the script directly)
+
+.. code-block::
+
+ if **name** == '\ **main**\ ':
+ sys.exit(pytest.main(["-s"]))
+
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index e12bc37256..a0a574a79c 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -22,7 +22,7 @@ Installing Mininet Infrastructure
apt-get install python-pip
apt-get install iproute
pip install ipaddr
- pip install pytest
+ pip install "pytest<5"
pip install exabgp==3.4.17 (Newer 4.0 version of exabgp is not yet
supported)
useradd -d /var/run/exabgp/ -s /bin/false exabgp
diff --git a/doc/user/basic.rst b/doc/user/basic.rst
index 3d3a75d4b1..5509fd5f0d 100644
--- a/doc/user/basic.rst
+++ b/doc/user/basic.rst
@@ -189,6 +189,29 @@ Basic Config Commands
is used to start the daemon then this command is turned on by default
and cannot be turned off and the [no] form of the command is dissallowed.
+.. index::
+ single: no log-filter WORD [DAEMON]
+ single: log-filter WORD [DAEMON]
+
+.. clicmd:: [no] log-filter WORD [DAEMON]
+
+ This command forces logs to be filtered on a specific string. A log message
+ will only be printed if it matches on one of the filters in the log-filter
+ table. Can be daemon independent.
+
+ .. note::
+
+ Log filters help when you need to turn on debugs that cause significant
+ load on the system (enabling certain debugs can bring FRR to a halt).
+ Log filters prevent this but you should still expect a small performance
+ hit due to filtering each of all those logs.
+
+.. index:: log-filter clear [DAEMON]
+.. clicmd:: log-filter clear [DAEMON]
+
+ This command clears all current filters in the log-filter table. Can be
+ daemon independent.
+
.. index:: service password-encryption
.. clicmd:: service password-encryption
@@ -321,6 +344,11 @@ Terminal Mode Commands
Shows the current configuration of the logging system. This includes the
status of all logging destinations.
+.. index:: show log-filter
+.. clicmd:: show log-filter
+
+ Shows the current log filters applied to each daemon.
+
.. index:: show memory
.. clicmd:: show memory
diff --git a/eigrpd/eigrp_const.h b/eigrpd/eigrp_const.h
index 895a141e4a..3a103fb9f2 100644
--- a/eigrpd/eigrp_const.h
+++ b/eigrpd/eigrp_const.h
@@ -32,8 +32,6 @@
#ifndef _ZEBRA_EIGRP_CONST_H_
#define _ZEBRA_EIGRP_CONST_H_
-#define FALSE 0
-
#define EIGRP_NEIGHBOR_DOWN 0
#define EIGRP_NEIGHBOR_PENDING 1
#define EIGRP_NEIGHBOR_UP 2
diff --git a/fpm/fpm.h b/fpm/fpm.h
index ec1da6657c..f73ab7c66f 100644
--- a/fpm/fpm.h
+++ b/fpm/fpm.h
@@ -245,7 +245,7 @@ static inline fpm_msg_hdr_t *fpm_msg_next(fpm_msg_hdr_t *hdr, size_t *len)
/*
* fpm_msg_hdr_ok
*
- * Returns TRUE if a message header looks well-formed.
+ * Returns true if a message header looks well-formed.
*/
static inline int fpm_msg_hdr_ok(const fpm_msg_hdr_t *hdr)
{
@@ -272,7 +272,7 @@ static inline int fpm_msg_hdr_ok(const fpm_msg_hdr_t *hdr)
/*
* fpm_msg_ok
*
- * Returns TRUE if a message looks well-formed.
+ * Returns true if a message looks well-formed.
*
* @param len The length in bytes from 'hdr' to the end of the buffer.
*/
diff --git a/isisd/isis_misc.c b/isisd/isis_misc.c
index d4c38efaf3..3ad8278e10 100644
--- a/isisd/isis_misc.c
+++ b/isisd/isis_misc.c
@@ -519,7 +519,7 @@ void log_multiline(int priority, const char *prefix, const char *format, ...)
char *p;
va_start(ap, format);
- p = asnprintfrr(MTYPE_TMP, shortbuf, sizeof(shortbuf), format, ap);
+ p = vasnprintfrr(MTYPE_TMP, shortbuf, sizeof(shortbuf), format, ap);
va_end(ap);
if (!p)
@@ -542,7 +542,7 @@ void vty_multiline(struct vty *vty, const char *prefix, const char *format, ...)
char *p;
va_start(ap, format);
- p = asnprintfrr(MTYPE_TMP, shortbuf, sizeof(shortbuf), format, ap);
+ p = vasnprintfrr(MTYPE_TMP, shortbuf, sizeof(shortbuf), format, ap);
va_end(ap);
if (!p)
diff --git a/lib/command.c b/lib/command.c
index f257c7d0f9..c8fbf22721 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -85,6 +85,7 @@ const char *node_names[] = {
"northbound debug", // NORTHBOUND_DEBUG_NODE,
"vnc debug", // DEBUG_VNC_NODE,
"route-map debug", /* RMAP_DEBUG_NODE */
+ "resolver debug", /* RESOLVER_DEBUG_NODE */
"aaa", // AAA_NODE,
"keychain", // KEYCHAIN_NODE,
"keychain key", // KEYCHAIN_KEY_NODE,
diff --git a/lib/command.h b/lib/command.h
index fd8b56d62e..08d6128af4 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -94,6 +94,7 @@ enum node_type {
NORTHBOUND_DEBUG_NODE, /* Northbound Debug node. */
DEBUG_VNC_NODE, /* Debug VNC node. */
RMAP_DEBUG_NODE, /* Route-map debug node */
+ RESOLVER_DEBUG_NODE, /* Resolver debug node */
AAA_NODE, /* AAA node. */
KEYCHAIN_NODE, /* Key-chain node. */
KEYCHAIN_KEY_NODE, /* Key-chain key node. */
@@ -396,6 +397,7 @@ struct cmd_node {
#define SR_STR "Segment-Routing specific commands\n"
#define WATCHFRR_STR "watchfrr information\n"
#define ZEBRA_STR "Zebra information\n"
+#define FILTER_LOG_STR "Filter Logs\n"
#define CMD_VNI_RANGE "(1-16777215)"
#define CONF_BACKUP_EXT ".sav"
@@ -410,6 +412,12 @@ struct cmd_node {
#define NEIGHBOR_ADDR_STR2 "Neighbor address\nNeighbor IPv6 address\nInterface name or neighbor tag\n"
#define NEIGHBOR_ADDR_STR3 "Neighbor address\nIPv6 address\nInterface name\n"
+/* Daemon lists */
+#define DAEMONS_STR \
+ "For the zebra daemon\nFor the rip daemon\nFor the ripng daemon\nFor the ospf daemon\nFor the ospfv6 daemon\nFor the bgp daemon\nFor the isis daemon\nFor the pbr daemon\nFor the fabricd daemon\nFor the pim daemon\nFor the static daemon\nFor the sharpd daemon\nFor the vrrpd daemon\n"
+#define DAEMONS_LIST \
+ "<zebra|ripd|ripngd|ospfd|ospf6d|bgpd|isisd|pbrd|fabricd|pimd|staticd|sharpd|vrrpd>"
+
/* Prototypes. */
extern void install_node(struct cmd_node *, int (*)(struct vty *));
extern void install_default(enum node_type);
diff --git a/lib/lib_errors.c b/lib/lib_errors.c
index e0559f332d..6e5088142a 100644
--- a/lib/lib_errors.c
+++ b/lib/lib_errors.c
@@ -357,6 +357,12 @@ static struct log_ref ferr_lib_err[] = {
.suggestion = "Gather log data and open an Issue.",
},
{
+ .code = EC_LIB_RESOLVER,
+ .title = "DNS Resolution",
+ .description = "An error was detected while attempting to resolve a hostname",
+ .suggestion = "Ensure that DNS is working properly and the hostname is configured in dns. If you are still seeing this error, open an issue"
+ },
+ {
.code = END_FERR,
}
};
diff --git a/lib/lib_errors.h b/lib/lib_errors.h
index 996a16ba95..4730b6aa33 100644
--- a/lib/lib_errors.h
+++ b/lib/lib_errors.h
@@ -84,6 +84,7 @@ enum lib_log_refs {
EC_LIB_GRPC_INIT,
EC_LIB_ID_CONSISTENCY,
EC_LIB_ID_EXHAUST,
+ EC_LIB_RESOLVER,
};
extern void lib_error_init(void);
diff --git a/lib/libfrr.c b/lib/libfrr.c
index ed784fc73a..3294a61295 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -31,6 +31,7 @@
#include "command.h"
#include "version.h"
#include "memory_vty.h"
+#include "log_vty.h"
#include "zclient.h"
#include "log_int.h"
#include "module.h"
@@ -677,6 +678,7 @@ struct thread_master *frr_init(void)
vty_init(master, di->log_always);
memory_init();
+ log_filter_cmd_init();
log_ref_init();
lib_error_init();
diff --git a/lib/linklist.h b/lib/linklist.h
index d23d425d62..ef914b965f 100644
--- a/lib/linklist.h
+++ b/lib/linklist.h
@@ -316,7 +316,7 @@ extern void list_add_list(struct list *list, struct list *add);
* list
* list to operate on
* cond
- * function pointer which takes node data as input and return TRUE or FALSE
+ * function pointer which takes node data as input and return true or false
*/
extern void list_filter_out_nodes(struct list *list, bool (*cond)(void *data));
diff --git a/lib/log.c b/lib/log.c
index 5ce3bd7020..732b238b1e 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -65,6 +65,110 @@ const char *zlog_priority[] = {
"notifications", "informational", "debugging", NULL,
};
+static char zlog_filters[ZLOG_FILTERS_MAX][ZLOG_FILTER_LENGTH_MAX + 1];
+static uint8_t zlog_filter_count;
+
+/*
+ * look for a match on the filter in the current filters, loglock must be held
+ */
+static int zlog_filter_lookup(const char *lookup)
+{
+ for (int i = 0; i < zlog_filter_count; i++) {
+ if (strncmp(lookup, zlog_filters[i], sizeof(zlog_filters[0]))
+ == 0)
+ return i;
+ }
+ return -1;
+}
+
+void zlog_filter_clear(void)
+{
+ pthread_mutex_lock(&loglock);
+ zlog_filter_count = 0;
+ pthread_mutex_unlock(&loglock);
+}
+
+int zlog_filter_add(const char *filter)
+{
+ pthread_mutex_lock(&loglock);
+
+ int ret = 0;
+
+ if (zlog_filter_count >= ZLOG_FILTERS_MAX) {
+ ret = 1;
+ goto done;
+ }
+
+ if (zlog_filter_lookup(filter) != -1) {
+ /* Filter already present */
+ ret = -1;
+ goto done;
+ }
+
+ strlcpy(zlog_filters[zlog_filter_count], filter,
+ sizeof(zlog_filters[0]));
+
+ if (zlog_filters[zlog_filter_count][0] == '\0') {
+ /* Filter was either empty or didn't get copied correctly */
+ ret = -1;
+ goto done;
+ }
+
+ zlog_filter_count++;
+
+done:
+ pthread_mutex_unlock(&loglock);
+ return ret;
+}
+
+int zlog_filter_del(const char *filter)
+{
+ pthread_mutex_lock(&loglock);
+
+ int found_idx = zlog_filter_lookup(filter);
+ int last_idx = zlog_filter_count - 1;
+ int ret = 0;
+
+ if (found_idx == -1) {
+ /* Didn't find the filter to delete */
+ ret = -1;
+ goto done;
+ }
+
+ /* Adjust the filter array */
+ memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
+ (last_idx - found_idx) * sizeof(zlog_filters[0]));
+
+ zlog_filter_count--;
+
+done:
+ pthread_mutex_unlock(&loglock);
+ return ret;
+}
+
+/* Dump all filters to buffer, delimited by new line */
+int zlog_filter_dump(char *buf, size_t max_size)
+{
+ pthread_mutex_lock(&loglock);
+
+ int ret = 0;
+ int len = 0;
+
+ for (int i = 0; i < zlog_filter_count; i++) {
+ ret = snprintf(buf + len, max_size - len, " %s\n",
+ zlog_filters[i]);
+ len += ret;
+ if ((ret < 0) || ((size_t)len >= max_size)) {
+ len = -1;
+ goto done;
+ }
+ }
+
+done:
+ pthread_mutex_unlock(&loglock);
+ return len;
+}
+
/*
* write_wrapper
*
@@ -178,17 +282,32 @@ size_t quagga_timestamp(int timestamp_precision, char *buf, size_t buflen)
return 0;
}
-/* Utility routine for current time printing. */
-static void time_print(FILE *fp, struct timestamp_control *ctl)
+static inline void timestamp_control_render(struct timestamp_control *ctl)
{
if (!ctl->already_rendered) {
ctl->len = quagga_timestamp(ctl->precision, ctl->buf,
sizeof(ctl->buf));
ctl->already_rendered = 1;
}
+}
+
+/* Utility routine for current time printing. */
+static void time_print(FILE *fp, struct timestamp_control *ctl)
+{
+ timestamp_control_render(ctl);
fprintf(fp, "%s ", ctl->buf);
}
+static int time_print_buf(char *buf, int len, int max_size,
+ struct timestamp_control *ctl)
+{
+ timestamp_control_render(ctl);
+
+ if (ctl->len + 1 >= (unsigned long)max_size)
+ return -1;
+
+ return snprintf(buf + len, max_size - len, "%s ", ctl->buf);
+}
static void vzlog_file(struct zlog *zl, struct timestamp_control *tsctl,
const char *proto_str, int record_priority, int priority,
@@ -202,42 +321,92 @@ static void vzlog_file(struct zlog *zl, struct timestamp_control *tsctl,
fflush(fp);
}
+/* Search a buf for the filter strings, loglock must be held */
+static int search_buf(const char *buf)
+{
+ char *found = NULL;
+
+ for (int i = 0; i < zlog_filter_count; i++) {
+ found = strstr(buf, zlog_filters[i]);
+ if (found != NULL)
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Filter out a log */
+static int vzlog_filter(struct zlog *zl, struct timestamp_control *tsctl,
+ const char *proto_str, int priority, const char *msg)
+{
+ int len = 0;
+ int ret = 0;
+ char buf[1024] = "";
+
+ ret = time_print_buf(buf, len, sizeof(buf), tsctl);
+
+ len += ret;
+ if ((ret < 0) || ((size_t)len >= sizeof(buf)))
+ goto search;
+
+ if (zl && zl->record_priority)
+ snprintf(buf + len, sizeof(buf) - len, "%s: %s: %s",
+ zlog_priority[priority], proto_str, msg);
+ else
+ snprintf(buf + len, sizeof(buf) - len, "%s: %s", proto_str,
+ msg);
+
+search:
+ return search_buf(buf);
+}
+
/* va_list version of zlog. */
void vzlog(int priority, const char *format, va_list args)
{
pthread_mutex_lock(&loglock);
- char proto_str[32];
+ char proto_str[32] = "";
int original_errno = errno;
- struct timestamp_control tsctl;
+ struct timestamp_control tsctl = {};
tsctl.already_rendered = 0;
struct zlog *zl = zlog_default;
char buf[256], *msg;
- /* call external hook */
- hook_call(zebra_ext_log, priority, format, args);
+ if (zl == NULL) {
+ tsctl.precision = 0;
+ } else {
+ tsctl.precision = zl->timestamp_precision;
+ if (zl->instance)
+ sprintf(proto_str, "%s[%d]: ", zl->protoname,
+ zl->instance);
+ else
+ sprintf(proto_str, "%s: ", zl->protoname);
+ }
msg = vasnprintfrr(MTYPE_TMP, buf, sizeof(buf), format, args);
+ /* If it doesn't match on a filter, do nothing with the debug log */
+ if ((priority == LOG_DEBUG) && zlog_filter_count
+ && vzlog_filter(zl, &tsctl, proto_str, priority, msg)) {
+ pthread_mutex_unlock(&loglock);
+ goto out;
+ }
+
+ /* call external hook */
+ hook_call(zebra_ext_log, priority, format, args);
+
/* When zlog_default is also NULL, use stderr for logging. */
if (zl == NULL) {
- tsctl.precision = 0;
time_print(stderr, &tsctl);
fprintf(stderr, "%s: %s\n", "unknown", msg);
fflush(stderr);
goto out;
}
- tsctl.precision = zl->timestamp_precision;
/* Syslog output */
if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG])
syslog(priority | zlog_default->facility, "%s", msg);
- if (zl->instance)
- sprintf(proto_str, "%s[%d]: ", zl->protoname, zl->instance);
- else
- sprintf(proto_str, "%s: ", zl->protoname);
-
/* File output. */
if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp)
vzlog_file(zl, &tsctl, proto_str, zl->record_priority, priority,
diff --git a/lib/log.h b/lib/log.h
index c5ae6fe32f..501da88a54 100644
--- a/lib/log.h
+++ b/lib/log.h
@@ -115,6 +115,15 @@ extern int zlog_reset_file(void);
/* Rotate log. */
extern int zlog_rotate(void);
+#define ZLOG_FILTERS_MAX 100 /* Max # of filters at once */
+#define ZLOG_FILTER_LENGTH_MAX 80 /* 80 character filter limit */
+
+/* Add/Del/Dump log filters */
+extern void zlog_filter_clear(void);
+extern int zlog_filter_add(const char *filter);
+extern int zlog_filter_del(const char *filter);
+extern int zlog_filter_dump(char *buf, size_t max_size);
+
const char *lookup_msg(const struct message *mz, int kz, const char *nf);
/* Safe version of strerror -- never returns NULL. */
diff --git a/lib/log_vty.c b/lib/log_vty.c
new file mode 100644
index 0000000000..68d598f565
--- /dev/null
+++ b/lib/log_vty.c
@@ -0,0 +1,97 @@
+/*
+ * Logging - VTY code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Stephen Worley
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "lib/log_vty.h"
+#include "command.h"
+#include "lib/vty.h"
+#include "lib/log.h"
+#ifndef VTYSH_EXTRACT_PL
+#include "lib/log_vty_clippy.c"
+#endif
+
+DEFPY (log_filter,
+ log_filter_cmd,
+ "[no] log-filter WORD$filter",
+ NO_STR
+ FILTER_LOG_STR
+ "String to filter by\n")
+{
+ int ret = 0;
+
+ if (no)
+ ret = zlog_filter_del(filter);
+ else
+ ret = zlog_filter_add(filter);
+
+ if (ret == 1) {
+ vty_out(vty, "%% filter table full\n");
+ return CMD_WARNING;
+ } else if (ret != 0) {
+ vty_out(vty, "%% failed to %s log filter\n",
+ (no ? "remove" : "apply"));
+ return CMD_WARNING;
+ }
+
+ vty_out(vty, " %s\n", filter);
+ return CMD_SUCCESS;
+}
+
+/* Clear all log filters */
+DEFPY (log_filter_clear,
+ log_filter_clear_cmd,
+ "clear log-filter",
+ CLEAR_STR
+ FILTER_LOG_STR)
+{
+ zlog_filter_clear();
+ return CMD_SUCCESS;
+}
+
+/* Show log filter */
+DEFPY (show_log_filter,
+ show_log_filter_cmd,
+ "show log-filter",
+ SHOW_STR
+ FILTER_LOG_STR)
+{
+ char log_filters[ZLOG_FILTERS_MAX * (ZLOG_FILTER_LENGTH_MAX + 3)] = "";
+ int len = 0;
+
+ len = zlog_filter_dump(log_filters, sizeof(log_filters));
+
+ if (len == -1) {
+ vty_out(vty, "%% failed to get filters\n");
+ return CMD_WARNING;
+ }
+
+ if (len != 0)
+ vty_out(vty, "%s", log_filters);
+
+ return CMD_SUCCESS;
+}
+
+void log_filter_cmd_init(void)
+{
+ install_element(VIEW_NODE, &show_log_filter_cmd);
+ install_element(CONFIG_NODE, &log_filter_cmd);
+ install_element(CONFIG_NODE, &log_filter_clear_cmd);
+}
diff --git a/lib/log_vty.h b/lib/log_vty.h
new file mode 100644
index 0000000000..fa5627e4bd
--- /dev/null
+++ b/lib/log_vty.h
@@ -0,0 +1,24 @@
+/*
+ * Logging - VTY library
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Stephen Worley
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __LOG_VTY_H__
+#define __LOG_VTY_H__
+extern void log_filter_cmd_init(void);
+#endif /* __LOG_VTY_H__ */
diff --git a/lib/memory_vty.c b/lib/memory_vty.c
index 5fd9c3b900..1adc0d7b74 100644
--- a/lib/memory_vty.c
+++ b/lib/memory_vty.c
@@ -171,6 +171,9 @@ DEFUN (show_modules,
}
plug = plug->next;
}
+
+ vty_out(vty, "pid: %u\n", (uint32_t)(getpid()));
+
return CMD_SUCCESS;
}
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 4cea14955a..0984c1a168 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -425,6 +425,32 @@ uint32_t nexthop_hash(const struct nexthop *nexthop)
return key;
}
+void nexthop_copy(struct nexthop *copy, const struct nexthop *nexthop,
+ struct nexthop *rparent)
+{
+ copy->vrf_id = nexthop->vrf_id;
+ copy->ifindex = nexthop->ifindex;
+ copy->type = nexthop->type;
+ copy->flags = nexthop->flags;
+ memcpy(&copy->gate, &nexthop->gate, sizeof(nexthop->gate));
+ memcpy(&copy->src, &nexthop->src, sizeof(nexthop->src));
+ memcpy(&copy->rmap_src, &nexthop->rmap_src, sizeof(nexthop->rmap_src));
+ copy->rparent = rparent;
+ if (nexthop->nh_label)
+ nexthop_add_labels(copy, nexthop->nh_label_type,
+ nexthop->nh_label->num_labels,
+ &nexthop->nh_label->label[0]);
+}
+
+struct nexthop *nexthop_dup(const struct nexthop *nexthop,
+ struct nexthop *rparent)
+{
+ struct nexthop *new = nexthop_new();
+
+ nexthop_copy(new, nexthop, rparent);
+ return new;
+}
+
/*
* nexthop printing variants:
* %pNHvv
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 5b6c12d4ef..20401cd581 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -152,6 +152,12 @@ extern const char *nexthop2str(const struct nexthop *nexthop,
char *str, int size);
extern struct nexthop *nexthop_next(struct nexthop *nexthop);
extern unsigned int nexthop_level(struct nexthop *nexthop);
+/* Copies to an already allocated nexthop struct */
+extern void nexthop_copy(struct nexthop *copy, const struct nexthop *nexthop,
+ struct nexthop *rparent);
+/* Duplicates a nexthop and returns the newly allocated nexthop */
+extern struct nexthop *nexthop_dup(const struct nexthop *nexthop,
+ struct nexthop *rparent);
#ifdef __cplusplus
}
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index 10f610db37..5602018b30 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -23,6 +23,7 @@
#include <sockunion.h>
#include <nexthop.h>
#include <nexthop_group.h>
+#include <nexthop_group_private.h>
#include <vty.h>
#include <command.h>
#include <jhash.h>
@@ -100,13 +101,19 @@ struct nexthop_group *nexthop_group_new(void)
return XCALLOC(MTYPE_NEXTHOP_GROUP, sizeof(struct nexthop_group));
}
+void nexthop_group_copy(struct nexthop_group *to, struct nexthop_group *from)
+{
+ /* Copy everything, including recursive info */
+ copy_nexthops(&to->nexthop, from->nexthop, NULL);
+}
+
void nexthop_group_delete(struct nexthop_group **nhg)
{
XFREE(MTYPE_NEXTHOP_GROUP, *nhg);
}
/* Add nexthop to the end of a nexthop list. */
-void nexthop_add(struct nexthop **target, struct nexthop *nexthop)
+void _nexthop_add(struct nexthop **target, struct nexthop *nexthop)
{
struct nexthop *last;
@@ -119,8 +126,36 @@ void nexthop_add(struct nexthop **target, struct nexthop *nexthop)
nexthop->prev = last;
}
+void _nexthop_group_add_sorted(struct nexthop_group *nhg,
+ struct nexthop *nexthop)
+{
+ struct nexthop *position, *prev;
+
+ for (position = nhg->nexthop, prev = NULL; position;
+ prev = position, position = position->next) {
+ if (nexthop_cmp(position, nexthop) > 0) {
+ nexthop->next = position;
+ nexthop->prev = prev;
+
+ if (nexthop->prev)
+ nexthop->prev->next = nexthop;
+ else
+ nhg->nexthop = nexthop;
+
+ position->prev = nexthop;
+ return;
+ }
+ }
+
+ nexthop->prev = prev;
+ if (prev)
+ prev->next = nexthop;
+ else
+ nhg->nexthop = nexthop;
+}
+
/* Delete nexthop from a nexthop list. */
-void nexthop_del(struct nexthop_group *nhg, struct nexthop *nh)
+void _nexthop_del(struct nexthop_group *nhg, struct nexthop *nh)
{
struct nexthop *nexthop;
@@ -150,21 +185,8 @@ void copy_nexthops(struct nexthop **tnh, const struct nexthop *nh,
const struct nexthop *nh1;
for (nh1 = nh; nh1; nh1 = nh1->next) {
- nexthop = nexthop_new();
- nexthop->vrf_id = nh1->vrf_id;
- nexthop->ifindex = nh1->ifindex;
- nexthop->type = nh1->type;
- nexthop->flags = nh1->flags;
- memcpy(&nexthop->gate, &nh1->gate, sizeof(nh1->gate));
- memcpy(&nexthop->src, &nh1->src, sizeof(nh1->src));
- memcpy(&nexthop->rmap_src, &nh1->rmap_src,
- sizeof(nh1->rmap_src));
- nexthop->rparent = rparent;
- if (nh1->nh_label)
- nexthop_add_labels(nexthop, nh1->nh_label_type,
- nh1->nh_label->num_labels,
- &nh1->nh_label->label[0]);
- nexthop_add(tnh, nexthop);
+ nexthop = nexthop_dup(nh1, rparent);
+ _nexthop_add(tnh, nexthop);
if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_RECURSIVE))
copy_nexthops(&nexthop->resolved, nh1->resolved,
@@ -195,7 +217,7 @@ static void nhgc_delete_nexthops(struct nexthop_group_cmd *nhgc)
while (nexthop) {
struct nexthop *next = nexthop_next(nexthop);
- nexthop_del(&nhgc->nhg, nexthop);
+ _nexthop_del(&nhgc->nhg, nexthop);
if (nhg_hooks.del_nexthop)
nhg_hooks.del_nexthop(nhgc, nexthop);
@@ -459,7 +481,7 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
if (no) {
nexthop_group_unsave_nhop(nhgc, name, addr, intf);
if (nh) {
- nexthop_del(&nhgc->nhg, nh);
+ _nexthop_del(&nhgc->nhg, nh);
if (nhg_hooks.del_nexthop)
nhg_hooks.del_nexthop(nhgc, nh);
@@ -472,7 +494,7 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
nh = nexthop_new();
memcpy(nh, &nhop, sizeof(nhop));
- nexthop_add(&nhgc->nhg.nexthop, nh);
+ _nexthop_add(&nhgc->nhg.nexthop, nh);
}
nexthop_group_save_nhop(nhgc, name, addr, intf);
@@ -596,7 +618,7 @@ void nexthop_group_enable_vrf(struct vrf *vrf)
nh = nexthop_new();
memcpy(nh, &nhop, sizeof(nhop));
- nexthop_add(&nhgc->nhg.nexthop, nh);
+ _nexthop_add(&nhgc->nhg.nexthop, nh);
if (nhg_hooks.add_nexthop)
nhg_hooks.add_nexthop(nhgc, nh);
@@ -629,7 +651,7 @@ void nexthop_group_disable_vrf(struct vrf *vrf)
if (nh->vrf_id != vrf->vrf_id)
continue;
- nexthop_del(&nhgc->nhg, nh);
+ _nexthop_del(&nhgc->nhg, nh);
if (nhg_hooks.del_nexthop)
nhg_hooks.del_nexthop(nhgc, nh);
@@ -679,7 +701,7 @@ void nexthop_group_interface_state_change(struct interface *ifp,
nh = nexthop_new();
memcpy(nh, &nhop, sizeof(nhop));
- nexthop_add(&nhgc->nhg.nexthop, nh);
+ _nexthop_add(&nhgc->nhg.nexthop, nh);
if (nhg_hooks.add_nexthop)
nhg_hooks.add_nexthop(nhgc, nh);
@@ -703,7 +725,7 @@ void nexthop_group_interface_state_change(struct interface *ifp,
if (oldifindex != nh->ifindex)
continue;
- nexthop_del(&nhgc->nhg, nh);
+ _nexthop_del(&nhgc->nhg, nh);
if (nhg_hooks.del_nexthop)
nhg_hooks.del_nexthop(nhgc, nh);
diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h
index 5adf2db937..4f4d40eb33 100644
--- a/lib/nexthop_group.h
+++ b/lib/nexthop_group.h
@@ -42,8 +42,8 @@ struct nexthop_group {
struct nexthop_group *nexthop_group_new(void);
void nexthop_group_delete(struct nexthop_group **nhg);
-void nexthop_add(struct nexthop **target, struct nexthop *nexthop);
-void nexthop_del(struct nexthop_group *nhg, struct nexthop *nexthop);
+void nexthop_group_copy(struct nexthop_group *to,
+ struct nexthop_group *from);
void copy_nexthops(struct nexthop **tnh, const struct nexthop *nh,
struct nexthop *rparent);
diff --git a/lib/nexthop_group_private.h b/lib/nexthop_group_private.h
new file mode 100644
index 0000000000..cdd0df0ab3
--- /dev/null
+++ b/lib/nexthop_group_private.h
@@ -0,0 +1,45 @@
+/*
+ * Nexthop Group Private Functions.
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Stephen Worley
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * These functions should only be used internally for nexthop groups
+ * and in certain special cases. Please use `lib/nexthop_group.h` for
+ * any general nexthop_group api needs.
+ */
+
+#ifndef __NEXTHOP_GROUP_PRIVATE__
+#define __NEXTHOP_GROUP_PRIVATE__
+
+#include <nexthop_group.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void _nexthop_add(struct nexthop **target, struct nexthop *nexthop);
+void _nexthop_del(struct nexthop_group *nhg, struct nexthop *nexthop);
+void _nexthop_group_add_sorted(struct nexthop_group *nhg,
+ struct nexthop *nexthop);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __NEXTHOP_GROUP_PRIVATE__ */
diff --git a/lib/northbound.c b/lib/northbound.c
index 4478abf5a7..48b450e969 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -504,7 +504,9 @@ int nb_candidate_edit(struct nb_config *candidate,
*/
if (dnode) {
lyd_schema_sort(dnode, 0);
- lyd_validate(&dnode, LYD_OPT_CONFIG, ly_native_ctx);
+ lyd_validate(&dnode,
+ LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
+ ly_native_ctx);
}
break;
case NB_OP_DESTROY:
@@ -570,7 +572,8 @@ int nb_candidate_update(struct nb_config *candidate)
*/
static int nb_candidate_validate_yang(struct nb_config *candidate)
{
- if (lyd_validate(&candidate->dnode, LYD_OPT_STRICT | LYD_OPT_CONFIG,
+ if (lyd_validate(&candidate->dnode,
+ LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
ly_native_ctx)
!= 0)
return NB_ERR_VALIDATION;
diff --git a/lib/pbr.h b/lib/pbr.h
index 1425e679c5..ecd50447e5 100644
--- a/lib/pbr.h
+++ b/lib/pbr.h
@@ -121,6 +121,7 @@ struct pbr_rule {
#define MATCH_PKT_LEN_INVERSE_SET (1 << 8)
#define MATCH_FRAGMENT_INVERSE_SET (1 << 9)
#define MATCH_ICMP_SET (1 << 10)
+#define MATCH_PROTOCOL_SET (1 << 11)
extern int zapi_pbr_rule_encode(uint8_t cmd, struct stream *s,
struct pbr_rule *zrule);
diff --git a/nhrpd/resolver.c b/lib/resolver.c
index 64b16e7ee3..fb8aeed92f 100644
--- a/nhrpd/resolver.c
+++ b/lib/resolver.c
@@ -17,17 +17,18 @@
#include "vector.h"
#include "thread.h"
#include "lib_errors.h"
-
-#include "nhrpd.h"
-#include "nhrp_errors.h"
+#include "resolver.h"
+#include "command.h"
struct resolver_state {
ares_channel channel;
+ struct thread_master *master;
struct thread *timeout;
vector read_threads, write_threads;
};
static struct resolver_state state;
+static bool resolver_debug;
#define THREAD_RUNNING ((struct thread *)-1)
@@ -54,7 +55,8 @@ static int resolver_cb_socket_readable(struct thread *t)
ares_process_fd(r->channel, fd, ARES_SOCKET_BAD);
if (vector_lookup(r->read_threads, fd) == THREAD_RUNNING) {
t = NULL;
- thread_add_read(master, resolver_cb_socket_readable, r, fd, &t);
+ thread_add_read(r->master, resolver_cb_socket_readable, r, fd,
+ &t);
vector_set_index(r->read_threads, fd, t);
}
resolver_update_timeouts(r);
@@ -71,7 +73,7 @@ static int resolver_cb_socket_writable(struct thread *t)
ares_process_fd(r->channel, ARES_SOCKET_BAD, fd);
if (vector_lookup(r->write_threads, fd) == THREAD_RUNNING) {
t = NULL;
- thread_add_write(master, resolver_cb_socket_writable, r, fd,
+ thread_add_write(r->master, resolver_cb_socket_writable, r, fd,
&t);
vector_set_index(r->write_threads, fd, t);
}
@@ -91,8 +93,8 @@ static void resolver_update_timeouts(struct resolver_state *r)
tv = ares_timeout(r->channel, NULL, &tvbuf);
if (tv) {
unsigned int timeoutms = tv->tv_sec * 1000 + tv->tv_usec / 1000;
- thread_add_timer_msec(master, resolver_cb_timeout, r, timeoutms,
- &r->timeout);
+ thread_add_timer_msec(r->master, resolver_cb_timeout, r,
+ timeoutms, &r->timeout);
}
}
@@ -105,8 +107,8 @@ static void ares_socket_cb(void *data, ares_socket_t fd, int readable,
if (readable) {
t = vector_lookup_ensure(r->read_threads, fd);
if (!t) {
- thread_add_read(master, resolver_cb_socket_readable, r,
- fd, &t);
+ thread_add_read(r->master, resolver_cb_socket_readable,
+ r, fd, &t);
vector_set_index(r->read_threads, fd, t);
}
} else {
@@ -122,8 +124,8 @@ static void ares_socket_cb(void *data, ares_socket_t fd, int readable,
if (writable) {
t = vector_lookup_ensure(r->write_threads, fd);
if (!t) {
- thread_add_read(master, resolver_cb_socket_writable, r,
- fd, &t);
+ thread_add_read(r->master, resolver_cb_socket_writable,
+ r, fd, &t);
vector_set_index(r->write_threads, fd, t);
}
} else {
@@ -137,37 +139,23 @@ static void ares_socket_cb(void *data, ares_socket_t fd, int readable,
}
}
-void resolver_init(void)
-{
- struct ares_options ares_opts;
-
- state.read_threads = vector_init(1);
- state.write_threads = vector_init(1);
-
- ares_opts = (struct ares_options){
- .sock_state_cb = &ares_socket_cb,
- .sock_state_cb_data = &state,
- .timeout = 2,
- .tries = 3,
- };
-
- ares_init_options(&state.channel, &ares_opts,
- ARES_OPT_SOCK_STATE_CB | ARES_OPT_TIMEOUT
- | ARES_OPT_TRIES);
-}
-
static void ares_address_cb(void *arg, int status, int timeouts,
struct hostent *he)
{
struct resolver_query *query = (struct resolver_query *)arg;
union sockunion addr[16];
+ void (*callback)(struct resolver_query *, int, union sockunion *);
size_t i;
+ callback = query->callback;
+ query->callback = NULL;
+
if (status != ARES_SUCCESS) {
- debugf(NHRP_DEBUG_COMMON, "[%p] Resolving failed", query);
- query->callback(query, -1, NULL);
- query->callback = NULL;
+ if (resolver_debug)
+ zlog_debug("[%p] Resolving failed", query);
+
+ callback(query, -1, NULL);
return;
}
@@ -186,10 +174,10 @@ static void ares_address_cb(void *arg, int status, int timeouts,
}
}
- debugf(NHRP_DEBUG_COMMON, "[%p] Resolved with %d results", query,
- (int)i);
- query->callback(query, i, &addr[0]);
- query->callback = NULL;
+ if (resolver_debug)
+ zlog_debug("[%p] Resolved with %d results", query, (int)i);
+
+ callback(query, i, &addr[0]);
}
void resolver_resolve(struct resolver_query *query, int af,
@@ -199,15 +187,61 @@ void resolver_resolve(struct resolver_query *query, int af,
{
if (query->callback != NULL) {
flog_err(
- EC_NHRP_RESOLVER,
+ EC_LIB_RESOLVER,
"Trying to resolve '%s', but previous query was not finished yet",
hostname);
return;
}
- debugf(NHRP_DEBUG_COMMON, "[%p] Resolving '%s'", query, hostname);
+ if (resolver_debug)
+ zlog_debug("[%p] Resolving '%s'", query, hostname);
query->callback = callback;
ares_gethostbyname(state.channel, hostname, af, ares_address_cb, query);
resolver_update_timeouts(&state);
}
+
+DEFUN(debug_resolver,
+ debug_resolver_cmd,
+ "[no] debug resolver",
+ NO_STR
+ DEBUG_STR
+ "Debug DNS resolver actions\n")
+{
+ resolver_debug = (argc == 2);
+ return CMD_SUCCESS;
+}
+
+static struct cmd_node resolver_debug_node = {RESOLVER_DEBUG_NODE, "", 1};
+
+static int resolver_config_write_debug(struct vty *vty)
+{
+ if (resolver_debug)
+ vty_out(vty, "debug resolver\n");
+ return 1;
+}
+
+
+void resolver_init(struct thread_master *tm)
+{
+ struct ares_options ares_opts;
+
+ state.master = tm;
+ state.read_threads = vector_init(1);
+ state.write_threads = vector_init(1);
+
+ ares_opts = (struct ares_options){
+ .sock_state_cb = &ares_socket_cb,
+ .sock_state_cb_data = &state,
+ .timeout = 2,
+ .tries = 3,
+ };
+
+ ares_init_options(&state.channel, &ares_opts,
+ ARES_OPT_SOCK_STATE_CB | ARES_OPT_TIMEOUT
+ | ARES_OPT_TRIES);
+
+ install_node(&resolver_debug_node, resolver_config_write_debug);
+ install_element(CONFIG_NODE, &debug_resolver_cmd);
+ install_element(ENABLE_NODE, &debug_resolver_cmd);
+}
diff --git a/lib/resolver.h b/lib/resolver.h
new file mode 100644
index 0000000000..bc6326edaa
--- /dev/null
+++ b/lib/resolver.h
@@ -0,0 +1,25 @@
+/* C-Ares integration to Quagga mainloop
+ * Copyright (c) 2014-2015 Timo Teräs
+ *
+ * This file is free software: you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _FRR_RESOLVER_H
+#define _FRR_RESOLVER_H
+
+#include "thread.h"
+#include "sockunion.h"
+
+struct resolver_query {
+ void (*callback)(struct resolver_query *, int n, union sockunion *);
+};
+
+void resolver_init(struct thread_master *tm);
+void resolver_resolve(struct resolver_query *query, int af,
+ const char *hostname, void (*cb)(struct resolver_query *,
+ int, union sockunion *));
+
+#endif /* _FRR_RESOLVER_H */
diff --git a/lib/subdir.am b/lib/subdir.am
index 8b6cbe2aeb..aa89622028 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -42,6 +42,7 @@ lib_libfrr_la_SOURCES = \
lib/libfrr.c \
lib/linklist.c \
lib/log.c \
+ lib/log_vty.c \
lib/md5.c \
lib/memory.c \
lib/memory_vty.c \
@@ -137,6 +138,8 @@ lib/northbound_cli_clippy.c: $(CLIPPY_DEPS)
lib/northbound_cli.lo: lib/northbound_cli_clippy.c
lib/vty_clippy.c: $(CLIPPY_DEPS)
lib/vty.lo: lib/vty_clippy.c
+lib/log_vty_clippy.c: $(CLIPPY_DEPS)
+lib/log_vty.lo: lib/log_vty_clippy.c
pkginclude_HEADERS += \
lib/agg_table.h \
@@ -179,6 +182,7 @@ pkginclude_HEADERS += \
lib/libospf.h \
lib/linklist.h \
lib/log.h \
+ lib/log_vty.h \
lib/md5.h \
lib/memory.h \
lib/memory_vty.h \
@@ -188,6 +192,7 @@ pkginclude_HEADERS += \
lib/network.h \
lib/nexthop.h \
lib/nexthop_group.h \
+ lib/nexthop_group_private.h \
lib/northbound.h \
lib/northbound_cli.h \
lib/northbound_db.h \
@@ -281,6 +286,21 @@ lib_libfrrsnmp_la_SOURCES = \
# end
#
+# c-ares support
+#
+if CARES
+lib_LTLIBRARIES += lib/libfrrcares.la
+pkginclude_HEADERS += lib/resolver.h
+endif
+
+lib_libfrrcares_la_CFLAGS = $(WERROR) $(CARES_CFLAGS)
+lib_libfrrcares_la_LDFLAGS = -version-info 0:0:0
+lib_libfrrcares_la_LIBADD = $(CARES_LIBS)
+lib_libfrrcares_la_SOURCES = \
+ lib/resolver.c \
+ #end
+
+#
# ZeroMQ support
#
if ZEROMQ
diff --git a/lib/table.h b/lib/table.h
index 57b65ac4ba..7a69c1664f 100644
--- a/lib/table.h
+++ b/lib/table.h
@@ -314,7 +314,7 @@ static inline struct route_node *route_table_iter_next(route_table_iter_t *iter)
/*
* route_table_iter_is_done
*
- * Returns TRUE if the iteration is complete.
+ * Returns true if the iteration is complete.
*/
static inline int route_table_iter_is_done(route_table_iter_t *iter)
{
@@ -324,7 +324,7 @@ static inline int route_table_iter_is_done(route_table_iter_t *iter)
/*
* route_table_iter_started
*
- * Returns TRUE if this iterator has started iterating over the tree.
+ * Returns true if this iterator has started iterating over the tree.
*/
static inline int route_table_iter_started(route_table_iter_t *iter)
{
diff --git a/nhrpd/nhrp_errors.c b/nhrpd/nhrp_errors.c
index 4c4f55be9e..741e64d8b3 100644
--- a/nhrpd/nhrp_errors.c
+++ b/nhrpd/nhrp_errors.c
@@ -32,12 +32,6 @@ static struct log_ref ferr_nhrp_err[] = {
.suggestion = "Ensure that StrongSwan is configured correctly. Restart StrongSwan and FRR"
},
{
- .code = EC_NHRP_RESOLVER,
- .title = "NHRP DNS Resolution",
- .description = "NHRP has detected an error in an attempt to resolve a hostname",
- .suggestion = "Ensure that DNS is working properly and the hostname is configured in dns. If you are still seeing this error, open an issue"
- },
- {
.code = END_FERR,
}
};
diff --git a/nhrpd/nhrp_errors.h b/nhrpd/nhrp_errors.h
index 593714786a..d4958358fe 100644
--- a/nhrpd/nhrp_errors.h
+++ b/nhrpd/nhrp_errors.h
@@ -25,7 +25,6 @@
enum nhrp_log_refs {
EC_NHRP_SWAN = NHRP_FERR_START,
- EC_NHRP_RESOLVER,
};
extern void nhrp_error_init(void);
diff --git a/nhrpd/nhrp_main.c b/nhrpd/nhrp_main.c
index d7c485f0a0..969638cd77 100644
--- a/nhrpd/nhrp_main.c
+++ b/nhrpd/nhrp_main.c
@@ -141,7 +141,7 @@ int main(int argc, char **argv)
nhrp_error_init();
vrf_init(NULL, NULL, NULL, NULL, NULL);
nhrp_interface_init();
- resolver_init();
+ resolver_init(master);
/* Run with elevated capabilities, as for all netlink activity
* we need privileges anyway. */
diff --git a/nhrpd/nhrpd.h b/nhrpd/nhrpd.h
index 89de145e65..670c9f4f18 100644
--- a/nhrpd/nhrpd.h
+++ b/nhrpd/nhrpd.h
@@ -16,6 +16,7 @@
#include "zclient.h"
#include "debug.h"
#include "memory.h"
+#include "resolver.h"
DECLARE_MGROUP(NHRPD)
@@ -84,15 +85,6 @@ static inline int notifier_active(struct notifier_list *l)
return !list_empty(&l->notifier_head);
}
-struct resolver_query {
- void (*callback)(struct resolver_query *, int n, union sockunion *);
-};
-
-void resolver_init(void);
-void resolver_resolve(struct resolver_query *query, int af,
- const char *hostname, void (*cb)(struct resolver_query *,
- int, union sockunion *));
-
void nhrp_zebra_init(void);
void nhrp_zebra_terminate(void);
diff --git a/nhrpd/subdir.am b/nhrpd/subdir.am
index 6e2b91780f..fe76623ac3 100644
--- a/nhrpd/subdir.am
+++ b/nhrpd/subdir.am
@@ -8,8 +8,7 @@ vtysh_scan += $(top_srcdir)/nhrpd/nhrp_vty.c
man8 += $(MANBUILD)/nhrpd.8
endif
-nhrpd_nhrpd_LDADD = lib/libfrr.la $(LIBCAP) $(CARES_LIBS)
-nhrpd_nhrpd_CFLAGS = $(AM_CFLAGS) $(CARES_CFLAGS)
+nhrpd_nhrpd_LDADD = lib/libfrr.la lib/libfrrcares.la $(LIBCAP)
nhrpd_nhrpd_SOURCES = \
nhrpd/linux.c \
nhrpd/netlink_arp.c \
@@ -27,7 +26,6 @@ nhrpd_nhrpd_SOURCES = \
nhrpd/nhrp_vc.c \
nhrpd/nhrp_vty.c \
nhrpd/reqid.c \
- nhrpd/resolver.c \
nhrpd/vici.c \
nhrpd/zbuf.c \
nhrpd/znl.c \
diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c
index f5fe51d547..620691a98d 100644
--- a/ospfd/ospf_spf.c
+++ b/ospfd/ospf_spf.c
@@ -804,7 +804,7 @@ static void ospf_spf_next(struct vertex *v, struct ospf *ospf,
int type = 0, lsa_pos = -1, lsa_pos_next = 0;
/* If this is a router-LSA, and bit V of the router-LSA (see Section
- A.4.2:RFC2328) is set, set Area A's TransitCapability to TRUE. */
+ A.4.2:RFC2328) is set, set Area A's TransitCapability to true. */
if (v->type == OSPF_VERTEX_ROUTER) {
if (IS_ROUTER_LSA_VIRTUAL((struct router_lsa *)v->lsa))
area->transit = OSPF_TRANSIT_TRUE;
@@ -1207,7 +1207,7 @@ static void ospf_spf_calculate(struct ospf *ospf, struct ospf_area *area,
* spanning tree. */
v->lsa_p->stat = LSA_SPF_IN_SPFTREE;
- /* Set Area A's TransitCapability to FALSE. */
+ /* Set Area A's TransitCapability to false. */
area->transit = OSPF_TRANSIT_FALSE;
area->shortcut_capability = 1;
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 631465fb20..dfa34f15ef 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -3252,7 +3252,7 @@ DEFUN (show_ip_ospf,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -3269,7 +3269,7 @@ DEFUN (show_ip_ospf,
/* vrf input is provided could be all or specific vrf*/
if (vrf_name) {
- bool ospf_output = FALSE;
+ bool ospf_output = false;
use_vrf = 1;
@@ -3277,7 +3277,7 @@ DEFUN (show_ip_ospf,
for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
if (!ospf->oi_running)
continue;
- ospf_output = TRUE;
+ ospf_output = true;
ret = show_ip_ospf_common(vty, ospf, json,
use_vrf);
}
@@ -3942,7 +3942,7 @@ DEFUN (show_ip_ospf_interface,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
char *vrf_name = NULL, *intf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0, idx_intf = 0;
@@ -4082,7 +4082,7 @@ DEFUN (show_ip_ospf_interface_traffic,
struct ospf *ospf = NULL;
struct listnode *node = NULL;
char *vrf_name = NULL, *intf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int inst = 0;
int idx_vrf = 0, idx_intf = 0;
bool uj = use_json(argc, argv);
@@ -4337,7 +4337,7 @@ DEFUN (show_ip_ospf_neighbor,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -4563,7 +4563,7 @@ DEFUN (show_ip_ospf_neighbor_all,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -5257,7 +5257,7 @@ DEFUN (show_ip_ospf_neighbor_detail,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -5446,7 +5446,7 @@ DEFUN (show_ip_ospf_neighbor_detail_all,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -5626,12 +5626,12 @@ DEFUN (show_ip_ospf_neighbor_int_detail,
bool uj = use_json(argc, argv);
struct listnode *node = NULL;
int ret = CMD_SUCCESS;
- bool ospf_output = FALSE;
+ bool ospf_output = false;
for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
if (!ospf->oi_running)
continue;
- ospf_output = TRUE;
+ ospf_output = true;
ret = show_ip_ospf_neighbor_int_detail_common(vty, ospf, 0,
argv, uj);
}
@@ -6334,7 +6334,7 @@ DEFUN (show_ip_ospf_database_max,
struct ospf *ospf = NULL;
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -6343,7 +6343,7 @@ DEFUN (show_ip_ospf_database_max,
OSPF_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
if (vrf_name) {
- bool ospf_output = FALSE;
+ bool ospf_output = false;
use_vrf = 1;
@@ -6351,7 +6351,7 @@ DEFUN (show_ip_ospf_database_max,
for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
if (!ospf->oi_running)
continue;
- ospf_output = TRUE;
+ ospf_output = true;
ret = show_ip_ospf_database_common(
vty, ospf, idx_vrf ? 2 : 0, argc, argv,
use_vrf);
@@ -6403,7 +6403,7 @@ DEFUN (show_ip_ospf_instance_database,
unsigned short instance = 0;
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx = 0;
@@ -6561,7 +6561,7 @@ DEFUN (show_ip_ospf_instance_database_type_adv_router,
unsigned short instance = 0;
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx = 0, idx_vrf = 0;
@@ -6584,7 +6584,7 @@ DEFUN (show_ip_ospf_instance_database_type_adv_router,
OSPF_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
if (vrf_name) {
- bool ospf_output = FALSE;
+ bool ospf_output = false;
use_vrf = 1;
@@ -6592,7 +6592,7 @@ DEFUN (show_ip_ospf_instance_database_type_adv_router,
for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
if (!ospf->oi_running)
continue;
- ospf_output = TRUE;
+ ospf_output = true;
ret = show_ip_ospf_database_type_adv_router_common(
vty, ospf, idx ? 1 : 0, argc, argv,
use_vrf);
@@ -9372,7 +9372,7 @@ DEFUN (show_ip_ospf_border_routers,
struct ospf *ospf = NULL;
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -9381,7 +9381,7 @@ DEFUN (show_ip_ospf_border_routers,
OSPF_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
if (vrf_name) {
- bool ospf_output = FALSE;
+ bool ospf_output = false;
use_vrf = 1;
@@ -9390,7 +9390,7 @@ DEFUN (show_ip_ospf_border_routers,
if (!ospf->oi_running)
continue;
- ospf_output = TRUE;
+ ospf_output = true;
ret = show_ip_ospf_border_routers_common(
vty, ospf, use_vrf);
}
@@ -9510,7 +9510,7 @@ DEFUN (show_ip_ospf_route,
struct ospf *ospf = NULL;
struct listnode *node = NULL;
char *vrf_name = NULL;
- bool all_vrf = FALSE;
+ bool all_vrf = false;
int ret = CMD_SUCCESS;
int inst = 0;
int idx_vrf = 0;
@@ -9525,7 +9525,7 @@ DEFUN (show_ip_ospf_route,
/* vrf input is provided could be all or specific vrf*/
if (vrf_name) {
- bool ospf_output = FALSE;
+ bool ospf_output = false;
use_vrf = 1;
@@ -9533,7 +9533,7 @@ DEFUN (show_ip_ospf_route,
for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) {
if (!ospf->oi_running)
continue;
- ospf_output = TRUE;
+ ospf_output = true;
ret = show_ip_ospf_route_common(vty, ospf, json,
use_vrf);
}
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index 073a51561b..aa38a42714 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -628,8 +628,10 @@ static void ospf_finish_final(struct ospf *ospf)
if (!red_list)
continue;
- for (ALL_LIST_ELEMENTS(red_list, node, nnode, red))
+ for (ALL_LIST_ELEMENTS(red_list, node, nnode, red)) {
ospf_redistribute_unset(ospf, i, red->instance);
+ ospf_redist_del(ospf, i, red->instance);
+ }
}
ospf_redistribute_default_unset(ospf);
@@ -764,7 +766,7 @@ static void ospf_finish_final(struct ospf *ospf)
if (!ext_list)
continue;
- for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) {
+ for (ALL_LIST_ELEMENTS(ext_list, node, nnode, ext)) {
if (ext->external_info)
for (rn = route_top(ext->external_info); rn;
rn = route_next(rn)) {
@@ -776,6 +778,8 @@ static void ospf_finish_final(struct ospf *ospf)
rn->info = NULL;
route_unlock_node(rn);
}
+
+ ospf_external_del(ospf, i, ext->instance);
}
}
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index 22dd6f1a38..a69bb00848 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -21,7 +21,8 @@
#include <log.h>
#include <nexthop.h>
-#include <nexthop_group.h>
+#include "nexthop_group.h"
+#include "nexthop_group_private.h"
#include <hash.h>
#include <jhash.h>
#include <vty.h>
@@ -576,7 +577,7 @@ void pbr_nht_delete_individual_nexthop(struct pbr_map_sequence *pbrms)
hash_release(pbr_nhg_hash, pnhgc);
- nexthop_del(pbrms->nhg, nh);
+ _nexthop_del(pbrms->nhg, nh);
nexthop_free(nh);
nexthop_group_delete(&pbrms->nhg);
XFREE(MTYPE_TMP, pbrms->internal_nhg_name);
@@ -723,7 +724,7 @@ static void pbr_nexthop_group_cache_iterate_to_group(struct hash_bucket *b,
copy_nexthops(&nh, pnhc->nexthop, NULL);
- nexthop_add(&nhg->nexthop, nh);
+ _nexthop_add(&nhg->nexthop, nh);
}
static void
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index a71c712ea7..95f38563b1 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -25,6 +25,7 @@
#include "vrf.h"
#include "nexthop.h"
#include "nexthop_group.h"
+#include "nexthop_group_private.h"
#include "log.h"
#include "debug.h"
#include "pbr.h"
@@ -329,7 +330,7 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd,
nh = nexthop_new();
memcpy(nh, &nhop, sizeof(nhop));
- nexthop_add(&pbrms->nhg->nexthop, nh);
+ _nexthop_add(&pbrms->nhg->nexthop, nh);
pbr_nht_add_individual_nexthop(pbrms);
pbr_map_check(pbrms);
diff --git a/pimd/pim_assert.c b/pimd/pim_assert.c
index 228218e3a3..3aa2a92241 100644
--- a/pimd/pim_assert.c
+++ b/pimd/pim_assert.c
@@ -728,7 +728,7 @@ void assert_action_a5(struct pim_ifchannel *ch)
winner metric as AssertWinnerMetric(S,G,I).
Set Assert Timer to Assert_Time.
If (I is RPF_interface(S)) AND (UpstreamJPState(S,G) == true)
- set SPTbit(S,G) to TRUE.
+ set SPTbit(S,G) to true.
*/
static void assert_action_a6(struct pim_ifchannel *ch,
struct pim_assert_metric winner_metric)
@@ -737,7 +737,7 @@ static void assert_action_a6(struct pim_ifchannel *ch,
/*
If (I is RPF_interface(S)) AND (UpstreamJPState(S,G) == true) set
- SPTbit(S,G) to TRUE.
+ SPTbit(S,G) to true.
*/
if (ch->upstream->rpf.source_nexthop.interface == ch->interface)
if (ch->upstream->join_state == PIM_UPSTREAM_JOINED)
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index 9995b5e31f..62f13b5b53 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -865,8 +865,8 @@ static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
struct pim_interface *pim_ifp;
struct in_addr dst_addr;
uint32_t pim_mtu;
- bool no_fwd = FALSE;
- bool ret = FALSE;
+ bool no_fwd = false;
+ bool ret = false;
/* For now only global scope zone is supported, so send on all
* pim interfaces in the vrf
@@ -891,7 +891,7 @@ static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
if (PIM_DEBUG_BSM)
zlog_debug(
- "%s: pim_bsm_send_intf returned FALSE",
+ "%s: pim_bsm_send_intf returned false",
__PRETTY_FUNCTION__);
}
}
@@ -1216,7 +1216,7 @@ int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
struct pim_instance *pim;
char bsr_str[INET_ADDRSTRLEN];
uint16_t frag_tag;
- bool empty_bsm = FALSE;
+ bool empty_bsm = false;
/* BSM Packet acceptance validation */
pim_ifp = ifp->info;
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 72d7916b20..28b4af9457 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -4991,8 +4991,8 @@ DEFUN (show_ip_pim_bsm_db,
SHOW_STR
IP_STR
PIM_STR
- VRF_CMD_HELP_STR
"PIM cached bsm packets information\n"
+ VRF_CMD_HELP_STR
JSON_STR)
{
int idx = 2;
@@ -5012,8 +5012,8 @@ DEFUN (show_ip_pim_bsrp,
SHOW_STR
IP_STR
PIM_STR
- VRF_CMD_HELP_STR
"PIM cached group-rp mappings information\n"
+ VRF_CMD_HELP_STR
JSON_STR)
{
int idx = 2;
@@ -5030,13 +5030,13 @@ DEFUN (show_ip_pim_bsrp,
DEFUN (show_ip_pim_statistics,
show_ip_pim_statistics_cmd,
- "show ip pim [vrf NAME] statistics [interface WORD] [json]",
+ "show ip pim [vrf NAME] statistics [interface WORD] [json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
"PIM statistics\n"
- "interface\n"
+ INTERFACE_STR
"PIM interface\n"
JSON_STR)
{
@@ -9843,7 +9843,7 @@ static void pim_show_vxlan_sg_entry(struct pim_vxlan_sg *vxlan_sg,
char src_str[INET_ADDRSTRLEN];
char grp_str[INET_ADDRSTRLEN];
json_object *json_row;
- bool installed = (vxlan_sg->up)?TRUE:FALSE;
+ bool installed = (vxlan_sg->up) ? true : false;
const char *iif_name = vxlan_sg->iif?vxlan_sg->iif->name:"-";
const char *oif_name;
@@ -9941,7 +9941,7 @@ static void pim_show_vxlan_sg_match_addr(struct pim_instance *pim,
cwd.vty = vty;
cwd.json = json;
- cwd.addr_match = TRUE;
+ cwd.addr_match = true;
hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);
if (uj) {
@@ -9982,7 +9982,7 @@ static void pim_show_vxlan_sg_one(struct pim_instance *pim,
vxlan_sg = pim_vxlan_sg_find(pim, &sg);
if (vxlan_sg) {
- installed = (vxlan_sg->up)?TRUE:FALSE;
+ installed = (vxlan_sg->up) ? true : false;
iif_name = vxlan_sg->iif?vxlan_sg->iif->name:"-";
if (pim_vxlan_is_orig_mroute(vxlan_sg))
@@ -10118,8 +10118,8 @@ DEFUN_HIDDEN (no_ip_pim_mlag,
struct in_addr addr;
addr.s_addr = 0;
- pim_vxlan_mlag_update(TRUE /*mlag_enable*/,
- FALSE /*peer_state*/, PIM_VXLAN_MLAG_ROLE_SECONDARY,
+ pim_vxlan_mlag_update(true/*mlag_enable*/,
+ false/*peer_state*/, PIM_VXLAN_MLAG_ROLE_SECONDARY,
NULL/*peerlink*/, &addr);
return CMD_SUCCESS;
@@ -10169,9 +10169,9 @@ DEFUN_HIDDEN (ip_pim_mlag,
idx += 2;
if (!strcmp(argv[idx]->arg, "up")) {
- peer_state = TRUE;
+ peer_state = true;
} else if (strcmp(argv[idx]->arg, "down")) {
- peer_state = FALSE;
+ peer_state = false;
} else {
vty_out(vty, "unknown MLAG state %s\n", argv[idx]->arg);
return CMD_WARNING;
@@ -10185,7 +10185,7 @@ DEFUN_HIDDEN (ip_pim_mlag,
errno, safe_strerror(errno));
return CMD_WARNING_CONFIG_FAILED;
}
- pim_vxlan_mlag_update(TRUE, peer_state, role, ifp, &reg_addr);
+ pim_vxlan_mlag_update(true, peer_state, role, ifp, &reg_addr);
return CMD_SUCCESS;
}
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 0511a1a157..bdeda2d76b 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -134,7 +134,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
pim_ifp->igmp_last_member_query_count =
IGMP_DEFAULT_ROBUSTNESS_VARIABLE;
- /* BSM config on interface: TRUE by default */
+ /* BSM config on interface: true by default */
pim_ifp->bsm_enable = true;
pim_ifp->ucast_bsm_accept = true;
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
index 96c7e8052c..66e64cefa0 100644
--- a/pimd/pim_ifchannel.c
+++ b/pimd/pim_ifchannel.c
@@ -1218,10 +1218,10 @@ void pim_ifchannel_update_could_assert(struct pim_ifchannel *ch)
}
if (new_couldassert) {
- /* CouldAssert(S,G,I) switched from FALSE to TRUE */
+ /* CouldAssert(S,G,I) switched from false to true */
PIM_IF_FLAG_SET_COULD_ASSERT(ch->flags);
} else {
- /* CouldAssert(S,G,I) switched from TRUE to FALSE */
+ /* CouldAssert(S,G,I) switched from true to false */
PIM_IF_FLAG_UNSET_COULD_ASSERT(ch->flags);
if (ch->ifassert_state == PIM_IFASSERT_I_AM_WINNER) {
@@ -1301,10 +1301,10 @@ void pim_ifchannel_update_assert_tracking_desired(struct pim_ifchannel *ch)
}
if (new_atd) {
- /* AssertTrackingDesired(S,G,I) switched from FALSE to TRUE */
+ /* AssertTrackingDesired(S,G,I) switched from false to true */
PIM_IF_FLAG_SET_ASSERT_TRACKING_DESIRED(ch->flags);
} else {
- /* AssertTrackingDesired(S,G,I) switched from TRUE to FALSE */
+ /* AssertTrackingDesired(S,G,I) switched from true to false */
PIM_IF_FLAG_UNSET_ASSERT_TRACKING_DESIRED(ch->flags);
if (ch->ifassert_state == PIM_IFASSERT_I_AM_LOSER) {
diff --git a/pimd/pim_macro.c b/pimd/pim_macro.c
index 908026ab14..bdef83925a 100644
--- a/pimd/pim_macro.c
+++ b/pimd/pim_macro.c
@@ -91,11 +91,11 @@ int pim_macro_chisin_joins(const struct pim_ifchannel *ch)
lost_assert(S,G) =
{ all interfaces I such that
- lost_assert(S,G,I) == TRUE }
+ lost_assert(S,G,I) == true }
bool lost_assert(S,G,I) {
if ( RPF_interface(S) == I ) {
- return FALSE
+ return false
} else {
return ( AssertWinner(S,G,I) != NULL AND
AssertWinner(S,G,I) != me AND
@@ -150,7 +150,7 @@ int pim_macro_ch_lost_assert(const struct pim_ifchannel *ch)
pim_include(S,G) =
{ all interfaces I such that:
- ( (I_am_DR( I ) AND lost_assert(S,G,I) == FALSE )
+ ( (I_am_DR( I ) AND lost_assert(S,G,I) == false )
OR AssertWinner(S,G,I) == me )
AND local_receiver_include(S,G,I) }
@@ -178,7 +178,7 @@ int pim_macro_chisin_pim_include(const struct pim_ifchannel *ch)
return (
/* I_am_DR( I ) ? */
PIM_I_am_DR(pim_ifp) &&
- /* lost_assert(S,G,I) == FALSE ? */
+ /* lost_assert(S,G,I) == false ? */
(!pim_macro_ch_lost_assert(ch)));
}
@@ -228,7 +228,7 @@ int pim_macro_ch_could_assert_eval(const struct pim_ifchannel *ch)
return 0; /* false */
}
- /* SPTbit(S,G) == TRUE */
+ /* SPTbit(S,G) == true */
if (ch->upstream->sptbit == PIM_UPSTREAM_SPTBIT_FALSE)
return 0; /* false */
@@ -272,9 +272,9 @@ struct pim_assert_metric pim_macro_spt_assert_metric(const struct pim_rpf *rpf,
following pseudocode:
assert_metric my_assert_metric(S,G,I) {
- if( CouldAssert(S,G,I) == TRUE ) {
+ if( CouldAssert(S,G,I) == true ) {
return spt_assert_metric(S,I)
- } else if( CouldAssert(*,G,I) == TRUE ) {
+ } else if( CouldAssert(*,G,I) == true ) {
return rpt_assert_metric(G,I)
} else {
return infinite_assert_metric()
@@ -365,11 +365,11 @@ int pim_macro_chisin_oiflist(const struct pim_ifchannel *ch)
(+) ( pim_include(*,G) (-) pim_exclude(S,G) )
(-) lost_assert(*,G)
(+) joins(S,G) ) )
- OR (local_receiver_include(S,G,I) == TRUE
+ OR (local_receiver_include(S,G,I) == true
AND (I_am_DR(I) OR (AssertWinner(S,G,I) == me)))
- OR ((RPF_interface(S) == I) AND (JoinDesired(S,G) == TRUE))
- OR ((RPF_interface(RP(G)) == I) AND (JoinDesired(*,G) == TRUE)
- AND (SPTbit(S,G) == FALSE))
+ OR ((RPF_interface(S) == I) AND (JoinDesired(S,G) == true))
+ OR ((RPF_interface(RP(G)) == I) AND (JoinDesired(*,G) == true)
+ AND (SPTbit(S,G) == false))
AssertTrackingDesired(S,G,I) is true on any interface in which an
(S,G) assert might affect our behavior.
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index 3fe7e8bf64..4b66616490 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -61,7 +61,7 @@ void pim_register_join(struct pim_upstream *up)
pim_channel_add_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM);
up->reg_state = PIM_REG_JOIN;
- pim_vxlan_update_sg_reg_state(pim, up, TRUE /*reg_join*/);
+ pim_vxlan_update_sg_reg_state(pim, up, true /*reg_join*/);
}
void pim_register_stop_send(struct interface *ifp, struct prefix_sg *sg,
@@ -148,7 +148,7 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
PIM_OIF_FLAG_PROTO_PIM);
pim_upstream_start_register_stop_timer(upstream, 0);
pim_vxlan_update_sg_reg_state(pim, upstream,
- FALSE /*reg_join*/);
+ false/*reg_join*/);
break;
case PIM_REG_JOIN_PENDING:
upstream->reg_state = PIM_REG_PRUNE;
@@ -283,8 +283,8 @@ void pim_null_register_send(struct pim_upstream *up)
* # Note: this may be a spoofing attempt
* }
* if( I_am_RP(G) AND outer.dst == RP(G) ) {
- * sentRegisterStop = FALSE;
- * if ( register.borderbit == TRUE ) {
+ * sentRegisterStop = false;
+ * if ( register.borderbit == true ) {
* if ( PMBR(S,G) == unknown ) {
* PMBR(S,G) = outer.src
* } else if ( outer.src != PMBR(S,G) ) {
@@ -296,10 +296,10 @@ void pim_null_register_send(struct pim_upstream *up)
* ( SwitchToSptDesired(S,G) AND
* ( inherited_olist(S,G) == NULL ))) {
* send Register-Stop(S,G) to outer.src
- * sentRegisterStop = TRUE;
+ * sentRegisterStop = true;
* }
* if ( SPTbit(S,G) OR SwitchToSptDesired(S,G) ) {
- * if ( sentRegisterStop == TRUE ) {
+ * if ( sentRegisterStop == true ) {
* set KeepaliveTimer(S,G) to RP_Keepalive_Period;
* } else {
* set KeepaliveTimer(S,G) to Keepalive_Period;
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index b1a2e717d6..7d263e99e3 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -228,7 +228,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
if ((up->sg.src.s_addr == INADDR_ANY && I_am_RP(pim, up->sg.grp)) ||
PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
- neigh_needed = FALSE;
+ neigh_needed = false;
pim_find_or_track_nexthop(pim, &nht_p, up, NULL, false, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, &src, &grp,
neigh_needed))
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 194fb2cffd..50df2fdbf9 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -1318,14 +1318,14 @@ int pim_upstream_is_sg_rpt(struct pim_upstream *up)
* void
* Update_SPTbit(S,G,iif) {
* if ( iif == RPF_interface(S)
- * AND JoinDesired(S,G) == TRUE
- * AND ( DirectlyConnected(S) == TRUE
+ * AND JoinDesired(S,G) == true
+ * AND ( DirectlyConnected(S) == true
* OR RPF_interface(S) != RPF_interface(RP(G))
* OR inherited_olist(S,G,rpt) == NULL
* OR ( ( RPF'(S,G) == RPF'(*,G) ) AND
* ( RPF'(S,G) != NULL ) )
* OR ( I_Am_Assert_Loser(S,G,iif) ) {
- * Set SPTbit(S,G) to TRUE
+ * Set SPTbit(S,G) to true
* }
* }
*/
@@ -1344,7 +1344,7 @@ void pim_upstream_set_sptbit(struct pim_upstream *up,
return;
}
- // AND JoinDesired(S,G) == TRUE
+ // AND JoinDesired(S,G) == true
if (!pim_upstream_evaluate_join_desired(up->channel_oil->pim, up)) {
if (PIM_DEBUG_TRACE)
zlog_debug("%s: %s Join is not Desired",
@@ -1352,7 +1352,7 @@ void pim_upstream_set_sptbit(struct pim_upstream *up,
return;
}
- // DirectlyConnected(S) == TRUE
+ // DirectlyConnected(S) == true
if (pim_if_connected_to_source(up->rpf.source_nexthop.interface,
up->sg.src)) {
if (PIM_DEBUG_TRACE)
@@ -1456,7 +1456,7 @@ static int pim_upstream_register_stop_timer(struct thread *t)
up->reg_state = PIM_REG_JOIN;
pim_channel_add_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM);
- pim_vxlan_update_sg_reg_state(pim, up, TRUE /*reg_join*/);
+ pim_vxlan_update_sg_reg_state(pim, up, true /*reg_join*/);
break;
case PIM_REG_JOIN:
break;
@@ -1690,7 +1690,7 @@ bool pim_upstream_equal(const void *arg1, const void *arg2)
/* rfc4601:section-4.2:"Data Packet Forwarding Rules" defines
* the cases where kat has to be restarted on rxing traffic -
*
- * if( DirectlyConnected(S) == TRUE AND iif == RPF_interface(S) ) {
+ * if( DirectlyConnected(S) == true AND iif == RPF_interface(S) ) {
* set KeepaliveTimer(S,G) to Keepalive_Period
* # Note: a register state transition or UpstreamJPState(S,G)
* # transition may happen as a result of restarting
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
index d2648fad50..33e3b2340c 100644
--- a/pimd/pim_vxlan.c
+++ b/pimd/pim_vxlan.c
@@ -115,7 +115,7 @@ static void pim_vxlan_init_work(void)
vxlan_info.max_work_cnt = PIM_VXLAN_WORK_MAX;
vxlan_info.flags |= PIM_VXLANF_WORK_INITED;
vxlan_info.work_list = list_new();
- pim_vxlan_work_timer_setup(TRUE /* start */);
+ pim_vxlan_work_timer_setup(true/* start */);
}
static void pim_vxlan_add_work(struct pim_vxlan_sg *vxlan_sg)
diff --git a/python/clidef.py b/python/clidef.py
index 85464a62d4..bc2f5caebf 100644
--- a/python/clidef.py
+++ b/python/clidef.py
@@ -346,9 +346,11 @@ if __name__ == '__main__':
if args.show:
dumpfd = sys.stderr
+ basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
macros = Macros()
macros.load('lib/route_types.h')
- macros.load('lib/command.h')
+ macros.load(os.path.join(basepath, 'lib/command.h'))
# sigh :(
macros['PROTO_REDIST_STR'] = 'FRR_REDIST_STR_ISISD'
diff --git a/ripd/rip_cli.c b/ripd/rip_cli.c
index 346e93b8ef..627d3d1993 100644
--- a/ripd/rip_cli.c
+++ b/ripd/rip_cli.c
@@ -829,8 +829,10 @@ DEFPY (ip_rip_authentication_mode,
nb_cli_enqueue_change(vty, "./authentication-scheme/mode", NB_OP_MODIFY,
strmatch(mode, "md5") ? "md5" : "plain-text");
- nb_cli_enqueue_change(vty, "./authentication-scheme/md5-auth-length",
- NB_OP_MODIFY, value);
+ if (strmatch(mode, "md5"))
+ nb_cli_enqueue_change(vty,
+ "./authentication-scheme/md5-auth-length",
+ NB_OP_MODIFY, value);
return nb_cli_apply_changes(vty, "./frr-ripd:rip");
}
@@ -852,7 +854,7 @@ DEFPY (no_ip_rip_authentication_mode,
nb_cli_enqueue_change(vty, "./authentication-scheme/mode", NB_OP_MODIFY,
NULL);
nb_cli_enqueue_change(vty, "./authentication-scheme/md5-auth-length",
- NB_OP_MODIFY, NULL);
+ NB_OP_DESTROY, NULL);
return nb_cli_apply_changes(vty, "./frr-ripd:rip");
}
diff --git a/tests/topotests/bgp-basic-functionality-topo1/__init__.py b/tests/topotests/bgp-basic-functionality-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp-basic-functionality-topo1/__init__.py
diff --git a/tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json b/tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json
new file mode 100644
index 0000000000..c778ae4bed
--- /dev/null
+++ b/tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json
@@ -0,0 +1,172 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
new file mode 100755
index 0000000000..095ebe3344
--- /dev/null
+++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
@@ -0,0 +1,595 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test BGP basic functionality:
+
+Test steps
+- Create topology (setup module)
+ Creating 4 routers topology, r1, r2, r3 are in IBGP and
+ r3, r4 are in EBGP
+- Bring up topology
+- Verify for bgp to converge
+- Modify/Delete and verify router-id
+- Modify and verify bgp timers
+- Create and verify static routes
+- Modify and verify admin distance for existing static routes
+- Test advertise network using network command
+- Verify clear bgp
+- Test bgp convergence with loopback interface
+- Test advertise network using network command
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../lib/'))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, reset_config_on_routers, create_static_routes,
+ verify_rib, verify_admin_distance_for_static_routes
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp, verify_router_id,
+ modify_as_number, verify_as_numbers, clear_bgp_and_verify,
+ verify_bgp_timers_and_functionality
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/bgp_basic_functionality.json".format(CWD)
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+
+class CreateTopo(Topo):
+ """
+ Test BasicTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function"""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global BGP_CONVERGENCE
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}". \
+ format(BGP_CONVERGENCE)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: {}".
+ format(time.asctime(time.localtime(time.time()))))
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_modify_and_delete_router_id(request):
+ """ Test to modify, delete and verify router-id. """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Modify router id
+ input_dict = {
+ 'r1': {
+ "bgp": {
+ 'router_id': '12.12.12.12'
+ }
+ },
+ 'r2': {
+ "bgp": {
+ 'router_id': '22.22.22.22'
+ }
+ },
+ 'r3': {
+ "bgp": {
+ 'router_id': '33.33.33.33'
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".\
+ format(tc_name, result)
+
+ # Verifying router id once modified
+ result = verify_router_id(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".\
+ format(tc_name, result)
+
+ # Delete router id
+ input_dict = {
+ 'r1': {
+ "bgp": {
+ 'del_router_id': True
+ }
+ },
+ 'r2': {
+ "bgp": {
+ 'del_router_id': True
+ }
+ },
+ 'r3': {
+ "bgp": {
+ 'del_router_id': True
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying router id once deleted
+ # Once router-id is deleted, highest interface ip should become
+ # router-id
+ result = verify_router_id(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_config_with_4byte_as_number(request):
+ """
+ Configure BGP with 4 byte ASN and verify it works fine
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 131079
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 131079
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 131080
+ }
+ }
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ result = verify_as_numbers(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_timers_functionality(request):
+ """
+ Test to modify bgp timers and verify timers functionality.
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to modify BGP timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link":{
+ "r1": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, deepcopy(input_dict))
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Api call to clear bgp, so timer modification would take place
+ clear_bgp_and_verify(tgen, topo, 'r1')
+
+ # Verifying bgp timers functionality
+ result = verify_bgp_timers_and_functionality(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+
+
+def test_static_routes(request):
+ """ Test to create and verify static routes. """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to create static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Api call to redistribute static routes
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying RIB routes
+ dut = 'r3'
+ protocol = 'bgp'
+ next_hop = '10.0.0.2'
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop,
+ protocol=protocol)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_admin_distance_for_existing_static_routes(request):
+ """ Test to modify and verify admin distance for existing static routes."""
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "admin_distance": 10,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying admin distance once modified
+ result = verify_admin_distance_for_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_advertise_network_using_network_command(request):
+ """ Test advertise networks using network command."""
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "20.0.0.0/32",
+ "no_of_network": 10
+ },
+ {
+ "network": "30.0.0.0/32",
+ "no_of_network": 10
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying RIB routes
+ dut = 'r2'
+ protocol = "bgp"
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_clear_bgp_and_verify(request):
+ """
+ Created few static routes and verified all routes are learned via BGP
+ cleared BGP and verified all routes are intact
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # clear ip bgp
+ result = clear_bgp_and_verify(tgen, topo, 'r1')
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_with_loopback_interface(request):
+ """
+ Test BGP with loopback interface
+
+ Adding keys:value pair "dest_link": "lo" and "source_link": "lo"
+ peer dict of input json file for all router's creating config using
+ loopback interface. Once BGP neighborship is up then verifying BGP
+ convergence
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ for routerN in sorted(topo['routers'].keys()):
+ for bgp_neighbor in \
+ topo['routers'][routerN]['bgp']['address_family']['ipv4'][
+ 'unicast']['neighbor'].keys():
+
+ # Adding ['source_link'] = 'lo' key:value pair
+ topo['routers'][routerN]['bgp']['address_family']['ipv4'][
+ 'unicast']['neighbor'][bgp_neighbor]["dest_link"] = {
+ 'lo': {
+ "source_link": "lo",
+ }
+ }
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.2"
+ },
+ {
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.6"
+ }
+ ]
+ },
+ "r2": {
+ "static_routes": [{
+ "network": "1.0.1.17/32",
+ "next_hop": "10.0.0.1"
+ },
+ {
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.10"
+ }
+ ]
+ },
+ "r3": {
+ "static_routes": [{
+ "network": "1.0.1.17/32",
+ "next_hop": "10.0.0.5"
+ },
+ {
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.9"
+ },
+ {
+ "network": "1.0.4.17/32",
+ "next_hop": "10.0.0.14"
+ }
+ ]
+ },
+ "r4": {
+ "static_routes": [{
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.13"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Api call verify whether BGP is converged
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-path-attributes-topo1/__init__.py b/tests/topotests/bgp-path-attributes-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp-path-attributes-topo1/__init__.py
diff --git a/tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json b/tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json
new file mode 100644
index 0000000000..15b7ec13be
--- /dev/null
+++ b/tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json
@@ -0,0 +1,220 @@
+{
+ "ipv4base":"10.0.0.0",
+ "ipv4mask":30,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"10.0.0.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"555",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"555",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r5":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"555",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r6": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "666",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r7": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"666",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ },
+ "r7": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r6":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"},
+ "r7": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"777",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r6": {}
+ }
+ },
+ "r7": {
+ "dest_link": {
+ "r6": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r7":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r5": {"ipv4": "auto", "ipv6": "auto"},
+ "r6": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"888",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r5": {
+ "dest_link": {
+ "r7": {}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r7": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
new file mode 100755
index 0000000000..abd6b396d1
--- /dev/null
+++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
@@ -0,0 +1,1078 @@
+#!/usr/bin/env python
+
+#
+# Modified work Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Original work Copyright (c) 2018 by Network Device Education
+# Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test AS-Path functionality:
+
+Setup module:
+- Create topology (setup module)
+- Bring up topology
+- Verify BGP convergence
+
+Test cases:
+1. Test next_hop attribute and verify best path is installed as per
+ reachable next_hop
+2. Test aspath attribute and verify best path is installed as per
+ shortest AS-Path
+3. Test localpref attribute and verify best path is installed as per
+ highest local-preference
+4. Test weight attribute and verify best path is installed as per
+ highest weight
+5. Test origin attribute and verify best path is installed as per
+ IGP>EGP>INCOMPLETE rule
+6. Test med attribute and verify best path is installed as per lowest
+ med value
+7. Test admin distance and verify best path is installed as per lowest
+ admin distance
+
+Teardown module:
+- Bring down the topology
+- stop routers
+
+"""
+
+import os
+import sys
+import pdb
+import json
+import time
+import inspect
+import ipaddress
+from time import sleep
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, reset_config_on_routers,
+ verify_rib, create_static_routes,
+ create_prefix_lists, verify_prefix_lists,
+ create_route_maps
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify, verify_best_path_as_per_bgp_attribute,
+ verify_best_path_as_per_admin_distance
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/bgp_path_attributes.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+
+####
+class CreateTopo(Topo):
+ """
+ Test CreateTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Building topology and configuration from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: %s", testsuite_run_time)
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Checking BGP convergence
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, ("setup_module :Failed \n Error:"
+ " {}".format(result))
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: %s",
+ time.asctime(time.localtime(time.time())))
+ logger.info("=" * 40)
+
+
+#####################################################
+##
+## Testcases
+##
+#####################################################
+def test_next_hop_attribute(request):
+ """
+ Verifying routes are not getting installed, as next_hop is
+ unreachable, Making next hop reachable using next_hop_self
+ command and verifying routes are installed.
+ """
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp":{
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r1"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: %s", result)
+
+ # Configure next-hop-self to bgp neighbor
+ input_dict_1 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r1"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_aspath_attribute(request):
+ " Verifying AS_PATH attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "aspath"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_localpref_attribute(request):
+ " Verifying LOCAL PREFERENCE attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Prefix list
+ input_dict_2 = {
+ "r2": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_1": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create route map
+ input_dict_3 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_LOCAL_PREF": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_1"
+ }
+ },
+ "set": {
+ "localpref": 1000
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {"name": "RMAP_LOCAL_PREF",
+ "direction": "in"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "localpref"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_weight_attribute(request):
+ " Verifying WEIGHT attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_1": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create route map
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMAP_WEIGHT": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_1"
+ }
+ },
+ "set": {
+ "weight": 500
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {"name": "RMAP_WEIGHT",
+ "direction": "in"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "weight"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_origin_attribute(request):
+ " Verifying ORIGIN attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to create static routes
+ input_dict_3 = {
+ "r5": {
+ "static_routes": [
+ {
+ "network": "200.50.2.0/32",
+ "next_hop": "10.0.0.26"
+ },
+ {
+ "network": "200.60.2.0/32",
+ "next_hop": "10.0.0.26"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Configure next-hop-self to bgp neighbor
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "origin"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_med_attribute(request):
+ " Verifying MED attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r4": {
+ "bgp":{
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to advertise networks
+
+
+ # Configure next-hop-self to bgp neighbor
+
+
+ # Create Prefix list
+ input_dict_3 = {
+ "r2": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_r2": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ },
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_r3": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create route map
+ input_dict_3 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_MED_R2": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_r2"
+ }
+ },
+ "set": {
+ "med": 100
+ }
+ }]
+ }
+ },
+ "r3": {
+ "route_maps": {
+ "RMAP_MED_R3": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_r3"
+ }
+ },
+ "set": {
+ "med": 10
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r2-link1": {
+ "route_maps": [
+ {"name": "RMAP_MED_R2",
+ "direction": "in"}
+ ]
+ }
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {"name": "RMAP_MED_R3",
+ "direction": "in"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "med"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+
+def test_admin_distance(request):
+ " Verifying admin distance functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to create static routes
+ input_dict = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "200.50.2.0/32",
+ "admin_distance": 80,
+ "next_hop": "10.0.0.14"
+ },
+ {
+ "network": "200.50.2.0/32",
+ "admin_distance": 60,
+ "next_hop": "10.0.0.18"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "admin_distance"
+ result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-prefix-list-topo1/__init__.py b/tests/topotests/bgp-prefix-list-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp-prefix-list-topo1/__init__.py
diff --git a/tests/topotests/bgp-prefix-list-topo1/prefix_lists.json b/tests/topotests/bgp-prefix-list-topo1/prefix_lists.json
new file mode 100644
index 0000000000..3bb07ad994
--- /dev/null
+++ b/tests/topotests/bgp-prefix-list-topo1/prefix_lists.json
@@ -0,0 +1,123 @@
+{
+ "address_types": ["ipv4"],
+ "ipv4base":"10.0.0.0",
+ "ipv4mask":30,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"10.0.0.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r4":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
new file mode 100755
index 0000000000..25a346f20d
--- /dev/null
+++ b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
@@ -0,0 +1,1450 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test prefix-list functionality:
+
+Test steps
+- Create topology (setup module)
+ Creating 4 routers topology, r1, r2, r3 are in IBGP and
+ r3, r4 are in EBGP
+- Bring up topology
+- Verify for bgp to converge
+
+IP prefix-list tests
+- Test ip prefix-lists IN permit
+- Test ip prefix-lists OUT permit
+- Test ip prefix-lists IN deny and permit any
+- Test delete ip prefix-lists
+- Test ip prefix-lists OUT deny and permit any
+- Test modify ip prefix-lists IN permit to deny
+- Test modify ip prefix-lists IN deny to permit
+- Test modify ip prefix-lists OUT permit to deny
+- Test modify prefix-lists OUT deny to permit
+- Test ip prefix-lists implicit deny
+"""
+
+import sys
+import json
+import time
+import os
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, reset_config_on_routers,
+ verify_rib, create_static_routes,
+ create_prefix_lists, verify_prefix_lists
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/prefix_lists.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+BGP_CONVERGENCE = False
+
+
+class BGPPrefixListTopo(Topo):
+ """
+ Test BGPPrefixListTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("="*40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(BGPPrefixListTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global BGP_CONVERGENCE
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: {}".
+ format(time.asctime(time.localtime(time.time()))))
+ logger.info("="*40)
+
+#####################################################
+#
+# Tests starting
+#
+#####################################################
+
+
+def test_ip_prefix_lists_in_permit(request):
+ """
+ Create ip prefix list and test permit prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": 10,
+ "network": "any",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure bgp neighbor with prefix list
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_out_permit(request):
+ """
+ Create ip prefix list and test permit prefixes out direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Static routes
+ input_dict_1 = {
+ "r1": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_5 = {
+ "r3": {
+ "static_routes": [{
+ "network": "10.0.0.2/30",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.9"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_5)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": 10,
+ "network": "20.0.20.1/32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ # Configure bgp neighbor with prefix list
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_in_deny_and_permit_any(request):
+ """
+ Create ip prefix list and test permit/deny prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ # Create ip prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.20.1/32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure bgp neighbor with prefix list
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ # Configure prefix list to bgp neighbor
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_delete_prefix_lists(request):
+ """
+ Delete ip prefix list
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.20.1/32",
+ "action": "deny"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_prefix_lists(tgen, input_dict_2)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ logger.info(result)
+
+ # Delete prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.20.1/32",
+ "action": "deny",
+ "delete": True
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_out_deny_and_permit_any(request):
+ """
+ Create ip prefix list and test deny/permit any prefixes OUT direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Static Routes
+ input_dict_1 = {
+ "r2": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.1"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_in_permit_to_deny(request):
+ """
+ Modify ip prefix list and test permit to deny prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link":{
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_in_deny_to_permit(request):
+ """
+ Modify ip prefix list and test deny to permit prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_out_permit_to_deny(request):
+ """
+ Modify ip prefix list and test permit to deny prefixes OUT direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_out_deny_to_permit(request):
+ """
+ Modify ip prefix list and test deny to permit prefixes OUT direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ # Create ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_implicit_deny(request):
+ """
+ Create ip prefix list and test implicit deny
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Static Routes
+ input_dict_1 = {
+ "r2": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.1"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ # Create ip prefix list
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_multiview_topo1/README.md b/tests/topotests/bgp_multiview_topo1/README.md
index c5e615d252..2a2747344a 100644
--- a/tests/topotests/bgp_multiview_topo1/README.md
+++ b/tests/topotests/bgp_multiview_topo1/README.md
@@ -94,7 +94,7 @@ Simplified `R1` config:
Test is executed by running
- vtysh -c "show log" | grep "Logging configuration for"
+ vtysh -c "show logging" | grep "Logging configuration for"
on router `R1`. This should return the logging information for all daemons registered
to Zebra and the list of running daemons is compared to the daemons started for this
diff --git a/tests/topotests/example-topojson-test/__init__.py b/tests/topotests/example-topojson-test/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json
new file mode 100644
index 0000000000..3968348b1f
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json
@@ -0,0 +1,152 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "10.0.0.1/30",
+ "next_hop": "10.0.0.5"
+ }
+ ]
+ }
+ }
+}
+
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
new file mode 100755
index 0000000000..8e794b9946
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<example>.py: Test <example tests>.
+"""
+
+import os
+import sys
+import json
+import time
+import inspect
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, verify_rib
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/example_topojson_multiple_links.json".format(CWD)
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+bgp_convergence = False
+input_dict = {}
+
+
+class TemplateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function's only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating 2 routers having 2 links in between,
+ # one is used to establish BGP neighborship
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # This function's only purpose is to create configuration
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating configuration defined in input JSON
+ # file, example, BGP config, interface config, static routes
+ # config, prefix list config
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def test_bgp_convergence(request):
+ " Test BGP daemon convergence "
+
+ tgen = get_topogen()
+ global bgp_convergence
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "test_bgp_convergence failed.. \n" \
+ " Error: {}".format(bgp_convergence)
+
+ logger.info("BGP is converged successfully \n")
+ write_test_footer(tc_name)
+
+
+def test_static_routes(request):
+ " Test to create and verify static routes. "
+
+ tgen = get_topogen()
+ if bgp_convergence is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Static routes are created as part of initial configuration,
+ # verifying RIB
+ dut = 'r3'
+ protocol = 'bgp'
+ next_hop = '10.0.0.1'
+ input_dict = {"r1": topo["routers"]["r1"]}
+
+ # Uncomment below to debug
+ # tgen.mininet_cli()
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json b/tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json
new file mode 100644
index 0000000000..629d2d6d78
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json
@@ -0,0 +1,153 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "10.0.0.1/30",
+ "next_hop": "10.0.0.5"
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
new file mode 100755
index 0000000000..315c7b3f2d
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<example>.py: Test <example tests>.
+"""
+
+import os
+import sys
+import time
+import json
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, verify_rib
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/example_topojson.json".format(CWD)
+
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+bgp_convergence = False
+input_dict = {}
+
+class TemplateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function's only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating 2 routers having single links in between,
+ # which is used to establish BGP neighborship
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("="*40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # This function's only purpose is to create configuration
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating configuration defined in input JSON
+ # file, example, BGP config, interface config, static routes
+ # config, prefix list config
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def test_bgp_convergence(request):
+ " Test BGP daemon convergence "
+
+ tgen = get_topogen()
+ global bgp_convergence
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\
+ " Error: {}".format(bgp_convergence)
+
+ logger.info("BGP is converged successfully \n")
+ write_test_footer(tc_name)
+
+
+def test_static_routes(request):
+ " Test to create and verify static routes. "
+
+ tgen = get_topogen()
+ if bgp_convergence is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Static routes are created as part of initial configuration,
+ # verifying RIB
+ dut = 'r3'
+ next_hop = '10.0.0.1'
+ input_dict = {"r1": topo["routers"]["r1"]}
+
+ # Uncomment below to debug
+ # tgen.mininet_cli()
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json
new file mode 100644
index 0000000000..c76c6264be
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json
@@ -0,0 +1,161 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.2"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ },
+ {
+ "network": "1.0.1.17/32",
+ "next_hop": "10.0.0.1"
+ },
+ {
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.6"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.5"
+ },
+ {
+ "network": "10.0.0.1/30",
+ "next_hop": "10.0.0.5"
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
new file mode 100755
index 0000000000..b794b96a63
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<example>.py: Test <example tests>.
+"""
+
+import os
+import sys
+import time
+import json
+import inspect
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, verify_rib
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/example_topojson.json".format(CWD)
+
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+bgp_convergence = False
+input_dict = {}
+
+
+class TemplateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function's only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating 2 routers having single links in between,
+ # which is used to establish BGP neighborship
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("="*40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # This function's only purpose is to create configuration
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating configuration defined in input JSON
+ # file, example, BGP config, interface config, static routes
+ # config, prefix list config
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def test_bgp_convergence(request):
+ " Test BGP daemon convergence "
+
+ tgen = get_topogen()
+ global bgp_convergence
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\
+ " Error: {}".format(bgp_convergence)
+
+ logger.info("BGP is converged successfully \n")
+ write_test_footer(tc_name)
+
+
+def test_static_routes(request):
+ " Test to create and verify static routes. "
+
+ tgen = get_topogen()
+ if bgp_convergence is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Static routes are created as part of initial configuration,
+ # verifying RIB
+ dut = 'r3'
+ next_hop = '10.0.0.1'
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ }
+ ]
+ }
+ }
+ # Uncomment below to debug
+ # tgen.mininet_cli()
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
new file mode 100644
index 0000000000..13f8824976
--- /dev/null
+++ b/tests/topotests/lib/bgp.py
@@ -0,0 +1,1521 @@
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+from copy import deepcopy
+from time import sleep
+import traceback
+import ipaddr
+from lib import topotest
+
+from lib.topolog import logger
+
+# Import common_config to use commonly used APIs
+from lib.common_config import (create_common_configuration,
+ InvalidCLIError,
+ load_config_to_router,
+ check_address_types,
+ generate_ips,
+ find_interface_with_greater_ip)
+
+BGP_CONVERGENCE_TIMEOUT = 10
+
+
+def create_router_bgp(tgen, topo, input_dict=None, build=False):
+ """
+ API to configure bgp on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": "200",
+ "router_id": "22.22.22.22",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "advertise_networks": [
+ {
+ "network": "20.0.0.0/32",
+ "no_of_network": 10
+ },
+ {
+ "network": "30.0.0.0/32",
+ "no_of_network": 10
+ }
+ ],
+ "neighbor": {
+ "r3": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "dest_link": {
+ "r4": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ],
+ "route_maps": [
+ {"name": "RMAP_MED_R3",
+ "direction": "in"}
+ ],
+ "next_hop_self": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: create_router_bgp()")
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ for router in input_dict.keys():
+ if "bgp" not in input_dict[router]:
+ logger.debug("Router %s: 'bgp' not present in input_dict", router)
+ continue
+
+ result = __create_bgp_global(tgen, input_dict, router, build)
+ if result is True:
+ bgp_data = input_dict[router]["bgp"]
+
+ bgp_addr_data = bgp_data.setdefault("address_family", {})
+
+ if not bgp_addr_data:
+ logger.debug("Router %s: 'address_family' not present in "
+ "input_dict for BGP", router)
+ else:
+
+ ipv4_data = bgp_addr_data.setdefault("ipv4", {})
+ ipv6_data = bgp_addr_data.setdefault("ipv6", {})
+
+ neigh_unicast = True if ipv4_data.setdefault("unicast", {}) \
+ or ipv6_data.setdefault("unicast", {}) else False
+
+ if neigh_unicast:
+ result = __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router, build)
+
+ logger.debug("Exiting lib API: create_router_bgp()")
+ return result
+
+
+def __create_bgp_global(tgen, input_dict, router, build=False):
+ """
+ Helper API to create bgp global configuration.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ True or False
+ """
+
+ result = False
+ logger.debug("Entering lib API: __create_bgp_global()")
+ try:
+
+ bgp_data = input_dict[router]["bgp"]
+ del_bgp_action = bgp_data.setdefault("delete", False)
+ if del_bgp_action:
+ config_data = ["no router bgp"]
+ result = create_common_configuration(tgen, router, config_data,
+ "bgp", build=build)
+ return result
+
+ config_data = []
+
+ if "local_as" not in bgp_data and build:
+ logger.error("Router %s: 'local_as' not present in input_dict"
+ "for BGP", router)
+ return False
+
+ local_as = bgp_data.setdefault("local_as", "")
+ cmd = "router bgp {}".format(local_as)
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {}".format(cmd, vrf_id)
+
+ config_data.append(cmd)
+
+ router_id = bgp_data.setdefault("router_id", None)
+ del_router_id = bgp_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no bgp router-id")
+ if router_id:
+ config_data.append("bgp router-id {}".format(
+ router_id))
+
+ aggregate_address = bgp_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ network = aggregate_address.setdefault("network", None)
+ if not network:
+ logger.error("Router %s: 'network' not present in "
+ "input_dict for BGP", router)
+ else:
+ cmd = "aggregate-address {}".format(network)
+
+ as_set = aggregate_address.setdefault("as_set", False)
+ summary = aggregate_address.setdefault("summary", False)
+ del_action = aggregate_address.setdefault("delete", False)
+ if as_set:
+ cmd = "{} {}".format(cmd, "as-set")
+ if summary:
+ cmd = "{} {}".format(cmd, "summary")
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ result = create_common_configuration(tgen, router, config_data,
+ "bgp", build=build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_bgp_global()")
+ return result
+
+
+def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
+ """
+ Helper API to create configuration for address-family unicast
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+ """
+
+ result = False
+ logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
+ try:
+ config_data = ["router bgp"]
+ bgp_data = input_dict[router]["bgp"]["address_family"]
+
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict:
+ continue
+
+ if not check_address_types(addr_type):
+ continue
+
+ config_data.append("address-family {} unicast".format(
+ addr_type
+ ))
+ addr_data = addr_dict["unicast"]
+ advertise_network = addr_data.setdefault("advertise_networks",
+ [])
+ for advertise_network_dict in advertise_network:
+ network = advertise_network_dict["network"]
+ if type(network) is not list:
+ network = [network]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
+
+ del_action = advertise_network_dict.setdefault("delete",
+ False)
+
+ # Generating IPs for verification
+ prefix = str(
+ ipaddr.IPNetwork(unicode(network[0])).prefixlen)
+ network_list = generate_ips(network, no_of_network)
+ for ip in network_list:
+ ip = str(ipaddr.IPNetwork(unicode(ip)).network)
+
+ cmd = "network {}/{}\n".format(ip, prefix)
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ max_paths = addr_data.setdefault("maximum_paths", {})
+ if max_paths:
+ ibgp = max_paths.setdefault("ibgp", None)
+ ebgp = max_paths.setdefault("ebgp", None)
+ if ibgp:
+ config_data.append("maximum-paths ibgp {}".format(
+ ibgp
+ ))
+ if ebgp:
+ config_data.append("maximum-paths {}".format(
+ ebgp
+ ))
+
+ aggregate_address = addr_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ ip = aggregate_address("network", None)
+ attribute = aggregate_address("attribute", None)
+ if ip:
+ cmd = "aggregate-address {}".format(ip)
+ if attribute:
+ cmd = "{} {}".format(cmd, attribute)
+
+ config_data.append(cmd)
+
+ redistribute_data = addr_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.error("Router %s: 'redist_type' not present in "
+ "input_dict", router)
+ else:
+ cmd = "redistribute {}".format(
+ redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute",
+ None)
+ if redist_attr:
+ cmd = "{} {}".format(cmd, redist_attr)
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "neighbor" in addr_data:
+ neigh_data = __create_bgp_neighbor(topo, input_dict,
+ router, addr_type)
+ config_data.extend(neigh_data)
+
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict or not check_address_types(addr_type):
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if "neighbor" in addr_data:
+ neigh_addr_data = __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type)
+
+ config_data.extend(neigh_addr_data)
+
+ result = create_common_configuration(tgen, router, config_data,
+ None, build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
+ return result
+
+
+def __create_bgp_neighbor(topo, input_dict, router, addr_type):
+ """
+ Helper API to create neighbor specific configuration
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured
+ """
+
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_neighbor()")
+
+ bgp_data = input_dict[router]["bgp"]["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+
+ for name, peer_dict in neigh_data.iteritems():
+ for dest_link, peer in peer_dict["dest_link"].iteritems():
+ nh_details = topo[name]
+ remote_as = nh_details["bgp"]["local_as"]
+ update_source = None
+
+ if dest_link in nh_details["links"].keys():
+ ip_addr = \
+ nh_details["links"][dest_link][addr_type].split("/")[0]
+ # Loopback interface
+ if "source_link" in peer and peer["source_link"] == "lo":
+ update_source = topo[router]["links"]["lo"][
+ addr_type].split("/")[0]
+
+ neigh_cxt = "neighbor {}".format(ip_addr)
+
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+ if addr_type == "ipv6":
+ config_data.append("address-family ipv6 unicast")
+ config_data.append("{} activate".format(neigh_cxt))
+
+ disable_connected = peer.setdefault("disable_connected_check",
+ False)
+ keep_alive = peer.setdefault("keep_alive", 60)
+ hold_down = peer.setdefault("hold_down", 180)
+ password = peer.setdefault("password", None)
+ max_hop_limit = peer.setdefault("ebgp_multihop", 1)
+
+ if update_source:
+ config_data.append("{} update-source {}".format(
+ neigh_cxt, update_source))
+ if disable_connected:
+ config_data.append("{} disable-connected-check".format(
+ disable_connected))
+ if update_source:
+ config_data.append("{} update-source {}".format(neigh_cxt,
+ update_source))
+ if int(keep_alive) != 60 and int(hold_down) != 180:
+ config_data.append(
+ "{} timers {} {}".format(neigh_cxt, keep_alive,
+ hold_down))
+ if password:
+ config_data.append(
+ "{} password {}".format(neigh_cxt, password))
+
+ if max_hop_limit > 1:
+ config_data.append("{} ebgp-multihop {}".format(neigh_cxt,
+ max_hop_limit))
+ config_data.append("{} enforce-multihop".format(neigh_cxt))
+
+ logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
+ return config_data
+
+
+def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
+ """
+ Helper API to create neighbor-specific address-family configuration
+ (prefix-lists, route-maps, next-hop-self, send-community).
+
+ Parameters
+ ----------
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from
+                  testcase
+ * `router` : router id to be configured
+ * `addr_type` : ip type, ipv4/ipv6
+ """
+
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
+
+ bgp_data = input_dict[router]["bgp"]["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+
+ for name, peer_dict in deepcopy(neigh_data).iteritems():
+ for dest_link, peer in peer_dict["dest_link"].iteritems():
+ deactivate = None
+ nh_details = topo[name]
+ # Loopback interface
+ if "source_link" in peer and peer["source_link"] == "lo":
+ for destRouterLink, data in sorted(nh_details["links"].
+ iteritems()):
+ if "type" in data and data["type"] == "loopback":
+ if dest_link == destRouterLink:
+ ip_addr = \
+ nh_details["links"][destRouterLink][
+ addr_type].split("/")[0]
+
+ # Physical interface
+ else:
+ if dest_link in nh_details["links"].keys():
+
+ ip_addr = nh_details["links"][dest_link][
+ addr_type].split("/")[0]
+ if addr_type == "ipv4" and bgp_data["ipv6"]:
+ deactivate = nh_details["links"][
+ dest_link]["ipv6"].split("/")[0]
+
+ neigh_cxt = "neighbor {}".format(ip_addr)
+ config_data.append("address-family {} unicast".format(
+ addr_type
+ ))
+ if deactivate:
+ config_data.append(
+ "no neighbor {} activate".format(deactivate))
+
+ next_hop_self = peer.setdefault("next_hop_self", None)
+ send_community = peer.setdefault("send_community", None)
+ prefix_lists = peer.setdefault("prefix_lists", {})
+ route_maps = peer.setdefault("route_maps", {})
+
+ # next-hop-self
+ if next_hop_self:
+ config_data.append("{} next-hop-self".format(neigh_cxt))
+ # no_send_community
+ if send_community:
+ config_data.append("{} send-community".format(neigh_cxt))
+
+ if prefix_lists:
+ for prefix_list in prefix_lists:
+ name = prefix_list.setdefault("name", {})
+ direction = prefix_list.setdefault("direction", "in")
+ del_action = prefix_list.setdefault("delete", False)
+ if not name:
+ logger.info("Router %s: 'name' not present in "
+ "input_dict for BGP neighbor prefix lists",
+ router)
+ else:
+ cmd = "{} prefix-list {} {}".format(neigh_cxt, name,
+ direction)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if route_maps:
+ for route_map in route_maps:
+ name = route_map.setdefault("name", {})
+ direction = route_map.setdefault("direction", "in")
+ del_action = route_map.setdefault("delete", False)
+ if not name:
+ logger.info("Router %s: 'name' not present in "
+ "input_dict for BGP neighbor route name",
+ router)
+ else:
+ cmd = "{} route-map {} {}".format(neigh_cxt, name,
+ direction)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ return config_data
+
+
+#############################################
+# Verification APIs
+#############################################
+def verify_router_id(tgen, topo, input_dict):
+ """
+ Running command "show ip bgp json" for DUT and reading router-id
+ from input_dict and verifying with command output.
+ 1. Statically modified router-id should take effect
+ 2. When static router-id is deleted highest loopback should
+ become router-id
+ 3. When loopback intf is down then highest physical intf
+ should become router-id
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: input dictionary, have details of Device Under Test, for
+ which user wants to test the data
+ Usage
+ -----
+ # Verify if router-id for r1 is 12.12.12.12
+ input_dict = {
+ "r1":{
+ "router_id": "12.12.12.12"
+ }
+ # Verify that router-id for r1 is highest interface ip
+ input_dict = {
+ "routers": ["r1"]
+ }
+ result = verify_router_id(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_router_id()")
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ del_router_id = input_dict[router]["bgp"].setdefault(
+ "del_router_id", False)
+
+ logger.info("Checking router %s router-id", router)
+ show_bgp_json = rnode.vtysh_cmd("show ip bgp json",
+ isjson=True)
+ router_id_out = show_bgp_json["routerId"]
+ router_id_out = ipaddr.IPv4Address(unicode(router_id_out))
+
+ # Once router-id is deleted, highest interface ip should become
+ # router-id
+ if del_router_id:
+ router_id = find_interface_with_greater_ip(topo, router)
+ else:
+ router_id = input_dict[router]["bgp"]["router_id"]
+ router_id = ipaddr.IPv4Address(unicode(router_id))
+
+ if router_id == router_id_out:
+ logger.info("Found expected router-id %s for router %s",
+ router_id, router)
+ else:
+ errormsg = "Router-id for router:{} mismatch, expected:" \
+ " {} but found:{}".format(router, router_id,
+ router_id_out)
+ return errormsg
+
+ logger.info("Exiting lib API: verify_router_id()")
+ return True
+
+
+def verify_bgp_convergence(tgen, topo):
+ """
+ API will verify if BGP is converged within the given time frame.
+ Running "show bgp summary json" command and verify bgp neighbor
+ state is established,
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type`: ip_type, ipv4/ipv6
+
+ Usage
+ -----
+ # To verify if BGP is converged for all the routers used in
+ topology
+ results = verify_bgp_convergence(tgen, topo, "ipv4")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_bgp_confergence()")
+ for router, rnode in tgen.routers().iteritems():
+ logger.info("Verifying BGP Convergence on router %s:", router)
+
+ for retry in range(1, 11):
+ show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ total_peer = 0
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ for addr_type in bgp_addr_type.keys():
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ no_of_peer = 0
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ neighbor_ip = \
+ data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s", router)
+ break
+ else:
+ logger.warning("BGP is not yet Converged for router %s",
+ router)
+ sleeptime = 2 * retry
+ if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on"
+ " router %s...", sleeptime, router)
+ sleep(sleeptime)
+ else:
+ show_bgp_summary = rnode.vtysh_cmd("show bgp summary")
+ errormsg = "TIMEOUT!! BGP is not converged in {} " \
+ "seconds for router {} \n {}".format(
+ BGP_CONVERGENCE_TIMEOUT, router,
+ show_bgp_summary)
+ return errormsg
+
+ logger.info("Exiting API: verify_bgp_confergence()")
+ return True
+
+
+def modify_as_number(tgen, topo, input_dict):
+ """
+ API reads local_as and remote_as from user defined input_dict and
+ modifies router's ASNs accordingly. Router's config is modified and
+ recent/changed config is loaded to the router.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : defines for which router ASNs needs to be modified
+
+ Usage
+ -----
+ To modify ASNs for router r1
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: modify_as_number()")
+ try:
+
+ new_topo = deepcopy(topo["routers"])
+ router_dict = {}
+ for router in input_dict.keys():
+ # Remove bgp configuration
+
+ router_dict.update({
+ router: {
+ "bgp": {
+ "delete": True
+ }
+ }
+ })
+
+ new_topo[router]["bgp"]["local_as"] = \
+ input_dict[router]["bgp"]["local_as"]
+
+ logger.info("Removing bgp configuration")
+ create_router_bgp(tgen, topo, router_dict)
+
+ logger.info("Applying modified bgp configuration")
+ create_router_bgp(tgen, new_topo)
+
+ except Exception as e:
+ # handle any exception
+ logger.error("Error %s occured. Arguments %s.", e.message, e.args)
+
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.info("Exiting lib API: modify_as_number()")
+
+ return True
+
+
+def verify_as_numbers(tgen, topo, input_dict):
+ """
+ This API is to verify AS numbers for given DUT by running
+ "show ip bgp neighbor json" command. Local AS and Remote AS
+ will be verified with input_dict data and command output.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type, ipv4/ipv6
+ * `input_dict`: defines - for which router, AS numbers needs to be verified
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ }
+ }
+ result = verify_as_numbers(tgen, topo, addr_type, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_as_numbers()")
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ logger.info("Verifying AS numbers for dut %s:", router)
+
+ show_ip_bgp_neighbor_json = rnode.vtysh_cmd(
+ "show ip bgp neighbor json", isjson=True)
+ local_as = input_dict[router]["bgp"]["local_as"]
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ for addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"][
+ "neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"]
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ neighbor_ip = None
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type]. \
+ split("/")[0]
+ neigh_data = show_ip_bgp_neighbor_json[neighbor_ip]
+ # Verify Local AS for router
+ if neigh_data["localAs"] != local_as:
+ errormsg = "Failed: Verify local_as for dut {}," \
+ " found: {} but expected: {}".format(
+ router, neigh_data["localAs"],
+ local_as)
+ return errormsg
+ else:
+ logger.info("Verified local_as for dut %s, found"
+ " expected: %s", router, local_as)
+
+ # Verify Remote AS for neighbor
+ if neigh_data["remoteAs"] != remote_as:
+ errormsg = "Failed: Verify remote_as for dut " \
+ "{}'s neighbor {}, found: {} but " \
+ "expected: {}".format(
+ router, bgp_neighbor,
+ neigh_data["remoteAs"], remote_as)
+ return errormsg
+ else:
+ logger.info("Verified remote_as for dut %s's "
+ "neighbor %s, found expected: %s",
+ router, bgp_neighbor, remote_as)
+
+ logger.info("Exiting lib API: verify_AS_numbers()")
+ return True
+
+
+def clear_bgp_and_verify(tgen, topo, router):
+ """
+ This API is to clear bgp neighborship and verify bgp neighborship
+ is coming up (BGP is converged) using "show bgp summary json" command
+ and also verifying for all bgp neighbors uptime before and after
+ clear bgp sessions is different as the uptime must be changed once
+ bgp sessions are cleared using "clear ip bgp */clear bgp ipv6 *" cmd.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `router`: device under test
+
+ Usage
+ -----
+ result = clear_bgp_and_verify(tgen, topo, addr_type, dut)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: clear_bgp_and_verify()")
+
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ peer_uptime_before_clear_bgp = {}
+ # Verifying BGP convergence before bgp clear command
+ for retry in range(1, 11):
+ sleeptime = 2 * retry
+ if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
+ " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
+ return errormsg
+
+ show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+ logger.info(show_bgp_json)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type:
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+
+ # Peer up time dictionary
+ peer_uptime_before_clear_bgp[bgp_neighbor] = \
+ ipv4_data[neighbor_ip]["peerUptime"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ # Peer up time dictionary
+ peer_uptime_before_clear_bgp[bgp_neighbor] = \
+ ipv6_data[neighbor_ip]["peerUptime"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s before bgp"
+ " clear", router)
+ break
+ else:
+ logger.warning("BGP is not yet Converged for router %s "
+ "before bgp clear", router)
+
+ # Clearing BGP
+ logger.info("Clearing BGP neighborship for router %s..", router)
+ for addr_type in bgp_addr_type.keys():
+ if addr_type == "ipv4":
+ rnode.vtysh_cmd("clear ip bgp *")
+ elif addr_type == "ipv6":
+ rnode.vtysh_cmd("clear bgp ipv6 *")
+
+ peer_uptime_after_clear_bgp = {}
+ # Verifying BGP convergence after bgp clear command
+ for retry in range(1, 11):
+ sleeptime = 2 * retry
+ if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
+ " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
+ return errormsg
+
+ show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type:
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].\
+ split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ peer_uptime_after_clear_bgp[bgp_neighbor] = \
+ ipv4_data[neighbor_ip]["peerUptime"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+ # Peer up time dictionary
+ peer_uptime_after_clear_bgp[bgp_neighbor] = \
+ ipv6_data[neighbor_ip]["peerUptime"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s after bgp clear",
+ router)
+ break
+ else:
+ logger.warning("BGP is not yet Converged for router %s after"
+ " bgp clear", router)
+
+ # Comparing peerUptime dictionaries
+ if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
+ logger.info("BGP neighborship is reset after clear BGP on router %s",
+ router)
+ else:
+ errormsg = "BGP neighborship is not reset after clear bgp on router" \
+ " {}".format(router)
+ return errormsg
+
+ logger.info("Exiting lib API: clear_bgp_and_verify()")
+ return True
+
+
+def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
+ """
+ To verify BGP timer config, execute "show ip bgp neighbor json" command
+ and verify bgp timers with input_dict data.
+ To verify bgp timers functionality, shutting down peer interface
+ and verify BGP neighborship status.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type`: ip type, ipv4/ipv6
+ * `input_dict`: defines for which router, bgp timers needs to be verified
+
+ Usage:
+ # To verify BGP timers for neighbor r2 of router r1
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "bgp_neighbors":{
+ "r2":{
+ "keepalivetimer": 5,
+ "holddowntimer": 15,
+ }}}}}
+ result = verify_bgp_timers_and_functionality(tgen, topo, "ipv4",
+ input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_bgp_timers_and_functionality()")
+ sleep(5)
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+
+ logger.info("Verifying bgp timers functionality, DUT is %s:",
+ router)
+
+ show_ip_bgp_neighbor_json = \
+ rnode.vtysh_cmd("show ip bgp neighbor json", isjson=True)
+
+ bgp_addr_type = input_dict[router]["bgp"]["address_family"]
+
+ for addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"][
+ "neighbor"]
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ keepalivetimer = peer_dict["keepalivetimer"]
+ holddowntimer = peer_dict["holddowntimer"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type]. \
+ split("/")[0]
+ neighbor_intf = data[dest_link]["interface"]
+
+ # Verify HoldDownTimer for neighbor
+ bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[
+ neighbor_ip]["bgpTimerHoldTimeMsecs"]
+ if bgpHoldTimeMsecs != holddowntimer * 1000:
+ errormsg = "Verifying holddowntimer for bgp " \
+ "neighbor {} under dut {}, found: {} " \
+ "but expected: {}".format(
+ neighbor_ip, router,
+ bgpHoldTimeMsecs,
+ holddowntimer * 1000)
+ return errormsg
+
+ # Verify KeepAliveTimer for neighbor
+ bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[
+ neighbor_ip]["bgpTimerKeepAliveIntervalMsecs"]
+ if bgpKeepAliveTimeMsecs != keepalivetimer * 1000:
+ errormsg = "Verifying keepalivetimer for bgp " \
+ "neighbor {} under dut {}, found: {} " \
+ "but expected: {}".format(
+ neighbor_ip, router,
+ bgpKeepAliveTimeMsecs,
+ keepalivetimer * 1000)
+ return errormsg
+
+ ####################
+ # Shutting down peer interface after keepalive time and
+ # after some time bringing up peer interface.
+ # verifying BGP neighborship in (hold down-keep alive)
+ # time, it should not go down
+ ####################
+
+ # Wait till keep alive time
+ logger.info("=" * 20)
+ logger.info("Scenario 1:")
+ logger.info("Shutdown and bring up peer interface: %s "
+ "in keep alive time : %s sec and verify "
+ " BGP neighborship is intact in %s sec ",
+ neighbor_intf, keepalivetimer,
+ (holddowntimer - keepalivetimer))
+ logger.info("=" * 20)
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+
+ # Shutting down peer interface
+ logger.info("Shutting down interface %s on router %s",
+ neighbor_intf, bgp_neighbor)
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf,
+ ifaceaction=False)
+
+ # Bringing up peer interface
+ sleep(5)
+ logger.info("Bringing up interface %s on router %s..",
+ neighbor_intf, bgp_neighbor)
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf,
+ ifaceaction=True)
+
+ # Verifying BGP neighborship is intact in
+ # (holddown - keepalive) time
+ for timer in range(keepalivetimer, holddowntimer,
+ int(holddowntimer / 3)):
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+ sleep(2)
+ show_bgp_json = \
+ rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if timer == \
+ (holddowntimer - keepalivetimer):
+ if nh_state != "Established":
+ errormsg = "BGP neighborship has not gone " \
+ "down in {} sec for neighbor {}\n" \
+ "show_bgp_json: \n {} ".format(
+ timer, bgp_neighbor,
+ show_bgp_json)
+ return errormsg
+ else:
+ logger.info("BGP neighborship is intact in %s"
+ " sec for neighbor %s \n "
+ "show_bgp_json : \n %s",
+ timer, bgp_neighbor,
+ show_bgp_json)
+
+ ####################
+ # Shutting down peer interface and verifying that BGP
+ # neighborship is going down in holddown time
+ ####################
+ logger.info("=" * 20)
+ logger.info("Scenario 2:")
+ logger.info("Shutdown peer interface: %s and verify BGP"
+ " neighborship has gone down in hold down "
+ "time %s sec", neighbor_intf, holddowntimer)
+ logger.info("=" * 20)
+
+ logger.info("Shutting down interface %s on router %s..",
+ neighbor_intf, bgp_neighbor)
+ topotest.interface_set_status(router_list[bgp_neighbor],
+ neighbor_intf,
+ ifaceaction=False)
+
+ # Verifying BGP neighborship is going down in holddown time
+ for timer in range(keepalivetimer,
+ (holddowntimer + keepalivetimer),
+ int(holddowntimer / 3)):
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+ sleep(2)
+ show_bgp_json = \
+ rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if timer == holddowntimer:
+ if nh_state == "Established":
+ errormsg = "BGP neighborship has not gone " \
+ "down in {} sec for neighbor {}\n" \
+ "show_bgp_json: \n {} ".format(
+ timer, bgp_neighbor,
+ show_bgp_json)
+ return errormsg
+ else:
+ logger.info("BGP neighborship has gone down in"
+ " %s sec for neighbor %s \n"
+ "show_bgp_json : \n %s",
+ timer, bgp_neighbor,
+ show_bgp_json)
+
+ logger.info("Exiting lib API: verify_bgp_timers_and_functionality()")
+ return True
+
+
+def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
+ attribute):
+ """
+ API is to verify best path according to BGP attributes for given routes.
+ "show bgp ipv4/6 json" command will be run and verify best path according
+ to shortest as-path, highest local-preference and med, lowest weight and
+ route origin IGP>EGP>INCOMPLETE.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `router` : Device Under Test (DUT)
+ * `attribute` : calculate best path using this attribute
+ * `input_dict`: defines different routes to calculate for which route
+ best path is selected
+
+ Usage
+ -----
+ # To verify best path for routes 200.50.2.0/32 and 200.60.2.0/32 from
+ router r7 to router r1(DUT) as per shortest as-path attribute
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ attribute = "localpref"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut, \
+ input_dict, attribute)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: verify_best_path_as_per_bgp_attribute()")
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ # TODO get addr_type from address
+ # Verifying show bgp json
+ command = "show bgp {} json".format(addr_type)
+
+ sleep(2)
+ logger.info("Verifying router %s RIB for best path:", router)
+ sh_ip_bgp_json = rnode.vtysh_cmd(command, isjson=True)
+
+ for route_val in input_dict.values():
+ net_data = route_val["bgp"]["address_family"]["ipv4"]["unicast"]
+ networks = net_data["advertise_networks"]
+ for network in networks:
+ route = network["network"]
+
+ route_attributes = sh_ip_bgp_json["routes"][route]
+ _next_hop = None
+ compare = None
+ attribute_dict = {}
+ for route_attribute in route_attributes:
+ next_hops = route_attribute["nexthops"]
+ for next_hop in next_hops:
+ next_hop_ip = next_hop["ip"]
+ attribute_dict[next_hop_ip] = route_attribute[attribute]
+
+ # AS_PATH attribute
+ if attribute == "aspath":
+ # Find next_hop for the route having the shortest AS path
+ _next_hop = min(attribute_dict, key=lambda x: len(set(
+ attribute_dict[x])))
+ compare = "SHORTEST"
+
+ # LOCAL_PREF attribute
+ elif attribute == "localpref":
+ # Find next_hop for the route have highest local preference
+ _next_hop = max(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "HIGHEST"
+
+ # WEIGHT attribute
+ elif attribute == "weight":
+ # Find next_hop for the route have highest weight
+ _next_hop = max(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "HIGHEST"
+
+ # ORIGIN attribute
+ elif attribute == "origin":
+ # Find next_hop for the route have IGP as origin, -
+ # - rule is IGP>EGP>INCOMPLETE
+ _next_hop = [key for (key, value) in
+ attribute_dict.iteritems()
+ if value == "IGP"][0]
+ compare = ""
+
+ # MED attribute
+ elif attribute == "med":
+ # Find next_hop for the route have LOWEST MED
+ _next_hop = min(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "LOWEST"
+
+ # Show ip route
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+
+ rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if not bool(rib_routes_json):
+ errormsg = "No route found in RIB of router {}..". \
+ format(router)
+ return errormsg
+
+ st_found = False
+ nh_found = False
+ # Verify that the best path is installed in the RIB
+ if route in rib_routes_json:
+ st_found = True
+ # Verify next_hop in rib_routes_json
+ if rib_routes_json[route][0]["nexthops"][0]["ip"] == \
+ _next_hop:
+ nh_found = True
+ else:
+ errormsg = "Incorrect Nexthop for BGP route {} in " \
+ "RIB of router {}, Expected: {}, Found:" \
+ " {}\n".format(route, router,
+ rib_routes_json[route][0][
+ "nexthops"][0]["ip"],
+ _next_hop)
+ return errormsg
+
+ if st_found and nh_found:
+ logger.info(
+ "Best path for prefix: %s with next_hop: %s is "
+ "installed according to %s %s: (%s) in RIB of "
+ "router %s", route, _next_hop, compare,
+ attribute, attribute_dict[_next_hop], router)
+
+ logger.debug("Exiting lib API: verify_best_path_as_per_bgp_attribute()")
+ return True
+
+
+def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
+ attribute):
+ """
+ API is to verify best path according to admin distance for given
+ route. "show ip/ipv6 route json" command will be run and verify
+ best path according to the shortest admin distance.
+
+ Parameters
+ ----------
+ * `addr_type` : ip type, ipv4/ipv6
+ * `router`: Device Under Test (DUT)
+ * `tgen` : topogen object
+ * `attribute` : calculate best path using admin distance
+ * `input_dict`: defines different routes with different admin distance
+ to calculate for which route best path is selected
+ Usage
+ -----
+ # To verify best path for route 200.50.2.0/32 from router r2 to
+ router r1(DUT) as per shortest admin distance which is 60.
+ input_dict = {
+ "r2": {
+ "static_routes": [{"network": "200.50.2.0/32", \
+ "admin_distance": 80, "next_hop": "10.0.0.14"},
+ {"network": "200.50.2.0/32", \
+ "admin_distance": 60, "next_hop": "10.0.0.18"}]
+ }}
+ attribute = "localpref"
+ result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut, \
+ input_dict, attribute):
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_best_path_as_per_admin_distance()")
+ router_list = tgen.routers()
+ if router not in router_list:
+ return False
+
+ rnode = tgen.routers()[router]
+
+ sleep(2)
+ logger.info("Verifying router %s RIB for best path:", router)
+
+ # Show ip route cmd
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+
+ for routes_from_router in input_dict.keys():
+ sh_ip_route_json = router_list[routes_from_router].vtysh_cmd(
+ command, isjson=True)
+ networks = input_dict[routes_from_router]["static_routes"]
+ for network in networks:
+ route = network["network"]
+
+ route_attributes = sh_ip_route_json[route]
+ _next_hop = None
+ compare = None
+ attribute_dict = {}
+ for route_attribute in route_attributes:
+ next_hops = route_attribute["nexthops"]
+ for next_hop in next_hops:
+ next_hop_ip = next_hop["ip"]
+ attribute_dict[next_hop_ip] = route_attribute["distance"]
+
+ # Find next_hop for the route having the LOWEST admin distance
+ _next_hop = min(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "LOWEST"
+
+ # Show ip route
+ rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if not bool(rib_routes_json):
+ errormsg = "No route found in RIB of router {}..".format(router)
+ return errormsg
+
+ st_found = False
+ nh_found = False
+ # Verify that the best path is installed in the RIB
+ if route in rib_routes_json:
+ st_found = True
+ # Verify next_hop in rib_routes_json
+ if rib_routes_json[route][0]["nexthops"][0]["ip"] == \
+ _next_hop:
+ nh_found = True
+ else:
+ errormsg = ("Nexthop {} is Missing for BGP route {}"
+ " in RIB of router {}\n".format(_next_hop,
+ route, router))
+ return errormsg
+
+ if st_found and nh_found:
+ logger.info("Best path for prefix: %s is installed according"
+ " to %s %s: (%s) in RIB of router %s", route,
+ compare, attribute,
+ attribute_dict[_next_hop], router)
+
+ logger.info(
+ "Exiting lib API: verify_best_path_as_per_admin_distance()")
+ return True
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
new file mode 100644
index 0000000000..d2c1d82430
--- /dev/null
+++ b/tests/topotests/lib/common_config.py
@@ -0,0 +1,1391 @@
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+from collections import OrderedDict
+from datetime import datetime
+from time import sleep
+from subprocess import call
+from subprocess import STDOUT as SUB_STDOUT
+import StringIO
+import os
+import ConfigParser
+import traceback
+import socket
+import ipaddr
+
+from lib.topolog import logger, logger_config
+from lib.topogen import TopoRouter
+
+
+FRRCFG_FILE = "frr_json.conf"
+FRRCFG_BKUP_FILE = "frr_json_initial.conf"
+
+ERROR_LIST = ["Malformed", "Failure", "Unknown"]
+
+####
+CD = os.path.dirname(os.path.realpath(__file__))
+PYTESTINI_PATH = os.path.join(CD, "../pytest.ini")
+
+# Creating tmp dir with testsuite name to avoid conflict condition when
+# multiple testsuites run together. All temporary files would be created
+# in this dir and this dir would be removed once testsuite run is
+# completed
+LOGDIR = "/tmp/topotests/"
+TMPDIR = None
+
+# NOTE: to save execution logs to log file frrtest_log_dir must be configured
+# in `pytest.ini`.
+config = ConfigParser.ConfigParser()
+config.read(PYTESTINI_PATH)
+
+config_section = "topogen"
+
+if config.has_option("topogen", "verbosity"):
+ loglevel = config.get("topogen", "verbosity")
+ loglevel = loglevel.upper()
+else:
+ loglevel = "INFO"
+
+if config.has_option("topogen", "frrtest_log_dir"):
+ frrtest_log_dir = config.get("topogen", "frrtest_log_dir")
+ time_stamp = datetime.time(datetime.now())
+ logfile_name = "frr_test_bgp_"
+ frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
+ print("frrtest_log_file..", frrtest_log_file)
+
+ logger = logger_config.get_logger(name="test_execution_logs",
+ log_level=loglevel,
+ target=frrtest_log_file)
+ print("Logs will be sent to logfile: {}".format(frrtest_log_file))
+
+if config.has_option("topogen", "show_router_config"):
+ show_router_config = config.get("topogen", "show_router_config")
+else:
+ show_router_config = False
+
+# env variable for setting what address type to test
+ADDRESS_TYPES = os.environ.get("ADDRESS_TYPES")
+
+
+# Saves sequence id numbers
+SEQ_ID = {
+ "prefix_lists": {},
+ "route_maps": {}
+}
+
+
+def get_seq_id(obj_type, router, obj_name):
+ """
+ Generates and saves a sequence number, incremented in steps of 10
+
+ Parameters
+ ----------
+ * `obj_type`: prefix_lists or route_maps
+ * `router`: router name
+ *` obj_name`: name of the prefix-list or route-map
+
+ Returns
+ --------
+ Sequence number generated
+ """
+
+ router_data = SEQ_ID[obj_type].setdefault(router, {})
+ obj_data = router_data.setdefault(obj_name, {})
+ seq_id = obj_data.setdefault("seq_id", 0)
+
+ seq_id = int(seq_id) + 10
+ obj_data["seq_id"] = seq_id
+
+ return seq_id
+
+
+def set_seq_id(obj_type, router, id, obj_name):
+ """
+ Saves sequence number if not auto-generated and given by user
+
+ Parameters
+ ----------
+ * `obj_type`: prefix_lists or route_maps
+ * `router`: router name
+ *` obj_name`: name of the prefix-list or route-map
+ """
+ router_data = SEQ_ID[obj_type].setdefault(router, {})
+ obj_data = router_data.setdefault(obj_name, {})
+ seq_id = obj_data.setdefault("seq_id", 0)
+
+ seq_id = int(seq_id) + int(id)
+ obj_data["seq_id"] = seq_id
+
+
+class InvalidCLIError(Exception):
+ """Raise when the CLI command is wrong"""
+ pass
+
+
+def create_common_configuration(tgen, router, data, config_type=None,
+ build=False):
+ """
+ API to create object of class FRRConfig and also create frr_json.conf
+ file. It will create interface and common configurations and save it to
+ frr_json.conf and load to router
+
+ Parameters
+ ----------
+ * `tgen`: tgen object
+ * `data`: Configuration data saved in a list.
+ * `router` : router id to be configured.
+ * `config_type` : Syntactic information while writing configuration. Should
+ be one of the value as mentioned in the config_map below.
+ * `build` : Only for initial setup phase this is set as True
+
+ Returns
+ -------
+ True or False
+ """
+ TMPDIR = os.path.join(LOGDIR, tgen.modname)
+
+ fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE)
+
+ config_map = OrderedDict({
+ "general_config": "! FRR General Config\n",
+ "interface_config": "! Interfaces Config\n",
+ "static_route": "! Static Route Config\n",
+ "prefix_list": "! Prefix List Config\n",
+ "route_maps": "! Route Maps Config\n",
+ "bgp": "! BGP Config\n"
+ })
+
+ if build:
+ mode = "a"
+ else:
+ mode = "w"
+
+ try:
+ frr_cfg_fd = open(fname, mode)
+ if config_type:
+ frr_cfg_fd.write(config_map[config_type])
+ for line in data:
+ frr_cfg_fd.write("{} \n".format(str(line)))
+
+ except IOError as err:
+ logger.error("Unable to open FRR Config File. error(%s): %s" %
+ (err.errno, err.strerror))
+ return False
+ finally:
+ frr_cfg_fd.close()
+
+ # If configuration is applied from build, it will be done at the end
+ if not build:
+ load_config_to_router(tgen, router)
+
+ return True
+
+
+def reset_config_on_routers(tgen, routerName=None):
+ """
+ Resets configuration on routers to the snapshot created using input JSON
+ file. It replaces existing router configuration with FRRCFG_BKUP_FILE
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routerName` : router config is to be reset
+ """
+
+ logger.debug("Entering API: reset_config_on_routers")
+
+ router_list = tgen.routers()
+ for rname, router in router_list.iteritems():
+ if routerName and routerName != rname:
+ continue
+
+ cfg = router.run("vtysh -c 'show running'")
+ fname = "{}/{}/frr.sav".format(TMPDIR, rname)
+ dname = "{}/{}/delta.conf".format(TMPDIR, rname)
+ f = open(fname, "w")
+ for line in cfg.split("\n"):
+ line = line.strip()
+
+ if (line == "Building configuration..." or
+ line == "Current configuration:" or
+ not line):
+ continue
+ f.write(line)
+ f.write("\n")
+
+ f.close()
+
+ command = "/usr/lib/frr/frr-reload.py --input {}/{}/frr.sav" \
+ " --test {}/{}/frr_json_initial.conf > {}". \
+ format(TMPDIR, rname, TMPDIR, rname, dname)
+ result = call(command, shell=True, stderr=SUB_STDOUT)
+
+ # Assert if command fail
+ if result > 0:
+ errormsg = ("Command:{} is failed due to non-zero exit"
+ " code".format(command))
+ return errormsg
+
+ f = open(dname, "r")
+ delta = StringIO.StringIO()
+ delta.write("configure terminal\n")
+ t_delta = f.read()
+ for line in t_delta.split("\n"):
+ line = line.strip()
+ if (line == "Lines To Delete" or
+ line == "===============" or
+ line == "Lines To Add" or
+ line == "============" or
+ not line):
+ continue
+ delta.write(line)
+ delta.write("\n")
+
+ delta.write("end\n")
+ output = router.vtysh_multicmd(delta.getvalue(),
+ pretty_output=False)
+ logger.info("New configuration for router {}:".format(rname))
+ delta.close()
+ delta = StringIO.StringIO()
+ cfg = router.run("vtysh -c 'show running'")
+ for line in cfg.split("\n"):
+ line = line.strip()
+ delta.write(line)
+ delta.write("\n")
+
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ logger.info(delta.getvalue())
+ delta.close()
+
+ logger.debug("Exting API: reset_config_on_routers")
+ return True
+
+
+def load_config_to_router(tgen, routerName, save_bkup=False):
+ """
+ Loads configuration on router from the file FRRCFG_FILE.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routerName` : router for which configuration to be loaded
+ * `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
+ """
+
+ logger.debug("Entering API: load_config_to_router")
+
+ router_list = tgen.routers()
+ for rname, router in router_list.iteritems():
+ if rname == routerName:
+ try:
+ frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
+ frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
+ FRRCFG_BKUP_FILE)
+ with open(frr_cfg_file, "r") as cfg:
+ data = cfg.read()
+ if save_bkup:
+ with open(frr_cfg_bkup, "w") as bkup:
+ bkup.write(data)
+
+ output = router.vtysh_multicmd(data, pretty_output=False)
+ for out_err in ERROR_LIST:
+ if out_err.lower() in output.lower():
+ raise InvalidCLIError("%s" % output)
+ except IOError as err:
+ errormsg = ("Unable to open config File. error(%s):"
+ " %s", (err.errno, err.strerror))
+ return errormsg
+
+ logger.info("New configuration for router {}:".format(rname))
+ new_config = router.run("vtysh -c 'show running'")
+
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ logger.info(new_config)
+
+ logger.debug("Exting API: load_config_to_router")
+ return True
+
+
+def start_topology(tgen):
+ """
+ Starting topology, create tmp files which are loaded to routers
+ to start daemons and then start routers
+ * `tgen` : topogen object
+ """
+
+ global TMPDIR
+ # Starting topology
+ tgen.start_topology()
+
+ # Starting daemons
+ router_list = tgen.routers()
+ TMPDIR = os.path.join(LOGDIR, tgen.modname)
+
+ for rname, router in router_list.iteritems():
+ try:
+ os.chdir(TMPDIR)
+
+ # Creating router-named dir and empty zebra.conf bgpd.conf files
+ # inside the current directory
+
+ if os.path.isdir('{}'.format(rname)):
+ os.system("rm -rf {}".format(rname))
+ os.mkdir('{}'.format(rname))
+ os.system('chmod -R go+rw {}'.format(rname))
+ os.chdir('{}/{}'.format(TMPDIR, rname))
+ os.system('touch zebra.conf bgpd.conf')
+ else:
+ os.mkdir('{}'.format(rname))
+ os.system('chmod -R go+rw {}'.format(rname))
+ os.chdir('{}/{}'.format(TMPDIR, rname))
+ os.system('touch zebra.conf bgpd.conf')
+
+ except IOError as (errno, strerror):
+ logger.error("I/O error({0}): {1}".format(errno, strerror))
+
+ # Loading empty zebra.conf file to router, to start the zebra daemon
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ '{}/{}/zebra.conf'.format(TMPDIR, rname)
+ # os.path.join(tmpdir, '{}/zebra.conf'.format(rname))
+ )
+ # Loading empty bgpd.conf file to router, to start the bgp daemon
+ router.load_config(
+ TopoRouter.RD_BGP,
+ '{}/{}/bgpd.conf'.format(TMPDIR, rname)
+ # os.path.join(tmpdir, '{}/bgpd.conf'.format(rname))
+ )
+
+ # Starting routers
+ logger.info("Starting all routers once topology is created")
+ tgen.start_router()
+
+
+def number_to_row(routerName):
+ """
+ Returns the number for the router.
+ Calculation based on name a0 = row 0, a1 = row 1, b2 = row 2, z23 = row 23
+ etc
+ """
+ return int(routerName[1:])
+
+
+def number_to_column(routerName):
+ """
+ Returns the number for the router.
+ Calculation based on name a0 = column 0, a1 = column 0, b2 = column 1,
+ z23 = column 25 etc
+ """
+ return ord(routerName[0]) - 97
+
+
+#############################################
+# Common APIs, will be used by all protocols
+#############################################
+
+def validate_ip_address(ip_address):
+ """
+ Validates the type of ip address
+
+ Parameters
+ ----------
+ * `ip_address`: IPv4/IPv6 address
+
+ Returns
+ -------
+ Type of address as string
+ """
+
+ if "/" in ip_address:
+ ip_address = ip_address.split("/")[0]
+
+ v4 = True
+ v6 = True
+ try:
+ socket.inet_aton(ip_address)
+ except socket.error as error:
+ logger.debug("Not a valid IPv4 address")
+ v4 = False
+ else:
+ return "ipv4"
+
+ try:
+ socket.inet_pton(socket.AF_INET6, ip_address)
+ except socket.error as error:
+ logger.debug("Not a valid IPv6 address")
+ v6 = False
+ else:
+ return "ipv6"
+
+ if not v4 and not v6:
+ raise Exception("InvalidIpAddr", "%s is neither valid IPv4 or IPv6"
+ " address" % ip_address)
+
+
+def check_address_types(addr_type):
+ """
+ Checks environment variable set and compares with the current address type
+ """
+ global ADDRESS_TYPES
+ if ADDRESS_TYPES is None:
+ ADDRESS_TYPES = "dual"
+
+ if ADDRESS_TYPES == "dual":
+ ADDRESS_TYPES = ["ipv4", "ipv6"]
+ elif ADDRESS_TYPES == "ipv4":
+ ADDRESS_TYPES = ["ipv4"]
+ elif ADDRESS_TYPES == "ipv6":
+ ADDRESS_TYPES = ["ipv6"]
+
+ if addr_type not in ADDRESS_TYPES:
+ logger.error("{} not in supported/configured address types {}".
+ format(addr_type, ADDRESS_TYPES))
+ return False
+
+ return ADDRESS_TYPES
+
+
+def generate_ips(network, no_of_ips):
+ """
+ Returns list of IPs.
+ based on start_ip and no_of_ips
+
+ * `network` : from here the ip will start generating, start_ip will be
+ first ip
+ * `no_of_ips` : these many IPs will be generated
+
+ Limitation: It will generate IPs only for ip_mask 32
+
+ """
+ ipaddress_list = []
+ if type(network) is not list:
+ network = [network]
+
+ for start_ipaddr in network:
+ if "/" in start_ipaddr:
+ start_ip = start_ipaddr.split("/")[0]
+ mask = int(start_ipaddr.split("/")[1])
+
+ addr_type = validate_ip_address(start_ip)
+ if addr_type == "ipv4":
+ start_ip = ipaddr.IPv4Address(unicode(start_ip))
+ step = 2 ** (32 - mask)
+ if addr_type == "ipv6":
+ start_ip = ipaddr.IPv6Address(unicode(start_ip))
+ step = 2 ** (128 - mask)
+
+ next_ip = start_ip
+ count = 0
+ while count < no_of_ips:
+ ipaddress_list.append("{}/{}".format(next_ip, mask))
+ if addr_type == "ipv6":
+ next_ip = ipaddr.IPv6Address(int(next_ip) + step)
+ else:
+ next_ip += step
+ count += 1
+
+ return ipaddress_list
+
+
+def find_interface_with_greater_ip(topo, router, loopback=True,
+ interface=True):
+ """
+ Returns highest interface ip for ipv4/ipv6. If loopback is there then
+ it will return highest IP from loopback IPs otherwise from physical
+ interface IPs.
+
+ * `topo` : json file data
+ * `router` : router for which the highest interface IP should be calculated
+ """
+
+ link_data = topo["routers"][router]["links"]
+ lo_list = []
+ interfaces_list = []
+ lo_exists = False
+ for destRouterLink, data in sorted(link_data.iteritems()):
+ if loopback:
+ if "type" in data and data["type"] == "loopback":
+ lo_exists = True
+ ip_address = topo["routers"][router]["links"][
+ destRouterLink]["ipv4"].split("/")[0]
+ lo_list.append(ip_address)
+ if interface:
+ ip_address = topo["routers"][router]["links"][
+ destRouterLink]["ipv4"].split("/")[0]
+ interfaces_list.append(ip_address)
+
+ if lo_exists:
+ return sorted(lo_list)[-1]
+
+ return sorted(interfaces_list)[-1]
+
+
+def write_test_header(tc_name):
+ """ Display message at beginning of test case"""
+ count = 20
+ logger.info("*"*(len(tc_name)+count))
+ logger.info("START -> Testcase : %s", tc_name)
+ logger.info("*"*(len(tc_name)+count))
+
+
+def write_test_footer(tc_name):
+ """ Display message at end of test case"""
+ count = 21
+ logger.info("="*(len(tc_name)+count))
+ logger.info("PASSED -> Testcase : %s", tc_name)
+ logger.info("="*(len(tc_name)+count))
+
+
+#############################################
+ # These APIs will be used by test cases
+#############################################
+def create_interfaces_cfg(tgen, topo, build=False):
+ """
+ Create interface configuration for created topology. Basic Interface
+ configuration is provided in input json file.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ True or False
+ """
+ result = False
+
+ try:
+ for c_router, c_data in topo.iteritems():
+ interface_data = []
+ for destRouterLink, data in sorted(c_data["links"].iteritems()):
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+ interface_data.append("interface {}\n".format(
+ str(interface_name)
+ ))
+ if "ipv4" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv4"]
+ interface_data.append("ip address {}\n".format(
+ intf_addr
+ ))
+ if "ipv6" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv6"]
+ interface_data.append("ipv6 address {}\n".format(
+ intf_addr
+ ))
+ result = create_common_configuration(tgen, c_router,
+ interface_data,
+ "interface_config",
+ build=build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ return result
+
+
+def create_static_routes(tgen, input_dict, build=False):
+ """
+ Create static routes for given router as defined in input_dict
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict should be in the format below:
+ # static_routes: list of all routes
+ # network: network address
+ # no_of_ip: number of route prefixes that will be generated and configured
+ # admin_distance: admin distance for route/routes.
+ # next_hop: starting next-hop address
+ # tag: tag id for static routes
+ # delete: True if config to be removed. Default False.
+
+ Example:
+ "routers": {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1",
+ "tag": 4001
+ "delete": true
+ }
+ ]
+ }
+ }
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ result = False
+ logger.debug("Entering lib API: create_static_routes()")
+ try:
+ for router in input_dict.keys():
+ if "static_routes" not in input_dict[router]:
+ errormsg = "static_routes not present in input_dict"
+ logger.info(errormsg)
+ continue
+
+ static_routes_list = []
+
+ static_routes = input_dict[router]["static_routes"]
+ for static_route in static_routes:
+ del_action = static_route.setdefault("delete", False)
+ # No of IPs
+ no_of_ip = static_route.setdefault("no_of_ip", 1)
+ admin_distance = static_route.setdefault("admin_distance",
+ None)
+ tag = static_route.setdefault("tag", None)
+ if "next_hop" not in static_route or \
+ "network" not in static_route:
+ errormsg = "'next_hop' or 'network' missing in" \
+ " input_dict"
+ return errormsg
+
+ next_hop = static_route["next_hop"]
+ network = static_route["network"]
+ ip_list = generate_ips([network], no_of_ip)
+ for ip in ip_list:
+ addr_type = validate_ip_address(ip)
+ if addr_type == "ipv4":
+ cmd = "ip route {} {}".format(ip, next_hop)
+ else:
+ cmd = "ipv6 route {} {}".format(ip, next_hop)
+
+ if tag:
+ cmd = "{} {}".format(cmd, str(tag))
+ if admin_distance:
+ cmd = "{} {}".format(cmd, admin_distance)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ static_routes_list.append(cmd)
+
+ result = create_common_configuration(tgen, router,
+ static_routes_list,
+ "static_route",
+ build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_static_routes()")
+ return result
+
+
+def create_prefix_lists(tgen, input_dict, build=False):
+ """
+ Create ip prefix lists as per the config provided in input
+ JSON or input_dict
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ # pf_lists_1: name of prefix-list, user defined
+ # seqid: prefix-list seqid, auto-generated if not given by user
+ # network: criteria for applying prefix-list
+ # action: permit/deny
+ # le: less than or equal number of bits
+ # ge: greater than or equal number of bits
+
+ Example
+ -------
+ input_dict = {
+ "r1": {
+ "prefix_lists":{
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": 10,
+ "network": "any",
+ "action": "permit",
+ "le": "32",
+ "ge": "30",
+ "delete": True
+ }
+ ]
+ }
+ }
+ }
+ }
+
+ Returns
+ -------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: create_prefix_lists()")
+ result = False
+ try:
+ for router in input_dict.keys():
+ if "prefix_lists" not in input_dict[router]:
+ errormsg = "prefix_lists not present in input_dict"
+ logger.info(errormsg)
+ continue
+
+ config_data = []
+ prefix_lists = input_dict[router]["prefix_lists"]
+ for addr_type, prefix_data in prefix_lists.iteritems():
+ if not check_address_types(addr_type):
+ continue
+
+ for prefix_name, prefix_list in prefix_data.iteritems():
+ for prefix_dict in prefix_list:
+ if "action" not in prefix_dict or \
+ "network" not in prefix_dict:
+ errormsg = "'action' or network' missing in" \
+ " input_dict"
+ return errormsg
+
+ network_addr = prefix_dict["network"]
+ action = prefix_dict["action"]
+ le = prefix_dict.setdefault("le", None)
+ ge = prefix_dict.setdefault("ge", None)
+ seqid = prefix_dict.setdefault("seqid", None)
+ del_action = prefix_dict.setdefault("delete", False)
+ if seqid is None:
+ seqid = get_seq_id("prefix_lists", router,
+ prefix_name)
+ else:
+ set_seq_id("prefix_lists", router, seqid,
+ prefix_name)
+
+ if addr_type == "ipv4":
+ protocol = "ip"
+ else:
+ protocol = "ipv6"
+
+ cmd = "{} prefix-list {} seq {} {} {}".format(
+ protocol, prefix_name, seqid, action, network_addr
+ )
+ if le:
+ cmd = "{} le {}".format(cmd, le)
+ if ge:
+ cmd = "{} ge {}".format(cmd, ge)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+ result = create_common_configuration(tgen, router,
+ config_data,
+ "prefix_list",
+ build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_prefix_lists()")
+ return result
+
+
+def create_route_maps(tgen, input_dict, build=False):
+ """
+ Create route-map on the devices as per the arguments passed
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ # route_maps: key, value pair for route-map name and its attribute
+ # rmap_match_prefix_list_1: user given name for route-map
+ # action: PERMIT/DENY
+ # match: key,value pair for match criteria. prefix_list, community-list,
+ large-community-list or tag. Only one option at a time.
+ # prefix_list: name of prefix list
+ # large-community-list: name of large community list
+ # community-list: name of community list
+ # tag: tag id for static routes
+ # set: key, value pair for modifying route attributes
+ # localpref: preference value for the network
+ # med: metric value advertised for AS
+ # aspath: set AS path value
+ # weight: weight for the route
+ # community: standard community value to be attached
+ # large_community: large community value to be attached
+ # community_additive: if set to "additive", adds community/large-community
+ value to the existing values of the network prefix
+
+ Example:
+ --------
+ input_dict = {
+ "r1": {
+ "route_maps": {
+ "rmap_match_prefix_list_1": [
+ {
+ "action": "PERMIT",
+ "match": {
+ "ipv4": {
+ "prefix_list": "pf_list_1"
+ }
+ "ipv6": {
+ "prefix_list": "pf_list_1"
+ }
+
+ "large-community-list": {
+ "id": "community_1",
+ "exact_match": True
+ }
+ "community": {
+ "id": "community_2",
+ "exact_match": True
+ }
+ "tag": "tag_id"
+ },
+ "set": {
+ "localpref": 150,
+ "med": 30,
+ "aspath": {
+ "num": 20000,
+ "action": "prepend",
+ },
+ "weight": 500,
+ "community": {
+ "num": "1:2 2:3",
+ "action": additive
+ }
+ "large_community": {
+ "num": "1:2:3 4:5:6",
+ "action": additive
+ },
+ }
+ }
+ ]
+ }
+ }
+ }
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ result = False
+ logger.debug("Entering lib API: create_route_maps()")
+
+ try:
+ for router in input_dict.keys():
+ if "route_maps" not in input_dict[router]:
+ errormsg = "route_maps not present in input_dict"
+ logger.info(errormsg)
+ continue
+ rmap_data = []
+ for rmap_name, rmap_value in \
+ input_dict[router]["route_maps"].iteritems():
+
+ for rmap_dict in rmap_value:
+ del_action = rmap_dict.setdefault("delete", False)
+
+ if del_action:
+ rmap_data.append("no route-map {}".format(rmap_name))
+ continue
+
+ if "action" not in rmap_dict:
+ errormsg = "action not present in input_dict"
+ logger.error(errormsg)
+ return False
+
+ rmap_action = rmap_dict.setdefault("action", "deny")
+
+ seq_id = rmap_dict.setdefault("seq_id", None)
+ if seq_id is None:
+ seq_id = get_seq_id("route_maps", router, rmap_name)
+ else:
+ set_seq_id("route_maps", router, seq_id, rmap_name)
+
+ rmap_data.append("route-map {} {} {}".format(
+ rmap_name, rmap_action, seq_id
+ ))
+
+ # Verifying if SET criteria is defined
+ if "set" in rmap_dict:
+ set_data = rmap_dict["set"]
+
+ local_preference = set_data.setdefault("localpref",
+ None)
+ metric = set_data.setdefault("med", None)
+ as_path = set_data.setdefault("aspath", {})
+ weight = set_data.setdefault("weight", None)
+ community = set_data.setdefault("community", {})
+ large_community = set_data.setdefault(
+ "large_community", {})
+ set_action = set_data.setdefault("set_action", None)
+
+ # Local Preference
+ if local_preference:
+ rmap_data.append("set local-preference {}".
+ format(local_preference))
+
+ # Metric
+ if metric:
+ rmap_data.append("set metric {} \n".format(metric))
+
+ # AS Path Prepend
+ if as_path:
+ as_num = as_path.setdefault("as_num", None)
+ as_action = as_path.setdefault("as_action", None)
+ if as_action and as_num:
+ rmap_data.append("set as-path {} {}".
+ format(as_action, as_num))
+
+ # Community
+ if community:
+ num = community.setdefault("num", None)
+ comm_action = community.setdefault("action", None)
+ if num:
+ cmd = "set community {}".format(num)
+ if comm_action:
+ cmd = "{} {}".format(cmd, comm_action)
+ rmap_data.append(cmd)
+ else:
+ logger.error("In community, AS Num not"
+ " provided")
+ return False
+
+ if large_community:
+ num = large_community.setdefault("num", None)
+ comm_action = large_community.setdefault("action",
+ None)
+ if num:
+ cmd = "set large-community {}".format(num)
+ if comm_action:
+ cmd = "{} {}".format(cmd, comm_action)
+
+ rmap_data.append(cmd)
+ else:
+ logger.error("In large_community, AS Num not"
+ " provided")
+ return False
+
+ # Weight
+ if weight:
+ rmap_data.append("set weight {} \n".format(
+ weight))
+
+ # Adding MATCH and SET sequence to RMAP if defined
+ if "match" in rmap_dict:
+ match_data = rmap_dict["match"]
+ ipv4_data = match_data.setdefault("ipv4", {})
+ ipv6_data = match_data.setdefault("ipv6", {})
+ community = match_data.setdefault("community-list",
+ {})
+ large_community = match_data.setdefault(
+ "large-community-list", {}
+ )
+ tag = match_data.setdefault("tag", None)
+
+ if ipv4_data:
+ prefix_name = ipv4_data.setdefault("prefix_lists",
+ None)
+ if prefix_name:
+ rmap_data.append("match ip address prefix-list"
+ " {}".format(prefix_name))
+ if ipv6_data:
+ prefix_name = ipv6_data.setdefault("prefix_lists",
+ None)
+ if prefix_name:
+ rmap_data.append("match ipv6 address "
+ "prefix-list {}".
+ format(prefix_name))
+ if tag:
+ rmap_data.append("match tag {}".format(tag))
+
+ if community:
+ if "id" not in community:
+ logger.error("'id' is mandatory for "
+ "community-list in match"
+ " criteria")
+ return False
+ cmd = "match community {}".format(community["id"])
+ exact_match = community.setdefault("exact_match",
+ False)
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+
+ rmap_data.append(cmd)
+
+ if large_community:
+ if "id" not in large_community:
+ logger.error("'num' is mandatory for "
+ "large-community-list in match "
+ "criteria")
+ return False
+ cmd = "match large-community {}".format(
+ large_community["id"])
+ exact_match = large_community.setdefault(
+ "exact_match", False)
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+
+ rmap_data.append(cmd)
+
+ result = create_common_configuration(tgen, router,
+ rmap_data,
+ "route_maps",
+ build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_route_maps()")
+ return result
+
+
+#############################################
+# Verification APIs
+#############################################
+def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
+ """
+ Data will be read from input_dict or input JSON file, API will generate
+ same prefixes, which were redistributed by either create_static_routes() or
+ advertise_networks_using_network_command() and will verify next_hop and
+ each prefix/route is present in "show ip/ipv6 route {bgp/static} json"
+ command o/p.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test, for which user wants to test the data
+ * `input_dict` : input dict, has details of static routes
+ * `next_hop`[optional]: next_hop which needs to be verified,
+ default: static
+ * `protocol`[optional]: protocol, default = None
+
+ Usage
+ -----
+ # RIB can be verified for static routes OR network advertised using
+ network command. Following are input_dicts to create static routes
+ and advertise networks using network command. Any one of the input_dict
+ can be passed to verify_rib() to verify routes in DUT's RIB.
+
+ # Creating static routes for r1
+ input_dict = {
+ "r1": {
+ "static_routes": [{"network": "10.0.20.1/32", "no_of_ip": 9, \
+ "admin_distance": 100, "next_hop": "10.0.0.2", "tag": 4001}]
+ }}
+ # Advertising networks using network command in router r1
+ input_dict = {
+ "r1": {
+ "advertise_networks": [{"start_ip": "20.0.0.0/32",
+ "no_of_network": 10},
+ {"start_ip": "30.0.0.0/32"}]
+ }}
+ # Verifying ipv4 routes in router r1 learned via BGP
+ dut = "r2"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol = protocol)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_rib()")
+
+ router_list = tgen.routers()
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.iteritems():
+ if router != dut:
+ continue
+
+ # Verifying RIB routes
+ if addr_type == "ipv4":
+ if protocol:
+ command = "show ip route {} json".format(protocol)
+ else:
+ command = "show ip route json"
+ else:
+ if protocol:
+ command = "show ipv6 route {} json".format(protocol)
+ else:
+ command = "show ipv6 route json"
+
+ sleep(2)
+ logger.info("Checking router %s RIB:", router)
+ rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if bool(rib_routes_json) is False:
+ errormsg = "No {} route found in rib of router {}..". \
+ format(protocol, router)
+ return errormsg
+
+ if "static_routes" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["static_routes"]
+ st_found = False
+ nh_found = False
+ found_routes = []
+ missing_routes = []
+ for static_route in static_routes:
+ network = static_route["network"]
+ if "no_of_ip" in static_route:
+ no_of_ip = static_route["no_of_ip"]
+ else:
+ no_of_ip = 0
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ for st_rt in ip_list:
+ st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
+
+ if st_rt in rib_routes_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ found_hops = [rib_r["ip"] for rib_r in
+ rib_routes_json[st_rt][0][
+ "nexthops"]]
+ for nh in next_hop:
+ nh_found = False
+ if nh and nh in found_hops:
+ nh_found = True
+ else:
+ errormsg = ("Nexthop {} is Missing for {}"
+ " route {} in RIB of router"
+ " {}\n".format(next_hop,
+ protocol,
+ st_rt, dut))
+
+ return errormsg
+ else:
+ missing_routes.append(st_rt)
+ if nh_found:
+ logger.info("Found next_hop %s for all routes in RIB of"
+ " router %s\n", next_hop, dut)
+
+ if not st_found and len(missing_routes) > 0:
+ errormsg = "Missing route in RIB of router {}, routes: " \
+ "{}\n".format(dut, missing_routes)
+ return errormsg
+
+ logger.info("Verified routes in router %s RIB, found routes"
+ " are: %s\n", dut, found_routes)
+
+ advertise_network = input_dict[routerInput].setdefault(
+ "advertise_networks", {})
+ if advertise_network:
+ found_routes = []
+ missing_routes = []
+ found = False
+ for advertise_network_dict in advertise_network:
+ start_ip = advertise_network_dict["network"]
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 0
+
+ # Generating IPs for verification
+ ip_list = generate_ips(start_ip, no_of_network)
+ for st_rt in ip_list:
+ st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
+
+ if st_rt in rib_routes_json:
+ found = True
+ found_routes.append(st_rt)
+ else:
+ missing_routes.append(st_rt)
+
+ if not found and len(missing_routes) > 0:
+ errormsg = "Missing route in RIB of router {}, are: {}" \
+ " \n".format(dut, missing_routes)
+ return errormsg
+
+ logger.info("Verified routes in router %s RIB, found routes"
+ " are: %s", dut, found_routes)
+
+ logger.info("Exiting lib API: verify_rib()")
+ return True
+
+
+def verify_admin_distance_for_static_routes(tgen, input_dict):
+ """
+ API to verify admin distance for static routes as defined in input_dict/
+ input JSON by running show ip/ipv6 route json command.
+
+ Parameter
+ ---------
+ * `tgen` : topogen object
+ * `input_dict`: having details like - for which router and static routes
+ admin distance needs to be verified
+ Usage
+ -----
+ # To verify admin distance is 10 for prefix 10.0.20.1/32 having next_hop
+ 10.0.0.2 in router r1
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "admin_distance": 10,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = verify_admin_distance_for_static_routes(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_admin_distance_for_static_routes()")
+
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ for static_route in input_dict[router]["static_routes"]:
+ addr_type = validate_ip_address(static_route["network"])
+ # Command to execute
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+ show_ip_route_json = rnode.vtysh_cmd(command, isjson=True)
+
+ logger.info("Verifying admin distance for static route %s"
+ " under dut %s:", static_route, router)
+ network = static_route["network"]
+ next_hop = static_route["next_hop"]
+ admin_distance = static_route["admin_distance"]
+ if network in show_ip_route_json:
+ route_data = show_ip_route_json[network][0]
+ if route_data["nexthops"][0]["ip"] == next_hop:
+ if route_data["distance"] != admin_distance:
+ errormsg = ("Verification failed: admin distance"
+ " for static route {} under dut {},"
+ " found:{} but expected:{}".
+ format(static_route, router,
+ route_data["distance"],
+ admin_distance))
+ return errormsg
+ else:
+ logger.info("Verification successful: admin"
+ " distance for static route %s under"
+ " dut %s, found:%s", static_route,
+ router, route_data["distance"])
+
+ else:
+ errormsg = ("Static route {} not found in "
+ "show_ip_route_json for dut {}".
+ format(network, router))
+ return errormsg
+
+ logger.info("Exiting lib API: verify_admin_distance_for_static_routes()")
+ return True
+
+
+def verify_prefix_lists(tgen, input_dict):
+ """
+ Running "show ip prefix-list" command and verifying given prefix-list
+ is present in router.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `input_dict`: data to verify prefix lists
+
+ Usage
+ -----
+ # To verify pf_list_1 is present in router r1
+ input_dict = {
+ "r1": {
+ "prefix_lists": ["pf_list_1"]
+ }}
+ result = verify_prefix_lists(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_prefix_lists()")
+
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ # Show ip prefix list
+ show_prefix_list = rnode.vtysh_cmd("show ip prefix-list")
+
+ # Verify Prefix list is deleted
+ prefix_lists_addr = input_dict[router]["prefix_lists"]
+ for addr_type in prefix_lists_addr:
+ if not check_address_types(addr_type):
+ continue
+
+ for prefix_list in prefix_lists_addr[addr_type].keys():
+ if prefix_list in show_prefix_list:
+ errormsg = ("Prefix list {} is not deleted from router"
+ " {}".format(prefix_list, router))
+ return errormsg
+
+ logger.info("Prefix list %s is/are deleted successfully"
+ " from router %s", prefix_list, router)
+
+ logger.info("Exiting lib API: verify_prefix_lists()")
+ return True
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
new file mode 100644
index 0000000000..4130451d2e
--- /dev/null
+++ b/tests/topotests/lib/topojson.py
@@ -0,0 +1,193 @@
+#
+# Modified work Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Original work Copyright (c) 2018 by Network Device Education
+# Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+from collections import OrderedDict
+from json import dumps as json_dumps
+import ipaddr
+import pytest
+
+# Import topogen and topotest helpers
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ number_to_row, number_to_column,
+ load_config_to_router,
+ create_interfaces_cfg,
+ create_static_routes,
+ create_prefix_lists,
+ create_route_maps,
+)
+
+from lib.bgp import create_router_bgp
+
+def build_topo_from_json(tgen, topo):
+ """
+ Reads configuration from JSON file. Adds routers, creates interface
+ names dynamically and link routers as defined in JSON to create
+ topology. Assigns IPs dynamically to all interfaces of each router.
+
+ * `tgen`: Topogen object
+ * `topo`: json file data
+ """
+
+ listRouters = []
+ for routerN in sorted(topo['routers'].iteritems()):
+ logger.info('Topo: Add router {}'.format(routerN[0]))
+ tgen.add_router(routerN[0])
+ listRouters.append(routerN[0])
+
+ listRouters.sort()
+ if 'ipv4base' in topo:
+ ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4'])
+ ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask'])
+ if topo['link_ip_start']['v4mask'] < 32:
+ ipv4Next += 1
+ if 'ipv6base' in topo:
+ ipv6Next = ipaddr.IPv6Address(topo['link_ip_start']['ipv6'])
+ ipv6Step = 2 ** (128 - topo['link_ip_start']['v6mask'])
+ if topo['link_ip_start']['v6mask'] < 127:
+ ipv6Next += 1
+ for router in listRouters:
+ topo['routers'][router]['nextIfname'] = 0
+
+ while listRouters != []:
+ curRouter = listRouters.pop(0)
+ # Physical Interfaces
+ if 'links' in topo['routers'][curRouter]:
+ def link_sort(x):
+ if x == 'lo':
+ return 0
+ elif 'link' in x:
+ return int(x.split('-link')[1])
+ else:
+ return int(x.split('r')[1])
+ for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \
+ iteritems(),
+ key=lambda x: link_sort(x[0])):
+ currRouter_lo_json = \
+ topo['routers'][curRouter]['links'][destRouterLink]
+ # Loopback interfaces
+ if 'type' in data and data['type'] == 'loopback':
+ if 'ipv4' in currRouter_lo_json and \
+ currRouter_lo_json['ipv4'] == 'auto':
+ currRouter_lo_json['ipv4'] = '{}{}.{}/{}'. \
+ format(topo['lo_prefix']['ipv4'], number_to_row(curRouter), \
+ number_to_column(curRouter), topo['lo_prefix']['v4mask'])
+ if 'ipv6' in currRouter_lo_json and \
+ currRouter_lo_json['ipv6'] == 'auto':
+ currRouter_lo_json['ipv6'] = '{}{}:{}/{}'. \
+ format(topo['lo_prefix']['ipv6'], number_to_row(curRouter), \
+ number_to_column(curRouter), topo['lo_prefix']['v6mask'])
+
+ if "-" in destRouterLink:
+ # Splitting and storing destRouterLink data in tempList
+ tempList = destRouterLink.split("-")
+
+ # destRouter
+ destRouter = tempList.pop(0)
+
+ # Current Router Link
+ tempList.insert(0, curRouter)
+ curRouterLink = "-".join(tempList)
+ else:
+ destRouter = destRouterLink
+ curRouterLink = curRouter
+
+ if destRouter in listRouters:
+ currRouter_link_json = \
+ topo['routers'][curRouter]['links'][destRouterLink]
+ destRouter_link_json = \
+ topo['routers'][destRouter]['links'][curRouterLink]
+
+ # Assigning name to interfaces
+ currRouter_link_json['interface'] = \
+ '{}-{}-eth{}'.format(curRouter, destRouter, topo['routers'] \
+ [curRouter]['nextIfname'])
+ destRouter_link_json['interface'] = \
+ '{}-{}-eth{}'.format(destRouter, curRouter, topo['routers'] \
+ [destRouter]['nextIfname'])
+
+ topo['routers'][curRouter]['nextIfname'] += 1
+ topo['routers'][destRouter]['nextIfname'] += 1
+
+ # Linking routers to each other as defined in JSON file
+ tgen.gears[curRouter].add_link(tgen.gears[destRouter],
+ topo['routers'][curRouter]['links'][destRouterLink] \
+ ['interface'], topo['routers'][destRouter]['links'] \
+ [curRouterLink]['interface'])
+
+ # IPv4
+ if 'ipv4' in currRouter_link_json:
+ if currRouter_link_json['ipv4'] == 'auto':
+ currRouter_link_json['ipv4'] = \
+ '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \
+ 'v4mask'])
+ destRouter_link_json['ipv4'] = \
+ '{}/{}'.format(ipv4Next + 1, topo['link_ip_start'][ \
+ 'v4mask'])
+ ipv4Next += ipv4Step
+ # IPv6
+ if 'ipv6' in currRouter_link_json:
+ if currRouter_link_json['ipv6'] == 'auto':
+ currRouter_link_json['ipv6'] = \
+ '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \
+ 'v6mask'])
+ destRouter_link_json['ipv6'] = \
+ '{}/{}'.format(ipv6Next + 1, topo['link_ip_start'][ \
+ 'v6mask'])
+ ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step)
+
+ logger.debug("Generated link data for router: %s\n%s", curRouter,
+ json_dumps(topo["routers"][curRouter]["links"],
+ indent=4, sort_keys=True))
+
+
+def build_config_from_json(tgen, topo, save_bkup=True):
+ """
+ Reads initial configuraiton from JSON for each router, builds
+ configuration and loads its to router.
+
+ * `tgen`: Topogen object
+ * `topo`: json file data
+ """
+
+ func_dict = OrderedDict([
+ ("links", create_interfaces_cfg),
+ ("static_routes", create_static_routes),
+ ("prefix_lists", create_prefix_lists),
+ ("route_maps", create_route_maps),
+ ("bgp", create_router_bgp)
+ ])
+
+ data = topo["routers"]
+ for func_type in func_dict.keys():
+ logger.info('Building configuration for {}'.format(func_type))
+
+ func_dict.get(func_type)(tgen, data, build=True)
+
+ for router in sorted(topo['routers'].keys()):
+ logger.info('Configuring router {}...'.format(router))
+
+ result = load_config_to_router(tgen, router, save_bkup)
+ if not result:
+ logger.info("Failed while configuring {}".format(router))
+ pytest.exit(1)
+
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 86993665ce..867f9f2f03 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -959,7 +959,7 @@ class Router(Node):
global fatal_error
- daemonsRunning = self.cmd('vtysh -c "show log" | grep "Logging configuration for"')
+ daemonsRunning = self.cmd('vtysh -c "show logging" | grep "Logging configuration for"')
# Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
return "%s: vtysh killed by AddressSanitizer" % (self.name)
diff --git a/tests/topotests/ospf6-topo1/README.md b/tests/topotests/ospf6-topo1/README.md
index 28f68e8fa5..526c019c6a 100644
--- a/tests/topotests/ospf6-topo1/README.md
+++ b/tests/topotests/ospf6-topo1/README.md
@@ -102,7 +102,7 @@ Simplified `R3` config
Test is executed by running
- vtysh -c "show log" | grep "Logging configuration for"
+ vtysh -c "show logging" | grep "Logging configuration for"
on each FRR router. This should return the logging information for all daemons registered
to Zebra and the list of running daemons is compared to the daemons started for this test (`zebra` and `ospf6d`)
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index 119ab93857..7ea38491d8 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -10,6 +10,13 @@ norecursedirs = .git example-test lib docker
# value is 'info', but can be changed to 'debug' to provide more details.
#verbosity = info
+# Save logs to log file, by default logs will be displayed to console
+#frrtest_log_dir = /tmp/topotests/
+
+# Display router current configuration during test execution,
+# by default configuration will not be shown
+show_router_config = True
+
# Default daemons binaries path.
#frrdir = /usr/lib/frr
#quaggadir = /usr/lib/quagga
diff --git a/vrrpd/vrrp_packet.c b/vrrpd/vrrp_packet.c
index c6b7ac1a7f..461310c1e5 100644
--- a/vrrpd/vrrp_packet.c
+++ b/vrrpd/vrrp_packet.c
@@ -86,14 +86,14 @@ static uint16_t vrrp_pkt_checksum(struct vrrp_pkt *pkt, size_t pktsize,
ph.src = src->ipaddr_v6;
inet_pton(AF_INET6, VRRP_MCASTV6_GROUP_STR, &ph.dst);
ph.ulpl = htons(pktsize);
- ph.next_hdr = 112;
+ ph.next_hdr = IPPROTO_VRRP;
chksum = in_cksum_with_ph6(&ph, pkt, pktsize);
} else if (!v6 && ((pkt->hdr.vertype >> 4) == 3)) {
struct ipv4_ph ph = {};
ph.src = src->ipaddr_v4;
inet_pton(AF_INET, VRRP_MCASTV4_GROUP_STR, &ph.dst);
- ph.proto = 112;
+ ph.proto = IPPROTO_VRRP;
ph.len = htons(pktsize);
chksum = in_cksum_with_ph4(&ph, pkt, pktsize);
} else if (!v6 && ((pkt->hdr.vertype >> 4) == 2)) {
diff --git a/vrrpd/vrrp_zebra.c b/vrrpd/vrrp_zebra.c
index 7503034de3..c15c250bdf 100644
--- a/vrrpd/vrrp_zebra.c
+++ b/vrrpd/vrrp_zebra.c
@@ -208,6 +208,8 @@ static int vrrp_zebra_if_address_del(int command, struct zclient *client,
vrrp_if_address_del(c->ifp);
+ if_set_index(c->ifp, IFINDEX_INTERNAL);
+
return 0;
}
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index c3aeb27eb9..053848bfc3 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -140,6 +140,21 @@ struct vtysh_client vtysh_client[] = {
{.fd = -1, .name = "vrrpd", .flag = VTYSH_VRRPD, .next = NULL},
};
+/* Searches for client by name, returns index */
+static int vtysh_client_lookup(const char *name)
+{
+ int idx = -1;
+
+ for (unsigned int i = 0; i < array_size(vtysh_client); i++) {
+ if (strmatch(vtysh_client[i].name, name)) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
enum vtysh_write_integrated vtysh_write_integrated =
WRITE_INTEGRATED_UNSPECIFIED;
@@ -394,6 +409,23 @@ static int vtysh_client_execute(struct vtysh_client *head_client,
return vtysh_client_run_all(head_client, line, 0, NULL, NULL);
}
+/* Execute by name */
+static int vtysh_client_execute_name(const char *name, const char *line)
+{
+ int ret = CMD_SUCCESS;
+ int idx_client = -1;
+
+ idx_client = vtysh_client_lookup(name);
+ if (idx_client != -1)
+ ret = vtysh_client_execute(&vtysh_client[idx_client], line);
+ else {
+ vty_out(vty, "Client not found\n");
+ ret = CMD_WARNING;
+ }
+
+ return ret;
+}
+
/*
* Retrieve all running config from daemons and parse it with the vtysh config
* parser. Returned output is not displayed to the user.
@@ -2269,33 +2301,15 @@ DEFUN (vtysh_show_work_queues,
DEFUN (vtysh_show_work_queues_daemon,
vtysh_show_work_queues_daemon_cmd,
- "show work-queues <zebra|ripd|ripngd|ospfd|ospf6d|bgpd|isisd|pbrd|fabricd|pimd|staticd>",
+ "show work-queues " DAEMONS_LIST,
SHOW_STR
"Work Queue information\n"
- "For the zebra daemon\n"
- "For the rip daemon\n"
- "For the ripng daemon\n"
- "For the ospf daemon\n"
- "For the ospfv6 daemon\n"
- "For the bgp daemon\n"
- "For the isis daemon\n"
- "For the pbr daemon\n"
- "For the fabricd daemon\n"
- "For the pim daemon\n"
- "For the static daemon\n")
+ DAEMONS_STR)
{
int idx_protocol = 2;
- unsigned int i;
- int ret = CMD_SUCCESS;
-
- for (i = 0; i < array_size(vtysh_client); i++) {
- if (strmatch(vtysh_client[i].name, argv[idx_protocol]->text))
- break;
- }
- ret = vtysh_client_execute(&vtysh_client[i], "show work-queues\n");
-
- return ret;
+ return vtysh_client_execute_name(argv[idx_protocol]->text,
+ "show work-queues\n");
}
DEFUNSH(VTYSH_ZEBRA, vtysh_link_params, vtysh_link_params_cmd, "link-params",
@@ -2627,22 +2641,109 @@ DEFUNSH(VTYSH_ALL, no_vtysh_config_enable_password,
return CMD_SUCCESS;
}
+/* Log filter */
+DEFUN (vtysh_log_filter,
+ vtysh_log_filter_cmd,
+ "[no] log-filter WORD ["DAEMONS_LIST"]",
+ NO_STR
+ FILTER_LOG_STR
+ "String to filter by\n"
+ DAEMONS_STR)
+{
+ char *filter = NULL;
+ char *daemon = NULL;
+ int found = 0;
+ int idx = 0;
+ int daemon_idx = 2;
+ int total_len = 0;
+ int len = 0;
+
+ char line[ZLOG_FILTER_LENGTH_MAX + 20];
+
+ found = argv_find(argv, argc, "no", &idx);
+ if (found == 1) {
+ len = snprintf(line, sizeof(line), "no log-filter");
+ daemon_idx += 1;
+ } else
+ len = snprintf(line, sizeof(line), "log-filter");
+
+ total_len += len;
+
+ idx = 1;
+ found = argv_find(argv, argc, "WORD", &idx);
+ if (found != 1) {
+ vty_out(vty, "%% No filter string given\n");
+ return CMD_WARNING;
+ }
+ filter = argv[idx]->arg;
+
+ if (strnlen(filter, ZLOG_FILTER_LENGTH_MAX + 1)
+ > ZLOG_FILTER_LENGTH_MAX) {
+ vty_out(vty, "%% Filter is too long\n");
+ return CMD_WARNING;
+ }
+
+ len = snprintf(line + total_len, sizeof(line) - total_len, " %s\n",
+ filter);
+
+ if ((len < 0) || (size_t)(total_len + len) > sizeof(line)) {
+ vty_out(vty, "%% Error buffering filter to daemons\n");
+ return CMD_ERR_INCOMPLETE;
+ }
+
+ if (argc >= (daemon_idx + 1))
+ daemon = argv[daemon_idx]->text;
+
+ if (daemon != NULL) {
+ vty_out(vty, "Applying log filter change to %s:\n", daemon);
+ return vtysh_client_execute_name(daemon, line);
+ } else
+ return show_per_daemon(line,
+ "Applying log filter change to %s:\n");
+}
+
+/* Clear log filters */
+DEFUN (vtysh_log_filter_clear,
+ vtysh_log_filter_clear_cmd,
+ "log-filter clear ["DAEMONS_LIST"]",
+ FILTER_LOG_STR
+ CLEAR_STR
+ DAEMONS_STR)
+{
+ char *daemon = NULL;
+ int daemon_idx = 2;
+
+ char line[] = "clear log-filter\n";
+
+ if (argc >= (daemon_idx + 1))
+ daemon = argv[daemon_idx]->text;
+
+ if (daemon != NULL) {
+ vty_out(vty, "Clearing all filters applied to %s:\n", daemon);
+ return vtysh_client_execute_name(daemon, line);
+ } else
+ return show_per_daemon(line,
+ "Clearing all filters applied to %s:\n");
+}
+
+/* Show log filter */
+DEFUN (vtysh_show_log_filter,
+ vtysh_show_log_filter_cmd,
+ "show log-filter",
+ SHOW_STR
+ FILTER_LOG_STR)
+{
+ char line[] = "do show log-filter\n";
+
+ return show_per_daemon(line, "Log filters applied to %s:\n");
+}
+
DEFUN (vtysh_write_terminal,
vtysh_write_terminal_cmd,
- "write terminal [<zebra|ripd|ripngd|ospfd|ospf6d|ldpd|bgpd|isisd|fabricd|pimd|staticd>]",
+ "write terminal ["DAEMONS_LIST"]",
"Write running configuration to memory, network, or terminal\n"
"Write to terminal\n"
- "For the zebra daemon\n"
- "For the rip daemon\n"
- "For the ripng daemon\n"
- "For the ospf daemon\n"
- "For the ospfv6 daemon\n"
- "For the ldpd daemon\n"
- "For the bgp daemon\n"
- "For the isis daemon\n"
- "For the fabricd daemon\n"
- "For the pim daemon\n"
- "For the static daemon\n")
+ DAEMONS_STR)
{
unsigned int i;
char line[] = "do write terminal\n";
@@ -2668,20 +2769,10 @@ DEFUN (vtysh_write_terminal,
DEFUN (vtysh_show_running_config,
vtysh_show_running_config_cmd,
- "show running-config [<zebra|ripd|ripngd|ospfd|ospf6d|ldpd|bgpd|isisd|fabricd|pimd|staticd>]",
+ "show running-config ["DAEMONS_LIST"]",
SHOW_STR
"Current operating configuration\n"
- "For the zebra daemon\n"
- "For the rip daemon\n"
- "For the ripng daemon\n"
- "For the ospf daemon\n"
- "For the ospfv6 daemon\n"
- "For the ldp daemon\n"
- "For the bgp daemon\n"
- "For the isis daemon\n"
- "For the fabricd daemon\n"
- "For the pim daemon\n"
- "For the static daemon\n")
+ DAEMONS_STR)
{
return vtysh_write_terminal(self, vty, argc, argv);
}
@@ -3871,6 +3962,9 @@ void vtysh_init_vty(void)
/* Logging */
install_element(VIEW_NODE, &vtysh_show_logging_cmd);
+ install_element(VIEW_NODE, &vtysh_show_log_filter_cmd);
+ install_element(CONFIG_NODE, &vtysh_log_filter_cmd);
+ install_element(CONFIG_NODE, &vtysh_log_filter_clear_cmd);
install_element(CONFIG_NODE, &vtysh_log_stdout_cmd);
install_element(CONFIG_NODE, &vtysh_log_stdout_level_cmd);
install_element(CONFIG_NODE, &no_vtysh_log_stdout_cmd);
diff --git a/vtysh/vtysh_config.c b/vtysh/vtysh_config.c
index b8957c2b00..4ae1e499ff 100644
--- a/vtysh/vtysh_config.c
+++ b/vtysh/vtysh_config.c
@@ -377,6 +377,9 @@ void vtysh_config_parse_line(void *arg, const char *line)
strlen("debug route-map"))
== 0)
config = config_get(RMAP_DEBUG_NODE, line);
+ else if (strncmp(line, "debug resolver",
+ strlen("debug resolver")) == 0)
+ config = config_get(RESOLVER_DEBUG_NODE, line);
else if (strncmp(line, "debug", strlen("debug")) == 0)
config = config_get(DEBUG_NODE, line);
else if (strncmp(line, "password", strlen("password")) == 0
@@ -423,7 +426,7 @@ void vtysh_config_parse_line(void *arg, const char *line)
|| (I) == PREFIX_IPV6_NODE || (I) == FORWARDING_NODE \
|| (I) == DEBUG_NODE || (I) == AAA_NODE || (I) == VRF_DEBUG_NODE \
|| (I) == NORTHBOUND_DEBUG_NODE || (I) == RMAP_DEBUG_NODE \
- || (I) == MPLS_NODE)
+ || (I) == RESOLVER_DEBUG_NODE || (I) == MPLS_NODE)
/* Display configuration to file pointer. */
void vtysh_config_dump(void)
diff --git a/zebra/interface.h b/zebra/interface.h
index 6a3914451a..e134b9b423 100644
--- a/zebra/interface.h
+++ b/zebra/interface.h
@@ -46,7 +46,7 @@ extern "C" {
struct rtadvconf {
/* A flag indicating whether or not the router sends periodic Router
Advertisements and responds to Router Solicitations.
- Default: FALSE */
+ Default: false */
int AdvSendAdvertisements;
/* The maximum time allowed between sending unsolicited multicast
@@ -70,19 +70,19 @@ struct rtadvconf {
/* Unsolicited Router Advertisements' interval timer. */
int AdvIntervalTimer;
- /* The TRUE/FALSE value to be placed in the "Managed address
+ /* The true/false value to be placed in the "Managed address
configuration" flag field in the Router Advertisement. See
[ADDRCONF].
- Default: FALSE */
+ Default: false */
int AdvManagedFlag;
- /* The TRUE/FALSE value to be placed in the "Other stateful
+ /* The true/false value to be placed in the "Other stateful
configuration" flag field in the Router Advertisement. See
[ADDRCONF].
- Default: FALSE */
+ Default: false */
int AdvOtherConfigFlag;
/* The value to be placed in MTU options sent by the router. A
@@ -136,10 +136,10 @@ struct rtadvconf {
included in the list of advertised prefixes. */
struct list *AdvPrefixList;
- /* The TRUE/FALSE value to be placed in the "Home agent"
+ /* The true/false value to be placed in the "Home agent"
flag field in the Router Advertisement. See [RFC6275 7.1].
- Default: FALSE */
+ Default: false */
int AdvHomeAgentFlag;
#ifndef ND_RA_FLAG_HOME_AGENT
#define ND_RA_FLAG_HOME_AGENT 0x20
@@ -159,10 +159,10 @@ struct rtadvconf {
int HomeAgentLifetime;
#define RTADV_MAX_HALIFETIME 65520 /* 18.2 hours */
- /* The TRUE/FALSE value to insert or not an Advertisement Interval
+ /* The true/false value to insert or not an Advertisement Interval
option. See [RFC 6275 7.3]
- Default: FALSE */
+ Default: false */
int AdvIntervalOption;
/* The value to be placed in the Default Router Preference field of
diff --git a/zebra/irdp.h b/zebra/irdp.h
index 3f4fa93460..ff4ab8dfbd 100644
--- a/zebra/irdp.h
+++ b/zebra/irdp.h
@@ -32,9 +32,6 @@
extern "C" {
#endif
-#define TRUE 1
-#define FALSE 0
-
/* ICMP Messages */
#ifndef ICMP_ROUTERADVERT
#define ICMP_ROUTERADVERT 9
diff --git a/zebra/irdp_interface.c b/zebra/irdp_interface.c
index c0b772cd01..8e1ca122d3 100644
--- a/zebra/irdp_interface.c
+++ b/zebra/irdp_interface.c
@@ -352,7 +352,7 @@ static void irdp_if_no_shutdown(struct interface *ifp)
irdp->flags &= ~IF_SHUTDOWN;
- irdp_if_start(ifp, irdp->flags & IF_BROADCAST ? FALSE : TRUE, FALSE);
+ irdp_if_start(ifp, irdp->flags & IF_BROADCAST ? false : true, false);
}
@@ -407,7 +407,7 @@ DEFUN (ip_irdp_multicast,
VTY_DECLVAR_CONTEXT(interface, ifp);
irdp_if_get(ifp);
- irdp_if_start(ifp, TRUE, TRUE);
+ irdp_if_start(ifp, true, true);
return CMD_SUCCESS;
}
@@ -421,7 +421,7 @@ DEFUN (ip_irdp_broadcast,
VTY_DECLVAR_CONTEXT(interface, ifp);
irdp_if_get(ifp);
- irdp_if_start(ifp, FALSE, TRUE);
+ irdp_if_start(ifp, false, true);
return CMD_SUCCESS;
}
diff --git a/zebra/irdp_packet.c b/zebra/irdp_packet.c
index 2804787620..f6fe6bbf1e 100644
--- a/zebra/irdp_packet.c
+++ b/zebra/irdp_packet.c
@@ -136,10 +136,10 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp)
return;
if (icmp->code != 0) {
- flog_warn(EC_ZEBRA_IRDP_BAD_TYPE_CODE,
- "IRDP: RX packet type %d from %s. Bad ICMP type code,"
- " silently ignored",
- icmp->type, inet_ntoa(src));
+ flog_warn(
+ EC_ZEBRA_IRDP_BAD_TYPE_CODE,
+ "IRDP: RX packet type %d from %s. Bad ICMP type code, silently ignored",
+ icmp->type, inet_ntoa(src));
return;
}
@@ -174,8 +174,8 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp)
default:
flog_warn(
- EC_ZEBRA_IRDP_BAD_TYPE,
- "IRDP: RX type %d from %s. Bad ICMP type, silently ignored",
+ EC_ZEBRA_IRDP_BAD_TYPE_CODE,
+ "IRDP: RX packet type %d from %s. Bad ICMP type code, silently ignored",
icmp->type, inet_ntoa(src));
}
}
diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index 16d45836e0..8295e461cc 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -75,6 +75,10 @@ static int relay_response_back(void)
unsigned short instance;
struct zserv *zserv;
+ /* sanity */
+ if (!zclient || zclient->sock < 0)
+ return -1;
+
/* input buffer with msg from label manager */
src = zclient->ibuf;
@@ -83,10 +87,11 @@ static int relay_response_back(void)
/* parse header */
ret = zclient_read_header(src, zclient->sock, &size, &marker, &version,
&vrf_id, &resp_cmd);
- if (ret < 0 && errno != EAGAIN) {
- flog_err(EC_ZEBRA_LM_RESPONSE,
- "Error reading Label Manager response: %s",
- strerror(errno));
+ if (ret < 0) {
+ if (errno != EAGAIN)
+ flog_err(EC_ZEBRA_LM_RESPONSE,
+ "Error reading Label Manager response: %s",
+ strerror(errno));
return -1;
}
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index b13f1170cd..d56579ff4f 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -52,7 +52,7 @@
static int zebra_import_table_used[AFI_MAX][ZEBRA_KERNEL_TABLE_MAX];
static uint32_t zebra_import_table_distance[AFI_MAX][ZEBRA_KERNEL_TABLE_MAX];
-int is_zebra_import_table_enabled(afi_t afi, uint32_t table_id)
+int is_zebra_import_table_enabled(afi_t afi, vrf_id_t vrf_id, uint32_t table_id)
{
/*
* Make sure that what we are called with actualy makes sense
@@ -568,8 +568,8 @@ void zebra_interface_vrf_update_add(struct interface *ifp, vrf_id_t old_vrf_id)
}
}
-int zebra_add_import_table_entry(struct route_node *rn, struct route_entry *re,
- const char *rmap_name)
+int zebra_add_import_table_entry(struct zebra_vrf *zvrf, struct route_node *rn,
+ struct route_entry *re, const char *rmap_name)
{
struct route_entry *newre;
struct route_entry *same;
@@ -581,11 +581,11 @@ int zebra_add_import_table_entry(struct route_node *rn, struct route_entry *re,
if (rmap_name)
ret = zebra_import_table_route_map_check(
afi, re->type, re->instance, &rn->p, re->ng.nexthop,
- re->vrf_id, re->tag, rmap_name);
+ zvrf->vrf->vrf_id, re->tag, rmap_name);
if (ret != RMAP_MATCH) {
UNSET_FLAG(re->flags, ZEBRA_FLAG_SELECTED);
- zebra_del_import_table_entry(rn, re);
+ zebra_del_import_table_entry(zvrf, rn, re);
return 0;
}
@@ -603,7 +603,7 @@ int zebra_add_import_table_entry(struct route_node *rn, struct route_entry *re,
if (same) {
UNSET_FLAG(same->flags, ZEBRA_FLAG_SELECTED);
- zebra_del_import_table_entry(rn, same);
+ zebra_del_import_table_entry(zvrf, rn, same);
}
newre = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
@@ -612,7 +612,7 @@ int zebra_add_import_table_entry(struct route_node *rn, struct route_entry *re,
newre->flags = re->flags;
newre->metric = re->metric;
newre->mtu = re->mtu;
- newre->table = 0;
+ newre->table = zvrf->table_id;
newre->nexthop_num = 0;
newre->uptime = monotime(NULL);
newre->instance = re->table;
@@ -623,7 +623,8 @@ int zebra_add_import_table_entry(struct route_node *rn, struct route_entry *re,
return 0;
}
-int zebra_del_import_table_entry(struct route_node *rn, struct route_entry *re)
+int zebra_del_import_table_entry(struct zebra_vrf *zvrf, struct route_node *rn,
+ struct route_entry *re)
{
struct prefix p;
afi_t afi;
@@ -631,20 +632,21 @@ int zebra_del_import_table_entry(struct route_node *rn, struct route_entry *re)
afi = family2afi(rn->p.family);
prefix_copy(&p, &rn->p);
- rib_delete(afi, SAFI_UNICAST, re->vrf_id, ZEBRA_ROUTE_TABLE, re->table,
- re->flags, &p, NULL, re->ng.nexthop, 0, re->metric,
- re->distance, false);
+ rib_delete(afi, SAFI_UNICAST, zvrf->vrf->vrf_id, ZEBRA_ROUTE_TABLE,
+ re->table, re->flags, &p, NULL, re->ng.nexthop,
+ zvrf->table_id, re->metric, re->distance, false);
return 0;
}
/* Assuming no one calls this with the main routing table */
-int zebra_import_table(afi_t afi, uint32_t table_id, uint32_t distance,
- const char *rmap_name, int add)
+int zebra_import_table(afi_t afi, vrf_id_t vrf_id, uint32_t table_id,
+ uint32_t distance, const char *rmap_name, int add)
{
struct route_table *table;
struct route_entry *re;
struct route_node *rn;
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(vrf_id);
if (!is_zebra_valid_kernel_table(table_id)
|| (table_id == RT_TABLE_MAIN))
@@ -653,8 +655,8 @@ int zebra_import_table(afi_t afi, uint32_t table_id, uint32_t distance,
if (afi >= AFI_MAX)
return (-1);
- table = zebra_vrf_table_with_table_id(afi, SAFI_UNICAST,
- table_id, VRF_DEFAULT);
+ table = zebra_vrf_table_with_table_id(afi, SAFI_UNICAST, vrf_id,
+ table_id);
if (table == NULL) {
return 0;
} else if (IS_ZEBRA_DEBUG_RIB) {
@@ -708,15 +710,16 @@ int zebra_import_table(afi_t afi, uint32_t table_id, uint32_t distance,
if (((afi == AFI_IP) && (rn->p.family == AF_INET))
|| ((afi == AFI_IP6) && (rn->p.family == AF_INET6))) {
if (add)
- zebra_add_import_table_entry(rn, re, rmap_name);
+ zebra_add_import_table_entry(zvrf, rn, re,
+ rmap_name);
else
- zebra_del_import_table_entry(rn, re);
+ zebra_del_import_table_entry(zvrf, rn, re);
}
}
return 0;
}
-int zebra_import_table_config(struct vty *vty)
+int zebra_import_table_config(struct vty *vty, vrf_id_t vrf_id)
{
int i;
afi_t afi;
@@ -726,7 +729,7 @@ int zebra_import_table_config(struct vty *vty)
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
for (i = 1; i < ZEBRA_KERNEL_TABLE_MAX; i++) {
- if (!is_zebra_import_table_enabled(afi, i))
+ if (!is_zebra_import_table_enabled(afi, vrf_id, i))
continue;
if (zebra_import_table_distance[afi][i]
@@ -751,61 +754,84 @@ int zebra_import_table_config(struct vty *vty)
return write;
}
-void zebra_import_table_rm_update(const char *rmap)
+static void zebra_import_table_rm_update_vrf_afi(struct zebra_vrf *zvrf,
+ afi_t afi, int table_id,
+ const char *rmap)
{
- afi_t afi;
- int i;
struct route_table *table;
struct route_entry *re;
struct route_node *rn;
const char *rmap_name;
- for (afi = AFI_IP; afi < AFI_MAX; afi++) {
- for (i = 1; i < ZEBRA_KERNEL_TABLE_MAX; i++) {
- if (!is_zebra_import_table_enabled(afi, i))
- continue;
+ rmap_name = zebra_get_import_table_route_map(afi, table_id);
+ if ((!rmap_name) || (strcmp(rmap_name, rmap) != 0))
+ return;
- rmap_name = zebra_get_import_table_route_map(afi, i);
- if ((!rmap_name) || (strcmp(rmap_name, rmap) != 0))
+ table = zebra_vrf_table_with_table_id(afi, SAFI_UNICAST,
+ zvrf->vrf->vrf_id, table_id);
+ if (!table) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("%s: Table id=%d not found", __func__,
+ table_id);
+ return;
+ }
+
+ for (rn = route_top(table); rn; rn = route_next(rn)) {
+ /*
+ * For each entry in the non-default routing table,
+ * add the entry in the main table
+ */
+ if (!rn->info)
+ continue;
+
+ RNODE_FOREACH_RE (rn, re) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
continue;
- table = zebra_vrf_table_with_table_id(afi, SAFI_UNICAST,
- i, VRF_DEFAULT);
- if (!table) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("%s: Table id=%d not found",
- __func__, i);
+ break;
+ }
+
+ if (!re)
+ continue;
+
+ if (((afi == AFI_IP) && (rn->p.family == AF_INET))
+ || ((afi == AFI_IP6) && (rn->p.family == AF_INET6)))
+ zebra_add_import_table_entry(zvrf, rn, re, rmap_name);
+ }
+
+ return;
+}
+
+static void zebra_import_table_rm_update_vrf(struct zebra_vrf *zvrf,
+ const char *rmap)
+{
+ afi_t afi;
+ int i;
+
+ for (afi = AFI_IP; afi < AFI_MAX; afi++) {
+ for (i = 1; i < ZEBRA_KERNEL_TABLE_MAX; i++) {
+ if (!is_zebra_import_table_enabled(
+ afi, zvrf->vrf->vrf_id, i))
continue;
- }
- for (rn = route_top(table); rn; rn = route_next(rn)) {
- /* For each entry in the non-default
- * routing table,
- * add the entry in the main table
- */
- if (!rn->info)
- continue;
-
- RNODE_FOREACH_RE (rn, re) {
- if (CHECK_FLAG(re->status,
- ROUTE_ENTRY_REMOVED))
- continue;
- break;
- }
-
- if (!re)
- continue;
-
- if (((afi == AFI_IP)
- && (rn->p.family == AF_INET))
- || ((afi == AFI_IP6)
- && (rn->p.family == AF_INET6)))
- zebra_add_import_table_entry(rn, re,
- rmap_name);
- }
+ zebra_import_table_rm_update_vrf_afi(zvrf, afi, i,
+ rmap);
}
}
+}
- return;
+void zebra_import_table_rm_update(const char *rmap)
+{
+ struct vrf *vrf;
+ struct zebra_vrf *zvrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ zvrf = vrf->info;
+
+ if (!zvrf)
+ continue;
+
+ zebra_import_table_rm_update_vrf(zvrf, rmap);
+ }
}
/* Interface parameters update */
diff --git a/zebra/redistribute.h b/zebra/redistribute.h
index 74a593b240..30ff6bcd09 100644
--- a/zebra/redistribute.h
+++ b/zebra/redistribute.h
@@ -64,17 +64,21 @@ extern void zebra_interface_vrf_update_del(struct interface *,
extern void zebra_interface_vrf_update_add(struct interface *,
vrf_id_t old_vrf_id);
-extern int zebra_import_table(afi_t afi, uint32_t table_id, uint32_t distance,
+extern int zebra_import_table(afi_t afi, vrf_id_t vrf_id,
+ uint32_t table_id, uint32_t distance,
const char *rmap_name, int add);
-extern int zebra_add_import_table_entry(struct route_node *rn,
+extern int zebra_add_import_table_entry(struct zebra_vrf *zvrf,
+ struct route_node *rn,
struct route_entry *re,
const char *rmap_name);
-extern int zebra_del_import_table_entry(struct route_node *rn,
+extern int zebra_del_import_table_entry(struct zebra_vrf *zvrf,
+ struct route_node *rn,
struct route_entry *re);
-extern int is_zebra_import_table_enabled(afi_t, uint32_t table_id);
+extern int is_zebra_import_table_enabled(afi_t, vrf_id_t vrf_id,
+ uint32_t table_id);
-extern int zebra_import_table_config(struct vty *);
+extern int zebra_import_table_config(struct vty *, vrf_id_t vrf_id);
extern void zebra_import_table_rm_update(const char *rmap);
diff --git a/zebra/rib.h b/zebra/rib.h
index 292f6bc600..9d8cee8bf7 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -500,7 +500,7 @@ static inline void rib_tables_iter_init(rib_tables_iter_t *iter)
/*
* rib_tables_iter_started
*
- * Returns TRUE if this iterator has started iterating over the set of
+ * Returns true if this iterator has started iterating over the set of
* tables.
*/
static inline int rib_tables_iter_started(rib_tables_iter_t *iter)
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index b05d037f96..f951738b70 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -150,6 +150,9 @@ static inline int zebra2proto(int proto)
case ZEBRA_ROUTE_OPENFABRIC:
proto = RTPROT_OPENFABRIC;
break;
+ case ZEBRA_ROUTE_TABLE:
+ proto = RTPROT_ZEBRA;
+ break;
default:
/*
* When a user adds a new protocol this will show up
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 61200806ba..9a638f8e7f 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2432,6 +2432,7 @@ static inline void zread_iptable(ZAPI_HANDLER_ARGS)
STREAM_GETW(s, zpi.tcp_mask_flags);
STREAM_GETC(s, zpi.dscp_value);
STREAM_GETC(s, zpi.fragment);
+ STREAM_GETC(s, zpi.protocol);
STREAM_GETL(s, zpi.nb_interface);
zebra_pbr_iptable_update_interfacelist(s, &zpi);
diff --git a/zebra/zebra_errors.c b/zebra/zebra_errors.c
index cb5f30df1f..5f0a9ec011 100644
--- a/zebra/zebra_errors.c
+++ b/zebra/zebra_errors.c
@@ -381,14 +381,6 @@ static struct log_ref ferr_zebra_err[] = {
"If you wish to receive the messages, change your IRDP settings accordingly.",
},
{
- .code = EC_ZEBRA_IRDP_BAD_TYPE,
- .title =
- "Zebra received IRDP packet with bad type",
- .description =
- "THIS IS BULLSHIT REMOVE ME",
- .suggestion = "asdf",
- },
- {
.code = EC_ZEBRA_RNH_NO_TABLE,
.title =
"Zebra could not find table for next hop",
diff --git a/zebra/zebra_errors.h b/zebra/zebra_errors.h
index 2b7831a408..222055dd81 100644
--- a/zebra/zebra_errors.h
+++ b/zebra/zebra_errors.h
@@ -85,7 +85,6 @@ enum zebra_log_refs {
EC_ZEBRA_IRDP_BAD_CHECKSUM,
EC_ZEBRA_IRDP_BAD_TYPE_CODE,
EC_ZEBRA_IRDP_BAD_RX_FLAGS,
- EC_ZEBRA_IRDP_BAD_TYPE,
EC_ZEBRA_RNH_NO_TABLE,
EC_ZEBRA_IFLIST_FAILED,
EC_ZEBRA_FPM_FORMAT_UNKNOWN,
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index 32b9763c56..eaf43095bc 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -829,7 +829,7 @@ static bool zfpm_updates_pending(void)
/*
* zfpm_writes_pending
*
- * Returns TRUE if we may have something to write to the FPM.
+ * Returns true if we may have something to write to the FPM.
*/
static int zfpm_writes_pending(void)
{
@@ -1403,7 +1403,7 @@ static void zfpm_start_connect_timer(const char *reason)
/*
* zfpm_is_enabled
*
- * Returns TRUE if the zebra FPM module has been enabled.
+ * Returns true if the zebra FPM module has been enabled.
*/
static inline int zfpm_is_enabled(void)
{
@@ -1413,7 +1413,7 @@ static inline int zfpm_is_enabled(void)
/*
* zfpm_conn_is_up
*
- * Returns TRUE if the connection to the FPM is up.
+ * Returns true if the connection to the FPM is up.
*/
static inline int zfpm_conn_is_up(void)
{
@@ -1969,10 +1969,10 @@ static struct cmd_node zebra_node = {ZEBRA_NODE, "", 1};
* One-time initialization of the Zebra FPM module.
*
* @param[in] port port at which FPM is running.
- * @param[in] enable TRUE if the zebra FPM module should be enabled
+ * @param[in] enable true if the zebra FPM module should be enabled
* @param[in] format to use to talk to the FPM. Can be 'netink' or 'protobuf'.
*
- * Returns TRUE on success.
+ * Returns true on success.
*/
static int zfpm_init(struct thread_master *master)
{
diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c
index d5479bc627..822def318a 100644
--- a/zebra/zebra_fpm_netlink.c
+++ b/zebra/zebra_fpm_netlink.c
@@ -195,7 +195,7 @@ typedef struct netlink_route_info_t_ {
* Add information about the given nexthop to the given route info
* structure.
*
- * Returns TRUE if a nexthop was added, FALSE otherwise.
+ * Returns true if a nexthop was added, false otherwise.
*/
static int netlink_route_info_add_nh(netlink_route_info_t *ri,
struct nexthop *nexthop,
@@ -278,7 +278,7 @@ static uint8_t netlink_proto_from_route_type(int type)
*
* Fill out the route information object from the given route.
*
- * Returns TRUE on success and FALSE on failure.
+ * Returns true on success and false on failure.
*/
static int netlink_route_info_fill(netlink_route_info_t *ri, int cmd,
rib_dest_t *dest, struct route_entry *re)
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index f2a76d1c52..4a88296051 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -23,6 +23,7 @@
#include <zebra.h>
#include "lib/nexthop.h"
+#include "lib/nexthop_group_private.h"
#include "lib/routemap.h"
#include "zebra/connected.h"
@@ -100,7 +101,7 @@ static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
&newhop->nh_label->label[0]);
resolved_hop->rparent = nexthop;
- nexthop_add(&nexthop->resolved, resolved_hop);
+ _nexthop_add(&nexthop->resolved, resolved_hop);
}
/*
diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c
index a82dd4c24a..f95a4ff950 100644
--- a/zebra/zebra_pbr.c
+++ b/zebra/zebra_pbr.c
@@ -373,6 +373,7 @@ uint32_t zebra_pbr_iptable_hash_key(const void *arg)
key = jhash_1word(iptable->tcp_flags, key);
key = jhash_1word(iptable->tcp_mask_flags, key);
key = jhash_1word(iptable->dscp_value, key);
+ key = jhash_1word(iptable->protocol, key);
key = jhash_1word(iptable->fragment, key);
key = jhash_1word(iptable->vrf_id, key);
@@ -414,6 +415,8 @@ bool zebra_pbr_iptable_hash_equal(const void *arg1, const void *arg2)
return false;
if (r1->fragment != r2->fragment)
return false;
+ if (r1->protocol != r2->protocol)
+ return false;
return true;
}
@@ -1095,6 +1098,10 @@ static void zebra_pbr_show_iptable_unit(struct zebra_pbr_iptable *iptable,
" not" : "", lookup_msg(fragment_value_str,
iptable->fragment, val_str));
}
+ if (iptable->protocol) {
+ vty_out(vty, "\t protocol %d\n",
+ iptable->protocol);
+ }
ret = hook_call(zebra_pbr_iptable_get_stat, iptable, &pkts,
&bytes);
if (ret && pkts > 0)
diff --git a/zebra/zebra_pbr.h b/zebra/zebra_pbr.h
index cc1cc5acd5..fcc9c5c39a 100644
--- a/zebra/zebra_pbr.h
+++ b/zebra/zebra_pbr.h
@@ -145,6 +145,7 @@ struct zebra_pbr_iptable {
uint16_t tcp_mask_flags;
uint8_t dscp_value;
uint8_t fragment;
+ uint8_t protocol;
uint32_t nb_interface;
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 2df24f75c5..9cfaef3a89 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -36,6 +36,7 @@
#include "thread.h"
#include "vrf.h"
#include "workqueue.h"
+#include "nexthop_group_private.h"
#include "zebra/zebra_router.h"
#include "zebra/connected.h"
@@ -192,7 +193,7 @@ int zebra_check_addr(const struct prefix *p)
/* Add nexthop to the end of a rib node's nexthop list */
void route_entry_nexthop_add(struct route_entry *re, struct nexthop *nexthop)
{
- nexthop_add(&re->ng.nexthop, nexthop);
+ _nexthop_add(&re->ng.nexthop, nexthop);
re->nexthop_num++;
}
@@ -697,7 +698,7 @@ static void rib_uninstall(struct route_node *rn, struct route_entry *re)
/*
* rib_can_delete_dest
*
- * Returns TRUE if the given dest can be deleted from the table.
+ * Returns true if the given dest can be deleted from the table.
*/
static int rib_can_delete_dest(rib_dest_t *dest)
{
@@ -815,7 +816,7 @@ void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq)
* Garbage collect the rib dest corresponding to the given route node
* if appropriate.
*
- * Returns TRUE if the dest was deleted, FALSE otherwise.
+ * Returns true if the dest was deleted, false otherwise.
*/
int rib_gc_dest(struct route_node *rn)
{
@@ -1589,7 +1590,7 @@ static bool rib_update_re_from_ctx(struct route_entry *re,
*/
nexthop = nexthop_new();
nexthop->type = NEXTHOP_TYPE_IPV4;
- nexthop_add(&(re->fib_ng.nexthop), nexthop);
+ _nexthop_add(&(re->fib_ng.nexthop), nexthop);
}
done:
@@ -2346,9 +2347,11 @@ static void rib_link(struct route_node *rn, struct route_entry *re, int process)
afi = (rn->p.family == AF_INET)
? AFI_IP
: (rn->p.family == AF_INET6) ? AFI_IP6 : AFI_MAX;
- if (is_zebra_import_table_enabled(afi, re->table)) {
+ if (is_zebra_import_table_enabled(afi, re->vrf_id, re->table)) {
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
+
rmap_name = zebra_get_import_table_route_map(afi, re->table);
- zebra_add_import_table_entry(rn, re, rmap_name);
+ zebra_add_import_table_entry(zvrf, rn, re, rmap_name);
} else if (process)
rib_queue_add(rn);
}
@@ -2414,8 +2417,10 @@ void rib_delnode(struct route_node *rn, struct route_entry *re)
afi = (rn->p.family == AF_INET)
? AFI_IP
: (rn->p.family == AF_INET6) ? AFI_IP6 : AFI_MAX;
- if (is_zebra_import_table_enabled(afi, re->table)) {
- zebra_del_import_table_entry(rn, re);
+ if (is_zebra_import_table_enabled(afi, re->vrf_id, re->table)) {
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
+
+ zebra_del_import_table_entry(zvrf, rn, re);
/* Just clean up if non main table */
if (IS_ZEBRA_DEBUG_RIB) {
char buf[SRCDEST2STR_BUFFER];
@@ -2741,11 +2746,14 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
else
src_buf[0] = '\0';
- if (IS_ZEBRA_DEBUG_RIB)
- zlog_debug("%u:%s%s%s doesn't exist in rib", vrf_id,
- dst_buf,
+ if (IS_ZEBRA_DEBUG_RIB) {
+ struct vrf *vrf = vrf_lookup_by_id(vrf_id);
+
+ zlog_debug("%s[%d]:%s%s%s doesn't exist in rib",
+ vrf->name, table_id, dst_buf,
(src_buf[0] != '\0') ? " from " : "",
src_buf);
+ }
return;
}
@@ -3383,7 +3391,7 @@ void rib_init(void)
*
* Get the first vrf id that is greater than the given vrf id if any.
*
- * Returns TRUE if a vrf id was found, FALSE otherwise.
+ * Returns true if a vrf id was found, false otherwise.
*/
static inline int vrf_id_get_next(vrf_id_t vrf_id, vrf_id_t *next_id_p)
{
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 1f8eec9cad..74baabbf24 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -2435,7 +2435,7 @@ static int zebra_ip_config(struct vty *vty)
{
int write = 0;
- write += zebra_import_table_config(vty);
+ write += zebra_import_table_config(vty, VRF_DEFAULT);
return write;
}
@@ -2482,7 +2482,8 @@ DEFUN (ip_zebra_import_table_distance,
return CMD_WARNING;
}
- ret = zebra_import_table(AFI_IP, table_id, distance, rmap, 1);
+ ret = zebra_import_table(AFI_IP, VRF_DEFAULT, table_id,
+ distance, rmap, 1);
if (rmap)
XFREE(MTYPE_ROUTE_MAP_NAME, rmap);
@@ -2573,10 +2574,10 @@ DEFUN (no_ip_zebra_import_table,
return CMD_WARNING;
}
- if (!is_zebra_import_table_enabled(AFI_IP, table_id))
+ if (!is_zebra_import_table_enabled(AFI_IP, VRF_DEFAULT, table_id))
return CMD_SUCCESS;
- return (zebra_import_table(AFI_IP, table_id, 0, NULL, 0));
+ return (zebra_import_table(AFI_IP, VRF_DEFAULT, table_id, 0, NULL, 0));
}
static int config_write_protocol(struct vty *vty)
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index a3a630d53e..222d91105e 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -9774,22 +9774,35 @@ static void zvni_evpn_cfg_cleanup(struct hash_bucket *bucket, void *ctxt)
zvni->advertise_svi_macip = 0;
zvni->advertise_subnet = 0;
- zvni_neigh_del_all(zvni, 0, 0,
+ zvni_neigh_del_all(zvni, 1, 0,
DEL_REMOTE_NEIGH | DEL_REMOTE_NEIGH_FROM_VTEP);
- zvni_mac_del_all(zvni, 0, 0,
+ zvni_mac_del_all(zvni, 1, 0,
DEL_REMOTE_MAC | DEL_REMOTE_MAC_FROM_VTEP);
- zvni_vtep_del_all(zvni, 0);
+ zvni_vtep_del_all(zvni, 1);
}
/* Cleanup EVPN configuration of a specific VRF */
static void zebra_evpn_vrf_cfg_cleanup(struct zebra_vrf *zvrf)
{
+ zebra_l3vni_t *zl3vni = NULL;
+
zvrf->advertise_all_vni = 0;
zvrf->advertise_gw_macip = 0;
zvrf->advertise_svi_macip = 0;
zvrf->vxlan_flood_ctrl = VXLAN_FLOOD_HEAD_END_REPL;
hash_iterate(zvrf->vni_table, zvni_evpn_cfg_cleanup, NULL);
+
+ if (zvrf->l3vni)
+ zl3vni = zl3vni_lookup(zvrf->l3vni);
+ if (zl3vni) {
+ /* delete and uninstall all rmacs */
+ hash_iterate(zl3vni->rmac_table, zl3vni_del_rmac_hash_entry,
+ zl3vni);
+ /* delete and uninstall all next-hops */
+ hash_iterate(zl3vni->nh_table, zl3vni_del_nh_hash_entry,
+ zl3vni);
+ }
}
/* Cleanup BGP EVPN configuration upon client disconnect */