-rw-r--r--  bfdd/bfd.c  141
-rw-r--r--  bfdd/bfd.h  53
-rw-r--r--  bfdd/bfdd.c  24
-rw-r--r--  bfdd/bfdd_cli.c  412
-rw-r--r--  bfdd/bfdd_northbound.c  1210
-rw-r--r--  bfdd/bfdd_vty.c  395
-rw-r--r--  bfdd/subdir.am  10
-rw-r--r--  bgpd/bgp_evpn_vty.c  4
-rw-r--r--  bgpd/bgp_network.c  11
-rw-r--r--  bgpd/bgp_pbr.c  21
-rw-r--r--  bgpd/bgp_pbr.h  1
-rw-r--r--  bgpd/bgp_routemap.c  95
-rw-r--r--  bgpd/bgp_zebra.c  1
-rwxr-xr-x  configure.ac  15
-rw-r--r--  doc/developer/building-frr-for-omnios.rst  2
-rw-r--r--  doc/developer/building-libyang.rst  2
-rw-r--r--  doc/developer/topotests-jsontopo.rst  475
-rw-r--r--  doc/developer/topotests.rst  2
-rw-r--r--  doc/user/installation.rst  16
-rw-r--r--  eigrpd/eigrp_vty.c  4
-rw-r--r--  lib/northbound.c  11
-rw-r--r--  lib/northbound.h  21
-rw-r--r--  lib/northbound_cli.c  34
-rw-r--r--  lib/pbr.h  1
-rw-r--r--  tests/topotests/bgp-basic-functionality-topo1/__init__.py  0
-rw-r--r--  tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json  172
-rwxr-xr-x  tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py  595
-rw-r--r--  tests/topotests/bgp-path-attributes-topo1/__init__.py  0
-rw-r--r--  tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json  220
-rwxr-xr-x  tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py  1078
-rw-r--r--  tests/topotests/bgp-prefix-list-topo1/__init__.py  0
-rw-r--r--  tests/topotests/bgp-prefix-list-topo1/prefix_lists.json  123
-rwxr-xr-x  tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py  1450
-rwxr-xr-x  tests/topotests/example-topojson-test/__init__.py  0
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py  0
-rw-r--r--  tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json  152
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py  194
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py  0
-rw-r--r--  tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json  153
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py  190
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py  0
-rw-r--r--  tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json  161
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py  205
-rw-r--r--  tests/topotests/lib/bgp.py  1521
-rw-r--r--  tests/topotests/lib/common_config.py  1391
-rw-r--r--  tests/topotests/lib/topojson.py  193
-rw-r--r--  tests/topotests/pytest.ini  7
-rw-r--r--  vrrpd/vrrp_zebra.c  2
-rw-r--r--  yang/frr-bfdd.yang  387
-rw-r--r--  yang/frr-eigrpd.yang  336
-rw-r--r--  yang/subdir.am  4
-rw-r--r--  zebra/zapi_msg.c  1
-rw-r--r--  zebra/zebra_pbr.c  7
-rw-r--r--  zebra/zebra_pbr.h  1
54 files changed, 11025 insertions, 479 deletions
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index 08a70abc1e..5d143d4e5f 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -34,19 +34,12 @@
DEFINE_MTYPE_STATIC(BFDD, BFDD_CONFIG, "long-lived configuration memory")
DEFINE_MTYPE_STATIC(BFDD, BFDD_SESSION_OBSERVER, "Session observer")
DEFINE_MTYPE_STATIC(BFDD, BFDD_VRF, "BFD VRF")
-DEFINE_QOBJ_TYPE(bfd_session)
/*
* Prototypes
*/
-void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
- struct sockaddr_any *local, bool mhop, const char *ifname,
- const char *vrfname);
-
static uint32_t ptm_bfd_gen_ID(void);
static void ptm_bfd_echo_xmt_TO(struct bfd_session *bfd);
-static void bfd_session_free(struct bfd_session *bs);
-static struct bfd_session *bfd_session_new(void);
static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa,
uint32_t ldisc);
static int bfd_session_update(struct bfd_session *bs, struct bfd_peer_cfg *bpc);
@@ -91,6 +84,8 @@ void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
strlcpy(key->ifname, ifname, sizeof(key->ifname));
if (vrfname && vrfname[0])
strlcpy(key->vrfname, vrfname, sizeof(key->vrfname));
+ else
+ strlcpy(key->vrfname, VRF_DEFAULT_NAME, sizeof(key->vrfname));
}
struct bfd_session *bs_peer_find(struct bfd_peer_cfg *bpc)
@@ -396,17 +391,13 @@ struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp,
/* Search for session without using discriminator. */
ifp = if_lookup_by_index(ifindex, vrfid);
- if (vrfid == VRF_DEFAULT) {
- /*
- * Don't use the default vrf, otherwise we won't find
- * sessions that doesn't specify it.
- */
- vrf = NULL;
- } else
+ if (vrfid != VRF_DEFAULT)
vrf = vrf_lookup_by_id(vrfid);
+ else
+ vrf = NULL;
gen_bfd_key(&key, peer, local, is_mhop, ifp ? ifp->name : NULL,
- vrf ? vrf->name : NULL);
+ vrf ? vrf->name : VRF_DEFAULT_NAME);
/* XXX maybe remoteDiscr should be checked for remoteHeard cases. */
return bfd_key_lookup(key);
@@ -469,14 +460,12 @@ int bfd_echo_recvtimer_cb(struct thread *t)
return 0;
}
-static struct bfd_session *bfd_session_new(void)
+struct bfd_session *bfd_session_new(void)
{
struct bfd_session *bs;
bs = XCALLOC(MTYPE_BFDD_CONFIG, sizeof(*bs));
- QOBJ_REG(bs, bfd_session);
-
bs->timers.desired_min_tx = BFD_DEFDESIREDMINTX;
bs->timers.required_min_rx = BFD_DEFREQUIREDMINRX;
bs->timers.required_min_echo = BFD_DEF_REQ_MIN_ECHO;
@@ -629,7 +618,7 @@ static int bfd_session_update(struct bfd_session *bs, struct bfd_peer_cfg *bpc)
return 0;
}
-static void bfd_session_free(struct bfd_session *bs)
+void bfd_session_free(struct bfd_session *bs)
{
struct bfd_session_observer *bso;
@@ -650,7 +639,6 @@ static void bfd_session_free(struct bfd_session *bs)
pl_free(bs->pl);
- QOBJ_UNREG(bs);
XFREE(MTYPE_BFDD_CONFIG, bs);
}
@@ -686,6 +674,9 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc)
if (bpc->bpc_has_vrfname)
strlcpy(bfd->key.vrfname, bpc->bpc_vrfname,
sizeof(bfd->key.vrfname));
+ else
+ strlcpy(bfd->key.vrfname, VRF_DEFAULT_NAME,
+ sizeof(bfd->key.vrfname));
/* Copy remaining data. */
if (bpc->bpc_ipv4 == false)
@@ -717,6 +708,17 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc)
bfd->key.mhop = bpc->bpc_mhop;
+ if (bs_registrate(bfd) == NULL)
+ return NULL;
+
+ /* Apply other configurations. */
+ _bfd_session_update(bfd, bpc);
+
+ return bfd;
+}
+
+struct bfd_session *bs_registrate(struct bfd_session *bfd)
+{
+ /* Register session into data structures. */
bfd_key_insert(bfd);
bfd->discrs.my_discr = ptm_bfd_gen_ID();
@@ -733,9 +735,6 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc)
if (bfd->key.ifname[0] || bfd->key.vrfname[0] || bfd->sock == -1)
bs_observer_add(bfd);
- /* Apply other configurations. */
- _bfd_session_update(bfd, bpc);
-
log_info("session-new: %s", bs_to_string(bfd));
control_notify_config(BCM_NOTIFY_CONFIG_ADD, bfd);
@@ -1342,9 +1341,10 @@ struct bfd_key_walk_partial_lookup {
};
/* ignore some parameters */
-static int bfd_key_lookup_ignore_partial_walker(struct hash_bucket *b, void *data)
+static int bfd_key_lookup_ignore_partial_walker(struct hash_bucket *b,
+ void *data)
{
- struct bfd_key_walk_partial_lookup *ctx =
+ struct bfd_key_walk_partial_lookup *ctx =
(struct bfd_key_walk_partial_lookup *)data;
struct bfd_session *given = ctx->given;
struct bfd_session *parsed = b->data;
@@ -1353,7 +1353,8 @@ static int bfd_key_lookup_ignore_partial_walker(struct hash_bucket *b, void *dat
return HASHWALK_CONTINUE;
if (given->key.mhop != parsed->key.mhop)
return HASHWALK_CONTINUE;
- if (memcmp(&given->key.peer, &parsed->key.peer, sizeof(struct in6_addr)))
+ if (memcmp(&given->key.peer, &parsed->key.peer,
+ sizeof(struct in6_addr)))
return HASHWALK_CONTINUE;
if (memcmp(given->key.vrfname, parsed->key.vrfname, MAXNAMELEN))
return HASHWALK_CONTINUE;
@@ -1531,6 +1532,94 @@ void bfd_shutdown(void)
hash_free(bfd_key_hash);
}
+struct bfd_session_iterator {
+ int bsi_stop;
+ bool bsi_mhop;
+ const struct bfd_session *bsi_bs;
+};
+
+static int _bfd_session_next(struct hash_bucket *hb, void *arg)
+{
+ struct bfd_session_iterator *bsi = arg;
+ struct bfd_session *bs = hb->data;
+
+ /* Previous entry signaled stop. */
+ if (bsi->bsi_stop == 1) {
+ /* Match the single/multi hop sessions. */
+ if (bs->key.mhop != bsi->bsi_mhop)
+ return HASHWALK_CONTINUE;
+
+ bsi->bsi_bs = bs;
+ return HASHWALK_ABORT;
+ }
+
+ /* We found the current item, stop in the next one. */
+ if (bsi->bsi_bs == hb->data) {
+ bsi->bsi_stop = 1;
+ /* Set entry to NULL to signal end of list. */
+ bsi->bsi_bs = NULL;
+ } else if (bsi->bsi_bs == NULL && bsi->bsi_mhop == bs->key.mhop) {
+ /* We want the first list item. */
+ bsi->bsi_stop = 1;
+ bsi->bsi_bs = hb->data;
+ return HASHWALK_ABORT;
+ }
+
+ return HASHWALK_CONTINUE;
+}
+
+/*
+ * bfd_session_next: uses the current session to find the next.
+ *
+ * `bs` might point to NULL to get the first item of the data structure.
+ */
+const struct bfd_session *bfd_session_next(const struct bfd_session *bs,
+ bool mhop)
+{
+ struct bfd_session_iterator bsi;
+
+ bsi.bsi_stop = 0;
+ bsi.bsi_bs = bs;
+ bsi.bsi_mhop = mhop;
+ hash_walk(bfd_key_hash, _bfd_session_next, &bsi);
+ if (bsi.bsi_stop == 0)
+ return NULL;
+
+ return bsi.bsi_bs;
+}
+
+static void _bfd_session_remove_manual(struct hash_bucket *hb,
+ void *arg __attribute__((__unused__)))
+{
+ struct bfd_session *bs = hb->data;
+
+ /* Delete only manually configured sessions. */
+ if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0)
+ return;
+
+ bs->refcount--;
+ BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+
+ /* Don't delete sessions still in use. */
+ if (bs->refcount != 0)
+ return;
+
+ bfd_session_free(bs);
+}
+
+/*
+ * bfd_sessions_remove_manual: remove all manually configured sessions.
+ *
+ * NOTE: this function doesn't remove automatically created sessions.
+ */
+void bfd_sessions_remove_manual(void)
+{
+ hash_iterate(bfd_key_hash, _bfd_session_remove_manual, NULL);
+}
+
+/*
+ * VRF related functions.
+ */
static int bfd_vrf_new(struct vrf *vrf)
{
log_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
diff --git a/bfdd/bfd.h b/bfdd/bfd.h
index ec31c8cbc6..10aeb3e52c 100644
--- a/bfdd/bfd.h
+++ b/bfdd/bfd.h
@@ -252,11 +252,7 @@ struct bfd_session {
struct bfd_timers remote_timers;
uint64_t refcount; /* number of pointers referencing this. */
-
- /* VTY context data. */
- QOBJ_FIELDS
};
-DECLARE_QOBJ_TYPE(bfd_session)
struct peer_label {
TAILQ_ENTRY(peer_label) pl_entry;
@@ -546,6 +542,16 @@ void bs_observer_del(struct bfd_session_observer *bso);
void bs_to_bpc(struct bfd_session *bs, struct bfd_peer_cfg *bpc);
+void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
+ struct sockaddr_any *local, bool mhop, const char *ifname,
+ const char *vrfname);
+struct bfd_session *bfd_session_new(void);
+struct bfd_session *bs_registrate(struct bfd_session *bs);
+void bfd_session_free(struct bfd_session *bs);
+const struct bfd_session *bfd_session_next(const struct bfd_session *bs,
+ bool mhop);
+void bfd_sessions_remove_manual(void);
+
/* BFD hash data structures interface */
void bfd_initialize(void);
void bfd_shutdown(void);
@@ -585,6 +591,37 @@ void bfdd_vty_init(void);
/*
+ * bfdd_cli.c
+ *
+ * BFD daemon CLI implementation.
+ */
+void bfdd_cli_init(void);
+
+void bfd_cli_show_header(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_header_end(struct vty *vty, struct lyd_node *dnode);
+void bfd_cli_show_single_hop_peer(struct vty *vty,
+ struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_multi_hop_peer(struct vty *vty,
+ struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_peer_end(struct vty *vty, struct lyd_node *dnode);
+void bfd_cli_show_mult(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_tx(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_rx(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_shutdown(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_echo(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void bfd_cli_show_echo_interval(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+
+
+/*
* ptm_adapter.c
*/
void bfdd_zclient_init(struct zebra_privs_t *bfdd_priv);
@@ -596,4 +633,12 @@ void bfdd_sessions_disable_vrf(struct vrf *vrf);
int ptm_bfd_notify(struct bfd_session *bs);
+
+/*
+ * bfdd_northbound.c
+ *
+ * BFD northbound callbacks.
+ */
+extern const struct frr_yang_module_info frr_bfdd_info;
+
#endif /* _BFD_H_ */
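With these prototypes now public, session setup and teardown can be driven outside of bfd.c. A minimal sketch, assuming a hypothetical IPv4 peer on the default VRF (illustrative only, not code from this patch):

	struct sockaddr_any psa, lsa;
	struct bfd_session *bs;

	bs = bfd_session_new();

	memset(&lsa, 0, sizeof(lsa));
	strtosa("192.0.2.1", &psa);	/* hypothetical peer address */
	gen_bfd_key(&bs->key, &psa, &lsa, false, NULL, NULL);

	if (bs_registrate(bs) == NULL)
		bfd_session_free(bs);

This mirrors what the new northbound create callback does: allocate and fill the key during the prepare phase, then register during apply.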
diff --git a/bfdd/bfdd.c b/bfdd/bfdd.c
index 6c277c98f5..5657744f75 100644
--- a/bfdd/bfdd.c
+++ b/bfdd/bfdd.c
@@ -39,6 +39,9 @@ struct thread_master *master;
/* BFDd privileges */
static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_SYS_ADMIN, ZCAP_NET_RAW};
+/* BFD daemon information. */
+static struct frr_daemon_info bfdd_di;
+
void socket_close(int *s)
{
if (*s <= 0)
@@ -78,6 +81,14 @@ static void sigterm_handler(void)
exit(0);
}
+static void sighup_handler(void)
+{
+ zlog_info("SIGHUP received");
+
+ /* Reload config file. */
+ vty_read_config(NULL, bfdd_di.config_file, config_default);
+}
+
static struct quagga_signal_t bfd_signals[] = {
{
.signal = SIGUSR1,
@@ -91,12 +102,23 @@ static struct quagga_signal_t bfd_signals[] = {
.signal = SIGINT,
.handler = &sigterm_handler,
},
+ {
+ .signal = SIGHUP,
+ .handler = &sighup_handler,
+ },
+};
+
+static const struct frr_yang_module_info *bfdd_yang_modules[] = {
+ &frr_interface_info,
+ &frr_bfdd_info,
};
FRR_DAEMON_INFO(bfdd, BFD, .vty_port = 2617,
.proghelp = "Implementation of the BFD protocol.",
.signals = bfd_signals, .n_signals = array_size(bfd_signals),
- .privs = &bglobal.bfdd_privs)
+ .privs = &bglobal.bfdd_privs,
+ .yang_modules = bfdd_yang_modules,
+ .n_yang_modules = array_size(bfdd_yang_modules))
#define OPTION_CTLSOCK 1001
static struct option longopts[] = {
diff --git a/bfdd/bfdd_cli.c b/bfdd/bfdd_cli.c
new file mode 100644
index 0000000000..acb1801cc4
--- /dev/null
+++ b/bfdd/bfdd_cli.c
@@ -0,0 +1,412 @@
+/*
+ * BFD daemon CLI implementation.
+ *
+ * Copyright (C) 2019 Network Device Education Foundation, Inc. ("NetDEF")
+ * Rafael Zalamena
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ */
+
+#include <zebra.h>
+
+#include "lib/command.h"
+#include "lib/log.h"
+#include "lib/northbound_cli.h"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "bfdd/bfdd_cli_clippy.c"
+#endif /* VTYSH_EXTRACT_PL */
+
+#include "bfd.h"
+
+/*
+ * Definitions.
+ */
+#define PEER_STR "Configure peer\n"
+#define INTERFACE_NAME_STR "Configure interface name to use\n"
+#define PEER_IPV4_STR "IPv4 peer address\n"
+#define PEER_IPV6_STR "IPv6 peer address\n"
+#define MHOP_STR "Configure multihop\n"
+#define LOCAL_STR "Configure local address\n"
+#define LOCAL_IPV4_STR "IPv4 local address\n"
+#define LOCAL_IPV6_STR "IPv6 local address\n"
+#define LOCAL_INTF_STR "Configure local interface name to use\n"
+#define VRF_STR "Configure VRF\n"
+#define VRF_NAME_STR "Configure VRF name\n"
+
+/*
+ * Prototypes.
+ */
+
+/*
+ * Functions.
+ */
+DEFPY_NOSH(
+ bfd_enter, bfd_enter_cmd,
+ "bfd",
+ "Configure BFD peers\n")
+{
+ int ret;
+
+ nb_cli_enqueue_change(vty, "/frr-bfdd:bfdd/bfd", NB_OP_CREATE, NULL);
+ ret = nb_cli_apply_changes(vty, NULL);
+ if (ret == CMD_SUCCESS)
+ VTY_PUSH_XPATH(BFD_NODE, "/frr-bfdd:bfdd/bfd");
+
+ return ret;
+}
+
+DEFUN(
+ bfd_config_reset, bfd_config_reset_cmd,
+ "no bfd",
+ NO_STR
+ "Configure BFD peers\n")
+{
+ nb_cli_enqueue_change(vty, "/frr-bfdd:bfdd/bfd", NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_header(struct vty *vty,
+ struct lyd_node *dnode __attribute__((__unused__)),
+ bool show_defaults __attribute__((__unused__)))
+{
+ vty_out(vty, "!\nbfd\n");
+}
+
+void bfd_cli_show_header_end(struct vty *vty,
+ struct lyd_node *dnode __attribute__((__unused__)))
+{
+ vty_out(vty, "!\n");
+}
+
+DEFPY_NOSH(
+ bfd_peer_enter, bfd_peer_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> [{multihop$multihop|local-address <A.B.C.D|X:X::X:X>|interface IFNAME$ifname|vrf NAME}]",
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ INTERFACE_STR
+ LOCAL_INTF_STR
+ VRF_STR
+ VRF_NAME_STR)
+{
+ int ret, slen;
+ char source_str[INET6_ADDRSTRLEN];
+ char xpath[XPATH_MAXLEN], xpath_srcaddr[XPATH_MAXLEN + 32];
+
+ if (multihop)
+ snprintf(source_str, sizeof(source_str), "[source-addr='%s']",
+ local_address_str);
+ else
+ source_str[0] = 0;
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/%s%s[dest-addr='%s']",
+ multihop ? "multi-hop" : "single-hop", source_str,
+ peer_str);
+ if (ifname)
+ slen += snprintf(xpath + slen, sizeof(xpath) - slen,
+ "[interface='%s']", ifname);
+ else
+ slen += snprintf(xpath + slen, sizeof(xpath) - slen,
+ "[interface='']");
+ if (vrf)
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']",
+ VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+ if (multihop == NULL && local_address_str != NULL) {
+ snprintf(xpath_srcaddr, sizeof(xpath_srcaddr),
+ "%s/source-addr", xpath);
+ nb_cli_enqueue_change(vty, xpath_srcaddr, NB_OP_MODIFY,
+ local_address_str);
+ }
+
+ /* Apply settings immediately. */
+ ret = nb_cli_apply_changes(vty, NULL);
+ if (ret == CMD_SUCCESS)
+ VTY_PUSH_XPATH(BFD_PEER_NODE, xpath);
+
+ return ret;
+}
+
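/*
 * Editorial note, not part of the patch: for multi-hop peers the local
 * address is part of the list key ([source-addr='...']), while for
 * single-hop peers it is applied separately as a ./source-addr modify.
 * A hypothetical "peer 192.0.2.1 interface eth0" therefore maps to
 *
 *   /frr-bfdd:bfdd/bfd/sessions/single-hop[dest-addr='192.0.2.1']
 *           [interface='eth0'][vrf='default']
 *
 * assuming "default" is the default VRF name.
 */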
+DEFPY(
+ bfd_no_peer, bfd_no_peer_cmd,
+ "no peer <A.B.C.D|X:X::X:X> [{multihop$multihop|local-address <A.B.C.D|X:X::X:X>|interface IFNAME$ifname|vrf NAME}]",
+ NO_STR
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ INTERFACE_STR
+ LOCAL_INTF_STR
+ VRF_STR
+ VRF_NAME_STR)
+{
+ int slen;
+ char xpath[XPATH_MAXLEN];
+ char source_str[INET6_ADDRSTRLEN];
+
+ if (multihop)
+ snprintf(source_str, sizeof(source_str), "[source-addr='%s']",
+ local_address_str);
+ else
+ source_str[0] = 0;
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/%s%s[dest-addr='%s']",
+ multihop ? "multi-hop" : "single-hop", source_str,
+ peer_str);
+ if (ifname)
+ slen += snprintf(xpath + slen, sizeof(xpath) - slen,
+ "[interface='%s']", ifname);
+ else
+ slen += snprintf(xpath + slen, sizeof(xpath) - slen,
+ "[interface='']");
+ if (vrf)
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']",
+ VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ /* Apply settings immediately. */
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+static void _bfd_cli_show_peer(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults __attribute__((__unused__)),
+ bool mhop)
+{
+ const char *vrf = yang_dnode_get_string(dnode, "./vrf");
+ const char *ifname = yang_dnode_get_string(dnode, "./interface");
+
+ vty_out(vty, " peer %s",
+ yang_dnode_get_string(dnode, "./dest-addr"));
+
+ if (mhop)
+ vty_out(vty, " multihop");
+
+ if (yang_dnode_exists(dnode, "./source-addr"))
+ vty_out(vty, " local-address %s",
+ yang_dnode_get_string(dnode, "./source-addr"));
+
+ if (strcmp(vrf, VRF_DEFAULT_NAME))
+ vty_out(vty, " vrf %s", vrf);
+
+ if (ifname[0])
+ vty_out(vty, " interface %s", ifname);
+
+ vty_out(vty, "\n");
+}
+
+void bfd_cli_show_single_hop_peer(struct vty *vty,
+ struct lyd_node *dnode,
+ bool show_defaults)
+{
+ _bfd_cli_show_peer(vty, dnode, show_defaults, false);
+}
+
+void bfd_cli_show_multi_hop_peer(struct vty *vty,
+ struct lyd_node *dnode,
+ bool show_defaults)
+{
+ _bfd_cli_show_peer(vty, dnode, show_defaults, true);
+}
+
+void bfd_cli_show_peer_end(struct vty *vty,
+ struct lyd_node *dnode __attribute__((__unused__)))
+{
+ vty_out(vty, " !\n");
+}
+
+DEFPY(
+ bfd_peer_shutdown, bfd_peer_shutdown_cmd,
+ "[no] shutdown",
+ NO_STR
+ "Disable BFD peer\n")
+{
+ nb_cli_enqueue_change(vty, "./administrative-down", NB_OP_MODIFY,
+ no ? "false" : "true");
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_shutdown(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ if (show_defaults)
+ vty_out(vty, " shutdown\n");
+ else
+ vty_out(vty, " %sshutdown\n",
+ yang_dnode_get_bool(dnode, NULL) ? "" : "no ");
+}
+
+DEFPY(
+ bfd_peer_mult, bfd_peer_mult_cmd,
+ "detect-multiplier (2-255)$multiplier",
+ "Configure peer detection multiplier\n"
+ "Configure peer detection multiplier value\n")
+{
+ nb_cli_enqueue_change(vty, "./detection-multiplier", NB_OP_MODIFY,
+ multiplier_str);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_mult(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ if (show_defaults)
+ vty_out(vty, " detect-multiplier %d\n",
+ BFD_DEFDETECTMULT);
+ else
+ vty_out(vty, " detect-multiplier %s\n",
+ yang_dnode_get_string(dnode, NULL));
+}
+
+DEFPY(
+ bfd_peer_rx, bfd_peer_rx_cmd,
+ "receive-interval (10-60000)$interval",
+ "Configure peer receive interval\n"
+ "Configure peer receive interval value in milliseconds\n")
+{
+ char value[32];
+
+ snprintf(value, sizeof(value), "%ld", interval * 1000);
+ nb_cli_enqueue_change(vty, "./required-receive-interval", NB_OP_MODIFY,
+ value);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_rx(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ uint32_t value;
+
+ if (show_defaults)
+ vty_out(vty, " receive-interval %d\n",
+ BFD_DEFREQUIREDMINRX);
+ else {
+ value = yang_dnode_get_uint32(dnode, NULL);
+ vty_out(vty, " receive-interval %" PRIu32 "\n", value / 1000);
+ }
+}
+
+DEFPY(
+ bfd_peer_tx, bfd_peer_tx_cmd,
+ "transmit-interval (10-60000)$interval",
+ "Configure peer transmit interval\n"
+ "Configure peer transmit interval value in milliseconds\n")
+{
+ char value[32];
+
+ snprintf(value, sizeof(value), "%ld", interval * 1000);
+ nb_cli_enqueue_change(vty, "./desired-transmission-interval",
+ NB_OP_MODIFY, value);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_tx(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ uint32_t value;
+
+ if (show_defaults)
+ vty_out(vty, " transmit-interval %d\n",
+ BFD_DEFDESIREDMINTX);
+ else {
+ value = yang_dnode_get_uint32(dnode, NULL);
+ vty_out(vty, " transmit-interval %" PRIu32 "\n", value / 1000);
+ }
+}
+
+DEFPY(
+ bfd_peer_echo, bfd_peer_echo_cmd,
+ "[no] echo-mode",
+ NO_STR
+ "Configure echo mode\n")
+{
+ nb_cli_enqueue_change(vty, "./echo-mode", NB_OP_MODIFY,
+ no ? "false" : "true");
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_echo(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ if (show_defaults)
+ vty_out(vty, " no echo-mode\n");
+ else
+ vty_out(vty, " %secho-mode\n",
+ yang_dnode_get_bool(dnode, NULL) ? "" : "no ");
+}
+
+DEFPY(
+ bfd_peer_echo_interval, bfd_peer_echo_interval_cmd,
+ "echo-interval (10-60000)$interval",
+ "Configure peer echo interval\n"
+ "Configure peer echo interval value in milliseconds\n")
+{
+ char value[32];
+
+ snprintf(value, sizeof(value), "%ld", interval * 1000);
+ nb_cli_enqueue_change(vty, "./desired-echo-transmission-interval",
+ NB_OP_MODIFY, value);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void bfd_cli_show_echo_interval(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ uint32_t value;
+
+ if (show_defaults)
+ vty_out(vty, " echo-interval %d\n",
+ BFD_DEF_REQ_MIN_ECHO);
+ else {
+ value = yang_dnode_get_uint32(dnode, NULL);
+ vty_out(vty, " echo-interval %" PRIu32 "\n", value / 1000);
+ }
+}
+
+void
+bfdd_cli_init(void)
+{
+ install_element(CONFIG_NODE, &bfd_enter_cmd);
+ install_element(CONFIG_NODE, &bfd_config_reset_cmd);
+
+ install_element(BFD_NODE, &bfd_peer_enter_cmd);
+ install_element(BFD_NODE, &bfd_no_peer_cmd);
+
+ install_element(BFD_PEER_NODE, &bfd_peer_shutdown_cmd);
+ install_element(BFD_PEER_NODE, &bfd_peer_mult_cmd);
+ install_element(BFD_PEER_NODE, &bfd_peer_rx_cmd);
+ install_element(BFD_PEER_NODE, &bfd_peer_tx_cmd);
+ install_element(BFD_PEER_NODE, &bfd_peer_echo_cmd);
+ install_element(BFD_PEER_NODE, &bfd_peer_echo_interval_cmd);
+}
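Taken together, these commands produce configuration of the following shape, rendered back by the bfd_cli_show_* callbacks above (the addresses, interface name, and timer values are made up for illustration):

    !
    bfd
     peer 192.0.2.1 interface eth0
      detect-multiplier 3
      receive-interval 300
      transmit-interval 300
     !
     peer 2001:db8::1 multihop local-address 2001:db8::2
      shutdown
     !
    !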
diff --git a/bfdd/bfdd_northbound.c b/bfdd/bfdd_northbound.c
new file mode 100644
index 0000000000..7cd2fb6b9a
--- /dev/null
+++ b/bfdd/bfdd_northbound.c
@@ -0,0 +1,1210 @@
+/*
+ * BFD daemon northbound implementation.
+ *
+ * Copyright (C) 2019 Network Device Education Foundation, Inc. ("NetDEF")
+ * Rafael Zalamena
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ */
+
+#include <zebra.h>
+
+#include "lib/log.h"
+#include "lib/northbound.h"
+
+#include "bfd.h"
+
+/*
+ * Helpers.
+ */
+static void bfd_session_get_key(bool mhop, const struct lyd_node *dnode,
+ struct bfd_key *bk)
+{
+ const char *ifname = NULL, *vrfname = NULL;
+ struct sockaddr_any psa, lsa;
+
+ /* Required destination parameter. */
+ strtosa(yang_dnode_get_string(dnode, "./dest-addr"), &psa);
+
+ /* Get optional source address. */
+ memset(&lsa, 0, sizeof(lsa));
+ if (yang_dnode_exists(dnode, "./source-addr"))
+ strtosa(yang_dnode_get_string(dnode, "./source-addr"), &lsa);
+
+ /* Get optional interface and vrf names. */
+ if (yang_dnode_exists(dnode, "./interface"))
+ ifname = yang_dnode_get_string(dnode, "./interface");
+ if (yang_dnode_exists(dnode, "./vrf"))
+ vrfname = yang_dnode_get_string(dnode, "./vrf");
+
+ /* Generate the corresponding key. */
+ gen_bfd_key(bk, &psa, &lsa, mhop, ifname, vrfname);
+}
+
+static int bfd_session_create(enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource, bool mhop)
+{
+ struct bfd_session *bs;
+ struct bfd_key bk;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ break;
+
+ case NB_EV_PREPARE:
+ bfd_session_get_key(mhop, dnode, &bk);
+ bs = bfd_key_lookup(bk);
+
+ /* This session was already configured by another daemon. */
+ if (bs != NULL) {
+ /* Now it is configured also by CLI. */
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ bs->refcount++;
+
+ resource->ptr = bs;
+ break;
+ }
+
+ bs = bfd_session_new();
+ if (bs == NULL)
+ return NB_ERR_RESOURCE;
+
+ /* Fill the session key. */
+ bfd_session_get_key(mhop, dnode, &bs->key);
+
+ /* Set configuration flags. */
+ bs->refcount = 1;
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ if (mhop)
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_MH);
+ if (bs->key.family == AF_INET6)
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6);
+
+ resource->ptr = bs;
+ break;
+
+ case NB_EV_APPLY:
+ bs = resource->ptr;
+
+ /* Only attempt to register if freshly allocated. */
+ if (bs->discrs.my_discr == 0 && bs_registrate(bs) == NULL)
+ return NB_ERR_RESOURCE;
+
+ nb_running_set_entry(dnode, bs);
+ break;
+
+ case NB_EV_ABORT:
+ bs = resource->ptr;
+ if (bs->refcount <= 1)
+ bfd_session_free(resource->ptr);
+ break;
+ }
+
+ return NB_OK;
+}
+
+static int bfd_session_destroy(enum nb_event event,
+ const struct lyd_node *dnode, bool mhop)
+{
+ struct bfd_session *bs;
+ struct bfd_key bk;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ bfd_session_get_key(mhop, dnode, &bk);
+ if (bfd_key_lookup(bk) == NULL)
+ return NB_ERR_INCONSISTENCY;
+ break;
+
+ case NB_EV_PREPARE:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ bs = nb_running_unset_entry(dnode);
+ /* CLI is not using this session anymore. */
+ if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0)
+ break;
+
+ BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ bs->refcount--;
+ /* There are still daemons using it. */
+ if (bs->refcount > 0)
+ break;
+
+ bfd_session_free(bs);
+ break;
+
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ }
+
+ return NB_OK;
+}
+
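/*
 * Editorial note, not part of the patch: the two helpers above follow the
 * northbound two-phase commit. bfd_session_create() allocates (or reuses)
 * the session in NB_EV_PREPARE and stashes it in resource->ptr, registers
 * it and binds it to the configuration node in NB_EV_APPLY, and frees a
 * freshly allocated session again in NB_EV_ABORT. bfd_session_destroy()
 * acts only in NB_EV_APPLY: it drops the CONFIG flag and frees the
 * session once no other daemon holds a reference.
 */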
+/*
+ * XPath: /frr-bfdd:bfdd/bfd
+ */
+static int bfdd_bfd_create(enum nb_event event,
+ const struct lyd_node *dnode
+ __attribute__((__unused__)),
+ union nb_resource *resource
+ __attribute__((__unused__)))
+{
+ /* NOTHING */
+ return NB_OK;
+}
+
+static int bfdd_bfd_destroy(enum nb_event event, const struct lyd_node *dnode)
+{
+ switch (event) {
+ case NB_EV_VALIDATE:
+ /* NOTHING */
+ return NB_OK;
+
+ case NB_EV_PREPARE:
+ /* NOTHING */
+ return NB_OK;
+
+ case NB_EV_APPLY:
+ bfd_sessions_remove_manual();
+ break;
+
+ case NB_EV_ABORT:
+ /* NOTHING */
+ return NB_OK;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop
+ */
+static int bfdd_bfd_sessions_single_hop_create(enum nb_event event,
+ const struct lyd_node *dnode,
+ union nb_resource *resource)
+{
+ return bfd_session_create(event, dnode, resource, false);
+}
+
+static int bfdd_bfd_sessions_single_hop_destroy(enum nb_event event,
+ const struct lyd_node *dnode)
+{
+ return bfd_session_destroy(event, dnode, false);
+}
+
+static const void *
+bfdd_bfd_sessions_single_hop_get_next(const void *parent_list_entry
+ __attribute__((__unused__)),
+ const void *list_entry)
+{
+ return bfd_session_next(list_entry, false);
+}
+
+static int bfdd_bfd_sessions_single_hop_get_keys(const void *list_entry,
+ struct yang_list_keys *keys)
+{
+ const struct bfd_session *bs = list_entry;
+ char dstbuf[INET6_ADDRSTRLEN];
+
+ inet_ntop(bs->key.family, &bs->key.peer, dstbuf, sizeof(dstbuf));
+
+ keys->num = 3;
+ strlcpy(keys->key[0], dstbuf, sizeof(keys->key[0]));
+ strlcpy(keys->key[1], bs->key.ifname, sizeof(keys->key[1]));
+ strlcpy(keys->key[2], bs->key.vrfname, sizeof(keys->key[2]));
+
+ return NB_OK;
+}
+
+static const void *
+bfdd_bfd_sessions_single_hop_lookup_entry(const void *parent_list_entry
+ __attribute__((__unused__)),
+ const struct yang_list_keys *keys)
+{
+ const char *dest_addr = keys->key[0];
+ const char *ifname = keys->key[1];
+ const char *vrf = keys->key[2];
+ struct sockaddr_any psa, lsa;
+ struct bfd_key bk;
+
+ strtosa(dest_addr, &psa);
+ memset(&lsa, 0, sizeof(lsa));
+ gen_bfd_key(&bk, &psa, &lsa, false, ifname, vrf);
+
+ return bfd_key_lookup(bk);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/source-addr
+ */
+static int bfdd_bfd_sessions_single_hop_source_addr_modify(
+ enum nb_event event __attribute__((__unused__)),
+ const struct lyd_node *dnode __attribute__((__unused__)),
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ return NB_OK;
+}
+
+static int bfdd_bfd_sessions_single_hop_source_addr_destroy(
+ enum nb_event event __attribute__((__unused__)),
+ const struct lyd_node *dnode __attribute__((__unused__)))
+{
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/detection-multiplier
+ */
+static int bfdd_bfd_sessions_single_hop_detection_multiplier_modify(
+ enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ uint8_t detection_multiplier = yang_dnode_get_uint8(dnode, NULL);
+ struct bfd_session *bs;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ break;
+
+ case NB_EV_PREPARE:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ bs = nb_running_get_entry(dnode, NULL, true);
+ bs->detect_mult = detection_multiplier;
+ break;
+
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/desired-transmission-interval
+ */
+static int bfdd_bfd_sessions_single_hop_desired_transmission_interval_modify(
+ enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ uint32_t tx_interval = yang_dnode_get_uint32(dnode, NULL);
+ struct bfd_session *bs;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ if (tx_interval < 10000 || tx_interval > 60000000)
+ return NB_ERR_VALIDATION;
+ break;
+
+ case NB_EV_PREPARE:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ bs = nb_running_get_entry(dnode, NULL, true);
+ if (tx_interval == bs->timers.desired_min_tx)
+ return NB_OK;
+
+ bs->timers.desired_min_tx = tx_interval;
+ bfd_set_polling(bs);
+ break;
+
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/required-receive-interval
+ */
+static int bfdd_bfd_sessions_single_hop_required_receive_interval_modify(
+ enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ uint32_t rx_interval = yang_dnode_get_uint32(dnode, NULL);
+ struct bfd_session *bs;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ if (rx_interval < 10000 || rx_interval > 60000000)
+ return NB_ERR_VALIDATION;
+ break;
+
+ case NB_EV_PREPARE:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ bs = nb_running_get_entry(dnode, NULL, true);
+ if (rx_interval == bs->timers.required_min_rx)
+ return NB_OK;
+
+ bs->timers.required_min_rx = rx_interval;
+ bfd_set_polling(bs);
+ break;
+
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/administrative-down
+ */
+static int bfdd_bfd_sessions_single_hop_administrative_down_modify(
+ enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ bool shutdown = yang_dnode_get_bool(dnode, NULL);
+ struct bfd_session *bs;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ return NB_OK;
+
+ case NB_EV_APPLY:
+ break;
+
+ case NB_EV_ABORT:
+ return NB_OK;
+ }
+
+ bs = nb_running_get_entry(dnode, NULL, true);
+
+ if (shutdown == false) {
+ if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
+ return NB_OK;
+
+ BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);
+
+ /* Change and notify state change. */
+ bs->ses_state = PTM_BFD_DOWN;
+ control_notify(bs);
+
+ /* Enable all timers. */
+ bfd_recvtimer_update(bs);
+ bfd_xmttimer_update(bs, bs->xmt_TO);
+ if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) {
+ bfd_echo_recvtimer_update(bs);
+ bfd_echo_xmttimer_update(bs, bs->echo_xmt_TO);
+ }
+ } else {
+ if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
+ return NB_OK;
+
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);
+
+ /* Disable all events. */
+ bfd_recvtimer_delete(bs);
+ bfd_echo_recvtimer_delete(bs);
+ bfd_xmttimer_delete(bs);
+ bfd_echo_xmttimer_delete(bs);
+
+ /* Change and notify state change. */
+ bs->ses_state = PTM_BFD_ADM_DOWN;
+ control_notify(bs);
+
+ ptm_bfd_snd(bs, 0);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/echo-mode
+ */
+static int bfdd_bfd_sessions_single_hop_echo_mode_modify(
+ enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ bool echo = yang_dnode_get_bool(dnode, NULL);
+ struct bfd_session *bs;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ return NB_OK;
+
+ case NB_EV_APPLY:
+ break;
+
+ case NB_EV_ABORT:
+ return NB_OK;
+ }
+
+ bs = nb_running_get_entry(dnode, NULL, true);
+
+ if (echo == false) {
+ if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
+ return NB_OK;
+
+ BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO);
+ ptm_bfd_echo_stop(bs);
+ } else {
+ if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
+ return NB_OK;
+
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO);
+ /* Apply setting immediately. */
+ if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
+ bs_echo_timer_handler(bs);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-bfdd:bfdd/bfd/sessions/single-hop/desired-echo-transmission-interval
+ */
+static int
+bfdd_bfd_sessions_single_hop_desired_echo_transmission_interval_modify(
+ enum nb_event event, const struct lyd_node *dnode,
+ union nb_resource *resource __attribute__((__unused__)))
+{
+ uint32_t echo_interval = yang_dnode_get_uint32(dnode, NULL);
+ struct bfd_session *bs;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ if (echo_interval < 10000 || echo_interval > 60000000)
+ return NB_ERR_VALIDATION;
+ break;
+
+ case NB_EV_PREPARE:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ bs = nb_running_get_entry(dnode, NULL, true);
+ if (echo_interval == bs->timers.required_min_echo)
+ return NB_OK;
+
+ bs->timers.required_min_echo = echo_interval;
+ break;
+
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-discriminator
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_local_discriminator_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint32(xpath, bs->discrs.my_discr);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-state
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_local_state_get_elem(const char *xpath,
+ const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_enum(xpath, bs->ses_state);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-diagnostic
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_local_diagnostic_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_enum(xpath, bs->local_diag);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-multiplier
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_local_multiplier_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_int8(xpath, bs->detect_mult);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-discriminator
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_remote_discriminator_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ if (bs->discrs.remote_discr == 0)
+ return NULL;
+
+ return yang_data_new_uint32(xpath, bs->discrs.remote_discr);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-state
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_remote_state_get_elem(const char *xpath,
+ const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_enum(xpath, bs->ses_state);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-diagnostic
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_remote_diagnostic_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_enum(xpath, bs->remote_diag);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-multiplier
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_remote_multiplier_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_int8(xpath, bs->remote_detect_mult);
+}
+
+/*
+ * XPath:
+ * /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/negotiated-transmission-interval
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_negotiated_transmission_interval_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint32(xpath, bs->remote_timers.desired_min_tx);
+}
+
+/*
+ * XPath:
+ * /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/negotiated-receive-interval
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_negotiated_receive_interval_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint32(xpath, bs->remote_timers.required_min_rx);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/detection-mode
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+ int detection_mode;
+
+ /*
+ * Detection mode:
+ * 1. Async with echo
+ * 2. Async without echo
+ * 3. Demand with echo
+ * 4. Demand without echo
+ *
+ * TODO: support demand mode.
+ */
+ if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
+ detection_mode = 1;
+ else
+ detection_mode = 2;
+
+ return yang_data_new_enum(xpath, detection_mode);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/last-down-time
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_last_down_time_get_elem(
+ const char *xpath __attribute__((__unused__)),
+ const void *list_entry __attribute__((__unused__)))
+{
+ /*
+ * TODO: implement me.
+ *
+ * No yang support for time elements yet.
+ */
+ return NULL;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/last-up-time
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_last_up_time_get_elem(
+ const char *xpath __attribute__((__unused__)),
+ const void *list_entry __attribute__((__unused__)))
+{
+ /*
+ * TODO: implement me.
+ *
+ * No yang support for time elements yet.
+ */
+ return NULL;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/session-down-count
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_session_down_count_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint64(xpath, bs->stats.session_down);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/session-up-count
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_session_up_count_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint64(xpath, bs->stats.session_up);
+}
+
+/*
+ * XPath:
+ * /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/control-packet-input-count
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_control_packet_input_count_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint64(xpath, bs->stats.rx_ctrl_pkt);
+}
+
+/*
+ * XPath:
+ * /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/control-packet-output-count
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_control_packet_output_count_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint64(xpath, bs->stats.tx_ctrl_pkt);
+}
+
+/*
+ * XPath:
+ * /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/negotiated-echo-transmission-interval
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_negotiated_echo_transmission_interval_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint32(xpath, bs->remote_timers.required_min_echo);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/echo-packet-input-count
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_echo_packet_input_count_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint64(xpath, bs->stats.rx_echo_pkt);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/stats/echo-packet-output-count
+ */
+static struct yang_data *
+bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem(
+ const char *xpath, const void *list_entry)
+{
+ const struct bfd_session *bs = list_entry;
+
+ return yang_data_new_uint64(xpath, bs->stats.tx_echo_pkt);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/multi-hop
+ */
+static int bfdd_bfd_sessions_multi_hop_create(enum nb_event event,
+ const struct lyd_node *dnode,
+ union nb_resource *resource)
+{
+ return bfd_session_create(event, dnode, resource, true);
+}
+
+static int bfdd_bfd_sessions_multi_hop_destroy(enum nb_event event,
+ const struct lyd_node *dnode)
+{
+ return bfd_session_destroy(event, dnode, true);
+}
+
+static const void *
+bfdd_bfd_sessions_multi_hop_get_next(const void *parent_list_entry
+ __attribute__((__unused__)),
+ const void *list_entry)
+{
+ return bfd_session_next(list_entry, true);
+}
+
+static int bfdd_bfd_sessions_multi_hop_get_keys(const void *list_entry,
+ struct yang_list_keys *keys)
+{
+ const struct bfd_session *bs = list_entry;
+ char dstbuf[INET6_ADDRSTRLEN], srcbuf[INET6_ADDRSTRLEN];
+
+ inet_ntop(bs->key.family, &bs->key.peer, dstbuf, sizeof(dstbuf));
+ inet_ntop(bs->key.family, &bs->key.local, srcbuf, sizeof(srcbuf));
+
+ keys->num = 4;
+ strlcpy(keys->key[0], srcbuf, sizeof(keys->key[0]));
+ strlcpy(keys->key[1], dstbuf, sizeof(keys->key[1]));
+ strlcpy(keys->key[2], bs->key.ifname, sizeof(keys->key[2]));
+ strlcpy(keys->key[3], bs->key.vrfname, sizeof(keys->key[3]));
+
+ return NB_OK;
+}
+
+static const void *
+bfdd_bfd_sessions_multi_hop_lookup_entry(const void *parent_list_entry
+ __attribute__((__unused__)),
+ const struct yang_list_keys *keys)
+{
+ const char *source_addr = keys->key[0];
+ const char *dest_addr = keys->key[1];
+ const char *ifname = keys->key[2];
+ const char *vrf = keys->key[3];
+ struct sockaddr_any psa, lsa;
+ struct bfd_key bk;
+
+ strtosa(dest_addr, &psa);
+ strtosa(source_addr, &lsa);
+ gen_bfd_key(&bk, &psa, &lsa, true, ifname, vrf);
+
+ return bfd_key_lookup(bk);
+}
+
+/* clang-format off */
+const struct frr_yang_module_info frr_bfdd_info = {
+ .name = "frr-bfdd",
+ .nodes = {
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd",
+ .cbs = {
+ .create = bfdd_bfd_create,
+ .destroy = bfdd_bfd_destroy,
+ .cli_show = bfd_cli_show_header,
+ .cli_show_end = bfd_cli_show_header_end,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop",
+ .cbs = {
+ .create = bfdd_bfd_sessions_single_hop_create,
+ .destroy = bfdd_bfd_sessions_single_hop_destroy,
+ .get_next = bfdd_bfd_sessions_single_hop_get_next,
+ .get_keys = bfdd_bfd_sessions_single_hop_get_keys,
+ .lookup_entry = bfdd_bfd_sessions_single_hop_lookup_entry,
+ .cli_show = bfd_cli_show_single_hop_peer,
+ .cli_show_end = bfd_cli_show_peer_end,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/source-addr",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_source_addr_modify,
+ .destroy = bfdd_bfd_sessions_single_hop_source_addr_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/detection-multiplier",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_detection_multiplier_modify,
+ .cli_show = bfd_cli_show_mult,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/desired-transmission-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_desired_transmission_interval_modify,
+ .cli_show = bfd_cli_show_tx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/required-receive-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_required_receive_interval_modify,
+ .cli_show = bfd_cli_show_rx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/administrative-down",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_administrative_down_modify,
+ .cli_show = bfd_cli_show_shutdown,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/echo-mode",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_echo_mode_modify,
+ .cli_show = bfd_cli_show_echo,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/desired-echo-transmission-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_desired_echo_transmission_interval_modify,
+ .cli_show = bfd_cli_show_echo_interval,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/local-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/remote-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/negotiated-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/negotiated-receive-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_receive_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/detection-mode",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/last-down-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_down_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/last-up-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_up_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/session-down-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_down_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/session-up-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_up_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/control-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/control-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/negotiated-echo-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_echo_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/echo-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/stats/echo-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop",
+ .cbs = {
+ .create = bfdd_bfd_sessions_multi_hop_create,
+ .destroy = bfdd_bfd_sessions_multi_hop_destroy,
+ .get_next = bfdd_bfd_sessions_multi_hop_get_next,
+ .get_keys = bfdd_bfd_sessions_multi_hop_get_keys,
+ .lookup_entry = bfdd_bfd_sessions_multi_hop_lookup_entry,
+ .cli_show = bfd_cli_show_multi_hop_peer,
+ .cli_show_end = bfd_cli_show_peer_end,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/detection-multiplier",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_detection_multiplier_modify,
+ .cli_show = bfd_cli_show_mult,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/desired-transmission-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_desired_transmission_interval_modify,
+ .cli_show = bfd_cli_show_tx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/required-receive-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_required_receive_interval_modify,
+ .cli_show = bfd_cli_show_rx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/administrative-down",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_administrative_down_modify,
+ .cli_show = bfd_cli_show_shutdown,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/local-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/local-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/local-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/local-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/remote-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/remote-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/remote-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/remote-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/negotiated-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/negotiated-receive-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_receive_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/detection-mode",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/last-down-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_down_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/last-up-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_up_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/session-down-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_down_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/session-up-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_up_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/control-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/control-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/negotiated-echo-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_echo_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/echo-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/stats/echo-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = NULL,
+ },
+ }
+};
diff --git a/bfdd/bfdd_vty.c b/bfdd/bfdd_vty.c
index 75f6632db0..a211f34219 100644
--- a/bfdd/bfdd_vty.c
+++ b/bfdd/bfdd_vty.c
@@ -23,6 +23,7 @@
#include "lib/command.h"
#include "lib/json.h"
#include "lib/log.h"
+#include "lib/northbound_cli.h"
#include "lib/vty.h"
#include "bfd.h"
@@ -34,8 +35,6 @@
/*
* Commands help string definitions.
*/
-#define PEER_STR "Configure peer\n"
-#define INTERFACE_NAME_STR "Configure interface name to use\n"
#define PEER_IPV4_STR "IPv4 peer address\n"
#define PEER_IPV6_STR "IPv6 peer address\n"
#define MHOP_STR "Configure multihop\n"
@@ -43,16 +42,10 @@
#define LOCAL_IPV4_STR "IPv4 local address\n"
#define LOCAL_IPV6_STR "IPv6 local address\n"
#define LOCAL_INTF_STR "Configure local interface name to use\n"
-#define VRF_STR "Configure VRF\n"
-#define VRF_NAME_STR "Configure VRF name\n"
/*
* Prototypes
*/
-static int bfdd_write_config(struct vty *vty);
-static int bfdd_peer_write_config(struct vty *vty);
-static void _bfdd_peer_write_config(struct vty *vty, struct bfd_session *bs);
-static void _bfdd_peer_write_config_iter(struct hash_bucket *hb, void *arg);
static int bfd_configure_peer(struct bfd_peer_cfg *bpc, bool mhop,
const struct sockaddr_any *peer,
const struct sockaddr_any *local,
@@ -79,286 +72,6 @@ _find_peer_or_error(struct vty *vty, int argc, struct cmd_token **argv,
const char *local_str, const char *ifname,
const char *vrfname);
-/*
- * Commands definition.
- */
-DEFUN_NOSH(bfd_enter, bfd_enter_cmd, "bfd", "Configure BFD peers\n")
-{
- vty->node = BFD_NODE;
- return CMD_SUCCESS;
-}
-
-DEFUN_NOSH(
- bfd_peer_enter, bfd_peer_enter_cmd,
- "peer <A.B.C.D|X:X::X:X> [{multihop|local-address <A.B.C.D|X:X::X:X>|interface IFNAME|vrf NAME}]",
- PEER_STR PEER_IPV4_STR PEER_IPV6_STR
- MHOP_STR
- LOCAL_STR LOCAL_IPV4_STR LOCAL_IPV6_STR
- INTERFACE_STR
- LOCAL_INTF_STR
- VRF_STR VRF_NAME_STR)
-{
- bool mhop;
- int idx;
- struct bfd_session *bs;
- const char *peer, *ifname, *local, *vrfname;
- struct bfd_peer_cfg bpc;
- struct sockaddr_any psa, lsa, *lsap;
- char errormsg[128];
-
- vrfname = peer = ifname = local = NULL;
-
- /* Gather all provided information. */
- peer = argv[1]->arg;
-
- idx = 0;
- mhop = argv_find(argv, argc, "multihop", &idx);
-
- idx = 0;
- if (argv_find(argv, argc, "interface", &idx))
- ifname = argv[idx + 1]->arg;
-
- idx = 0;
- if (argv_find(argv, argc, "local-address", &idx))
- local = argv[idx + 1]->arg;
-
- idx = 0;
- if (argv_find(argv, argc, "vrf", &idx))
- vrfname = argv[idx + 1]->arg;
-
- strtosa(peer, &psa);
- if (local) {
- strtosa(local, &lsa);
- lsap = &lsa;
- } else
- lsap = NULL;
-
- if (bfd_configure_peer(&bpc, mhop, &psa, lsap, ifname, vrfname,
- errormsg, sizeof(errormsg))
- != 0) {
- vty_out(vty, "%% Invalid peer configuration: %s\n", errormsg);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- bs = bs_peer_find(&bpc);
- if (bs == NULL) {
- bs = ptm_bfd_sess_new(&bpc);
- if (bs == NULL) {
- vty_out(vty, "%% Failed to add peer.\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
- }
-
- if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG)) {
- if (bs->refcount)
- vty_out(vty, "%% session peer is now configurable via bfd daemon.\n");
- BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
- }
-
- VTY_PUSH_CONTEXT(BFD_PEER_NODE, bs);
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_detectmultiplier, bfd_peer_detectmultiplier_cmd,
- "detect-multiplier (2-255)$multiplier",
- "Configure peer detection multiplier\n"
- "Configure peer detection multiplier value\n")
-{
- struct bfd_session *bs;
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (bs->detect_mult == multiplier)
- return CMD_SUCCESS;
-
- bs->detect_mult = multiplier;
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_recvinterval, bfd_peer_recvinterval_cmd,
- "receive-interval (10-60000)$interval",
- "Configure peer receive interval\n"
- "Configure peer receive interval value in milliseconds\n")
-{
- struct bfd_session *bs;
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (bs->timers.required_min_rx == (uint32_t)(interval * 1000))
- return CMD_SUCCESS;
-
- bs->timers.required_min_rx = interval * 1000;
- bfd_set_polling(bs);
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_txinterval, bfd_peer_txinterval_cmd,
- "transmit-interval (10-60000)$interval",
- "Configure peer transmit interval\n"
- "Configure peer transmit interval value in milliseconds\n")
-{
- struct bfd_session *bs;
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (bs->timers.desired_min_tx == (uint32_t)(interval * 1000))
- return CMD_SUCCESS;
-
- bs->timers.desired_min_tx = interval * 1000;
- bfd_set_polling(bs);
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_echointerval, bfd_peer_echointerval_cmd,
- "echo-interval (10-60000)$interval",
- "Configure peer echo interval\n"
- "Configure peer echo interval value in milliseconds\n")
-{
- struct bfd_session *bs;
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (bs->timers.required_min_echo == (uint32_t)(interval * 1000))
- return CMD_SUCCESS;
-
- bs->timers.required_min_echo = interval * 1000;
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_shutdown, bfd_peer_shutdown_cmd, "[no] shutdown",
- NO_STR "Disable BFD peer")
-{
- struct bfd_session *bs;
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (no) {
- if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
- return CMD_SUCCESS;
-
- BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);
-
- /* Change and notify state change. */
- bs->ses_state = PTM_BFD_DOWN;
- control_notify(bs);
-
- /* Enable all timers. */
- bfd_recvtimer_update(bs);
- bfd_xmttimer_update(bs, bs->xmt_TO);
- if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) {
- bfd_echo_recvtimer_update(bs);
- bfd_echo_xmttimer_update(bs, bs->echo_xmt_TO);
- }
- } else {
- if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
- return CMD_SUCCESS;
-
- BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);
-
- /* Disable all events. */
- bfd_recvtimer_delete(bs);
- bfd_echo_recvtimer_delete(bs);
- bfd_xmttimer_delete(bs);
- bfd_echo_xmttimer_delete(bs);
-
- /* Change and notify state change. */
- bs->ses_state = PTM_BFD_ADM_DOWN;
- control_notify(bs);
-
- ptm_bfd_snd(bs, 0);
- }
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_echo, bfd_peer_echo_cmd, "[no] echo-mode",
- NO_STR "Configure echo mode\n")
-{
- struct bfd_session *bs;
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (no) {
- if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
- return CMD_SUCCESS;
-
- BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO);
- ptm_bfd_echo_stop(bs);
- } else {
- if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
- return CMD_SUCCESS;
-
- BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO);
- /* Apply setting immediately. */
- if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
- bs_echo_timer_handler(bs);
- }
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_peer_label, bfd_peer_label_cmd, "label WORD$label",
- "Register peer label\n"
- "Register peer label identification\n")
-{
- struct bfd_session *bs;
-
- /* Validate label length. */
- if (strlen(label) >= MAXNAMELEN) {
- vty_out(vty, "%% Label name is too long\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- bs = VTY_GET_CONTEXT(bfd_session);
- if (bfd_session_update_label(bs, label) == -1) {
- vty_out(vty, "%% Failed to update peer label.\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- return CMD_SUCCESS;
-}
-
-DEFPY(bfd_no_peer, bfd_no_peer_cmd,
- "no peer <A.B.C.D|X:X::X:X>$peer [{multihop|local-address <A.B.C.D|X:X::X:X>$local|interface IFNAME$ifname|vrf NAME$vrfname}]",
- NO_STR
- PEER_STR PEER_IPV4_STR PEER_IPV6_STR
- MHOP_STR
- LOCAL_STR LOCAL_IPV4_STR LOCAL_IPV6_STR
- INTERFACE_STR
- LOCAL_INTF_STR
- VRF_STR VRF_NAME_STR)
-{
- int idx;
- bool mhop;
- struct bfd_peer_cfg bpc;
- struct sockaddr_any psa, lsa, *lsap;
- char errormsg[128];
-
- strtosa(peer_str, &psa);
- if (local) {
- strtosa(local_str, &lsa);
- lsap = &lsa;
- } else {
- lsap = NULL;
- }
-
- idx = 0;
- mhop = argv_find(argv, argc, "multihop", &idx);
-
- if (bfd_configure_peer(&bpc, mhop, &psa, lsap, ifname, vrfname,
- errormsg, sizeof(errormsg))
- != 0) {
- vty_out(vty, "%% Invalid peer configuration: %s\n", errormsg);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- if (ptm_bfd_sess_del(&bpc) != 0) {
- vty_out(vty, "%% Failed to remove peer.\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- return CMD_SUCCESS;
-}
-
/*
* Show commands helper functions
@@ -956,12 +669,6 @@ static int bfd_configure_peer(struct bfd_peer_cfg *bpc, bool mhop,
/* Handle interface specification configuration. */
if (ifname) {
- if (bpc->bpc_mhop) {
- snprintf(ebuf, ebuflen,
- "multihop doesn't accept interface names");
- return -1;
- }
-
bpc->bpc_has_localif = true;
if (strlcpy(bpc->bpc_localif, ifname, sizeof(bpc->bpc_localif))
> sizeof(bpc->bpc_localif)) {
@@ -982,60 +689,6 @@ static int bfd_configure_peer(struct bfd_peer_cfg *bpc, bool mhop,
return 0;
}
-static int bfdd_write_config(struct vty *vty)
-{
- vty_out(vty, "bfd\n");
- vty_out(vty, "!\n");
- return 0;
-}
-
-static void _bfdd_peer_write_config(struct vty *vty, struct bfd_session *bs)
-{
- char addr_buf[INET6_ADDRSTRLEN];
-
- vty_out(vty, " peer %s",
- inet_ntop(bs->key.family, &bs->key.peer, addr_buf,
- sizeof(addr_buf)));
-
- if (bs->key.mhop)
- vty_out(vty, " multihop");
-
- if (memcmp(&bs->key.local, &zero_addr, sizeof(bs->key.local)))
- vty_out(vty, " local-address %s",
- inet_ntop(bs->key.family, &bs->key.local, addr_buf,
- sizeof(addr_buf)));
-
- if (bs->key.vrfname[0])
- vty_out(vty, " vrf %s", bs->key.vrfname);
- if (bs->key.ifname[0])
- vty_out(vty, " interface %s", bs->key.ifname);
- vty_out(vty, "\n");
-
- if (bs->sock == -1)
- vty_out(vty,
- " ! vrf, interface or local-address doesn't exist\n");
-
- if (bs->detect_mult != BPC_DEF_DETECTMULTIPLIER)
- vty_out(vty, " detect-multiplier %d\n", bs->detect_mult);
- if (bs->timers.required_min_rx != (BPC_DEF_RECEIVEINTERVAL * 1000))
- vty_out(vty, " receive-interval %" PRIu32 "\n",
- bs->timers.required_min_rx / 1000);
- if (bs->timers.desired_min_tx != (BPC_DEF_TRANSMITINTERVAL * 1000))
- vty_out(vty, " transmit-interval %" PRIu32 "\n",
- bs->timers.desired_min_tx / 1000);
- if (bs->timers.required_min_echo != (BPC_DEF_ECHOINTERVAL * 1000))
- vty_out(vty, " echo-interval %" PRIu32 "\n",
- bs->timers.required_min_echo / 1000);
- if (bs->pl)
- vty_out(vty, " label %s\n", bs->pl->pl_label);
- if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
- vty_out(vty, " echo-mode\n");
-
- vty_out(vty, " %sshutdown\n",
- BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) ? "" : "no ");
-
- vty_out(vty, " !\n");
-}
DEFUN_NOSH(show_debugging_bfd,
show_debugging_bfd_cmd,
@@ -1049,24 +702,6 @@ DEFUN_NOSH(show_debugging_bfd,
return CMD_SUCCESS;
}
-static void _bfdd_peer_write_config_iter(struct hash_bucket *hb, void *arg)
-{
- struct vty *vty = arg;
- struct bfd_session *bs = hb->data;
-
- if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG))
- return;
-
- _bfdd_peer_write_config(vty, bs);
-}
-
-static int bfdd_peer_write_config(struct vty *vty)
-{
- bfd_id_iterate(_bfdd_peer_write_config_iter, vty);
-
- return 1;
-}
-
struct cmd_node bfd_node = {
BFD_NODE,
"%s(config-bfd)# ",
@@ -1079,29 +714,35 @@ struct cmd_node bfd_peer_node = {
1,
};
+static int bfdd_write_config(struct vty *vty)
+{
+ struct lyd_node *dnode;
+ int written = 0;
+
+ dnode = yang_dnode_get(running_config->dnode, "/frr-bfdd:bfdd");
+ if (dnode) {
+ nb_cli_show_dnode_cmds(vty, dnode, false);
+ written = 1;
+ }
+
+ return written;
+}
+
void bfdd_vty_init(void)
{
install_element(ENABLE_NODE, &bfd_show_peers_counters_cmd);
install_element(ENABLE_NODE, &bfd_show_peer_counters_cmd);
install_element(ENABLE_NODE, &bfd_show_peers_cmd);
install_element(ENABLE_NODE, &bfd_show_peer_cmd);
- install_element(CONFIG_NODE, &bfd_enter_cmd);
install_element(ENABLE_NODE, &show_debugging_bfd_cmd);
/* Install BFD node and commands. */
install_node(&bfd_node, bfdd_write_config);
install_default(BFD_NODE);
- install_element(BFD_NODE, &bfd_peer_enter_cmd);
- install_element(BFD_NODE, &bfd_no_peer_cmd);
/* Install BFD peer node. */
- install_node(&bfd_peer_node, bfdd_peer_write_config);
+ install_node(&bfd_peer_node, NULL);
install_default(BFD_PEER_NODE);
- install_element(BFD_PEER_NODE, &bfd_peer_detectmultiplier_cmd);
- install_element(BFD_PEER_NODE, &bfd_peer_recvinterval_cmd);
- install_element(BFD_PEER_NODE, &bfd_peer_txinterval_cmd);
- install_element(BFD_PEER_NODE, &bfd_peer_echointerval_cmd);
- install_element(BFD_PEER_NODE, &bfd_peer_shutdown_cmd);
- install_element(BFD_PEER_NODE, &bfd_peer_echo_cmd);
- install_element(BFD_PEER_NODE, &bfd_peer_label_cmd);
+
+ bfdd_cli_init();
}
diff --git a/bfdd/subdir.am b/bfdd/subdir.am
index e88b982ec3..5e3c3d4765 100644
--- a/bfdd/subdir.am
+++ b/bfdd/subdir.am
@@ -7,12 +7,15 @@ noinst_LIBRARIES += bfdd/libbfd.a
sbin_PROGRAMS += bfdd/bfdd
dist_examples_DATA += bfdd/bfdd.conf.sample
vtysh_scan += $(top_srcdir)/bfdd/bfdd_vty.c
+vtysh_scan += $(top_srcdir)/bfdd/bfdd_cli.c
man8 += $(MANBUILD)/bfdd.8
endif
bfdd_libbfd_a_SOURCES = \
bfdd/bfd.c \
+ bfdd/bfdd_northbound.c \
bfdd/bfdd_vty.c \
+ bfdd/bfdd_cli.c \
bfdd/bfd_packet.c \
bfdd/config.c \
bfdd/control.c \
@@ -24,10 +27,17 @@ bfdd_libbfd_a_SOURCES = \
bfdd/bfdd_vty_clippy.c: $(CLIPPY_DEPS)
bfdd/bfdd_vty.$(OBJEXT): bfdd/bfdd_vty_clippy.c
+bfdd/bfdd_cli_clippy.c: $(CLIPPY_DEPS)
+bfdd/bfdd_cli.$(OBJEXT): bfdd/bfdd_cli_clippy.c
+
noinst_HEADERS += \
bfdd/bfdctl.h \
bfdd/bfd.h \
# end
+nodist_bfdd_bfdd_SOURCES = \
+ yang/frr-bfdd.yang.c \
+ # end
+
bfdd_bfdd_SOURCES = bfdd/bfdd.c
bfdd_bfdd_LDADD = bfdd/libbfd.a lib/libfrr.la
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 44e9375dc9..a22082c072 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -2480,7 +2480,9 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
bgp_evpn_show_route_header(vty, bgp,
tbl_ver,
json);
- vty_out(vty, "%19s Extended Community\n"
+ if (!json)
+ vty_out(vty,
+ "%19s Extended Community\n"
, " ");
header = 0;
}
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index 8e18ed7529..1dadf00e8f 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -440,6 +440,17 @@ static int bgp_accept(struct thread *thread)
return -1;
}
+ /* Check whether max prefix restart timer is set for the peer */
+ if (peer1->t_pmax_restart) {
+ if (bgp_debug_neighbor_events(peer1))
+ zlog_debug(
+ "%s - incoming conn rejected - "
+ "peer max prefix timer is active",
+ peer1->host);
+ close(bgp_sock);
+ return -1;
+ }
+
if (bgp_debug_neighbor_events(peer1))
zlog_debug("[Event] BGP connection from host %s fd %d",
inet_sutop(&su, buf), bgp_sock);
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index 5eef6ac6cc..2d50d1c9ea 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -698,6 +698,7 @@ int bgp_pbr_build_and_validate_entry(struct prefix *p,
int valid_prefix = 0;
afi_t afi = AFI_IP;
struct bgp_pbr_entry_action *api_action_redirect_ip = NULL;
+ bool discard_action_found = false;
/* extract match from flowspec entries */
ret = bgp_flowspec_match_rules_fill((uint8_t *)p->u.prefix_flowspec.ptr,
@@ -805,10 +806,22 @@ int bgp_pbr_build_and_validate_entry(struct prefix *p,
api_action);
if (ret != 0)
continue;
+ if ((api_action->action == ACTION_TRAFFICRATE) &&
+ api->actions[i].u.r.rate == 0)
+ discard_action_found = true;
}
api->action_num++;
}
}
+	/* If a discard action (ECOMMUNITY_TRAFFIC_RATE with rate 0) is present,
+	 * then reduce the API action list to that single action.
+	 */
+ if (api->action_num > 1 && discard_action_found) {
+ api->action_num = 1;
+ memset(&api->actions[0], 0,
+ sizeof(struct bgp_pbr_entry_action));
+ api->actions[0].action = ACTION_TRAFFICRATE;
+ }
/* validate if incoming matc/action is compatible
* with our policy routing engine
@@ -977,6 +990,7 @@ uint32_t bgp_pbr_match_hash_key(const void *arg)
key = jhash(&pbm->tcp_mask_flags, 2, key);
key = jhash(&pbm->dscp_value, 1, key);
key = jhash(&pbm->fragment, 1, key);
+ key = jhash(&pbm->protocol, 1, key);
return jhash_1word(pbm->type, key);
}
@@ -1016,6 +1030,9 @@ bool bgp_pbr_match_hash_equal(const void *arg1, const void *arg2)
if (r1->fragment != r2->fragment)
return false;
+
+ if (r1->protocol != r2->protocol)
+ return false;
return true;
}
@@ -2162,6 +2179,10 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp,
temp.flags |= MATCH_FRAGMENT_INVERSE_SET;
temp.fragment = bpf->fragment->val;
}
+ if (bpf->protocol) {
+ temp.protocol = bpf->protocol;
+ temp.flags |= MATCH_PROTOCOL_SET;
+ }
temp.action = bpa;
bpm = hash_get(bgp->pbr_match_hash, &temp,
bgp_pbr_match_alloc_intern);
diff --git a/bgpd/bgp_pbr.h b/bgpd/bgp_pbr.h
index b368d8892d..393b08da48 100644
--- a/bgpd/bgp_pbr.h
+++ b/bgpd/bgp_pbr.h
@@ -186,6 +186,7 @@ struct bgp_pbr_match {
uint16_t tcp_mask_flags;
uint8_t dscp_value;
uint8_t fragment;
+ uint8_t protocol;
vrf_id_t vrf_id;
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index e43f9486f6..1f90fa742a 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -60,6 +60,8 @@
#include "bgpd/bgp_evpn_private.h"
#include "bgpd/bgp_evpn_vty.h"
#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_pbr.h"
+#include "bgpd/bgp_flowspec_util.h"
#if ENABLE_BGP_VNC
#include "bgpd/rfapi/bgp_rfapi_cfg.h"
@@ -569,24 +571,67 @@ struct route_map_rule_cmd route_match_ip_route_source_cmd = {
"ip route-source", route_match_ip_route_source,
route_match_ip_route_source_compile, route_match_ip_route_source_free};
-/* `match ip address prefix-list PREFIX_LIST' */
+static route_map_result_t route_match_prefix_list_flowspec(afi_t afi,
+ struct prefix_list *plist,
+ const struct prefix *p)
+{
+ int ret;
+ struct bgp_pbr_entry_main api;
+
+ memset(&api, 0, sizeof(api));
+
+ /* extract match from flowspec entries */
+ ret = bgp_flowspec_match_rules_fill(
+ (uint8_t *)p->u.prefix_flowspec.ptr,
+ p->u.prefix_flowspec.prefixlen, &api);
+ if (ret < 0)
+ return RMAP_NOMATCH;
+ if (api.match_bitmask & PREFIX_DST_PRESENT ||
+ api.match_bitmask_iprule & PREFIX_DST_PRESENT) {
+ if (family2afi((&api.dst_prefix)->family) != afi)
+ return RMAP_NOMATCH;
+ return prefix_list_apply(plist, &api.dst_prefix) == PREFIX_DENY
+ ? RMAP_NOMATCH
+ : RMAP_MATCH;
+ } else if (api.match_bitmask & PREFIX_SRC_PRESENT ||
+ api.match_bitmask_iprule & PREFIX_SRC_PRESENT) {
+ if (family2afi((&api.src_prefix)->family) != afi)
+ return RMAP_NOMATCH;
+ return (prefix_list_apply(plist, &api.src_prefix) == PREFIX_DENY
+ ? RMAP_NOMATCH
+ : RMAP_MATCH);
+ }
+ return RMAP_NOMATCH;
+}
+/* `match ip address prefix-list PREFIX_LIST' */
static route_map_result_t
-route_match_ip_address_prefix_list(void *rule, const struct prefix *prefix,
- route_map_object_t type, void *object)
+route_match_address_prefix_list(void *rule, afi_t afi,
+ const struct prefix *prefix,
+ route_map_object_t type, void *object)
{
struct prefix_list *plist;
- if (type == RMAP_BGP && prefix->family == AF_INET) {
- plist = prefix_list_lookup(AFI_IP, (char *)rule);
- if (plist == NULL)
- return RMAP_NOMATCH;
+ if (type != RMAP_BGP)
+ return RMAP_NOMATCH;
- return (prefix_list_apply(plist, prefix) == PREFIX_DENY
- ? RMAP_NOMATCH
- : RMAP_MATCH);
- }
- return RMAP_NOMATCH;
+ plist = prefix_list_lookup(afi, (char *)rule);
+ if (plist == NULL)
+ return RMAP_NOMATCH;
+
+ if (prefix->family == AF_FLOWSPEC)
+ return route_match_prefix_list_flowspec(afi, plist,
+ prefix);
+ return (prefix_list_apply(plist, prefix) == PREFIX_DENY ? RMAP_NOMATCH
+ : RMAP_MATCH);
+}
+
+static route_map_result_t
+route_match_ip_address_prefix_list(void *rule, const struct prefix *prefix,
+ route_map_object_t type, void *object)
+{
+ return route_match_address_prefix_list(rule, AFI_IP, prefix, type,
+ object);
}
static void *route_match_ip_address_prefix_list_compile(const char *arg)
@@ -2540,18 +2585,8 @@ static route_map_result_t
route_match_ipv6_address_prefix_list(void *rule, const struct prefix *prefix,
route_map_object_t type, void *object)
{
- struct prefix_list *plist;
-
- if (type == RMAP_BGP && prefix->family == AF_INET6) {
- plist = prefix_list_lookup(AFI_IP6, (char *)rule);
- if (plist == NULL)
- return RMAP_NOMATCH;
-
- return (prefix_list_apply(plist, prefix) == PREFIX_DENY
- ? RMAP_NOMATCH
- : RMAP_MATCH);
- }
- return RMAP_NOMATCH;
+ return route_match_address_prefix_list(rule, AFI_IP6, prefix, type,
+ object);
}
static void *route_match_ipv6_address_prefix_list_compile(const char *arg)
@@ -4309,9 +4344,12 @@ DEFUN (set_community_delete,
"Delete matching communities\n")
{
int idx_comm_list = 2;
+ char *args;
+ args = argv_concat(argv, argc, idx_comm_list);
generic_set_add(vty, VTY_GET_CONTEXT(route_map_index), "comm-list",
- argv[idx_comm_list]->arg);
+ args);
+ XFREE(MTYPE_TMP, args);
return CMD_SUCCESS;
}
@@ -4401,8 +4439,13 @@ DEFUN (set_lcommunity_delete,
"Large Community-list name\n"
"Delete matching large communities\n")
{
+ int idx_lcomm_list = 2;
+ char *args;
+
+ args = argv_concat(argv, argc, idx_lcomm_list);
generic_set_add(vty, VTY_GET_CONTEXT(route_map_index),
- "large-comm-list", argv[2]->arg);
+ "large-comm-list", args);
+ XFREE(MTYPE_TMP, args);
return CMD_SUCCESS;
}
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index c0f2dfca17..71f7f6d0e3 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -2436,6 +2436,7 @@ static void bgp_encode_pbr_iptable_match(struct stream *s,
stream_putw(s, pbm->tcp_mask_flags);
stream_putc(s, pbm->dscp_value);
stream_putc(s, pbm->fragment);
+ stream_putc(s, pbm->protocol);
}
/* BGP has established connection with Zebra. */
diff --git a/configure.ac b/configure.ac
index 0911b96900..961336fbd0 100755
--- a/configure.ac
+++ b/configure.ac
@@ -139,13 +139,6 @@ AC_ARG_WITH([yangmodelsdir], [AS_HELP_STRING([--with-yangmodelsdir=DIR], [yang m
])
AC_SUBST([yangmodelsdir])
-AC_ARG_WITH([libyang_pluginsdir], [AS_HELP_STRING([--with-libyang-pluginsdir=DIR], [yangmodule plugins directory (${libdir}/frr/libyang_plugins)])], [
- libyang_pluginsdir="$withval"
-], [
- libyang_pluginsdir="\${libdir}/frr/libyang_plugins"
-])
-AC_SUBST([libyang_pluginsdir])
-
AC_ARG_ENABLE(tcmalloc,
AS_HELP_STRING([--enable-tcmalloc], [Turn on tcmalloc]),
[case "${enableval}" in
@@ -1608,8 +1601,8 @@ AC_SUBST([SNMP_CFLAGS])
dnl ---------------
dnl libyang
dnl ---------------
-PKG_CHECK_MODULES([LIBYANG], [libyang >= 0.16.7], , [
- AC_MSG_ERROR([libyang (>= 0.16.7) was not found on your system.])
+PKG_CHECK_MODULES([LIBYANG], [libyang >= 0.16.105], , [
+ AC_MSG_ERROR([libyang (>= 0.16.105) was not found on your system.])
])
ac_cflags_save="$CFLAGS"
CFLAGS="$CFLAGS $LIBYANG_CFLAGS"
@@ -2166,24 +2159,20 @@ CFG_SBIN="$sbindir"
CFG_STATE="$frr_statedir"
CFG_MODULE="$moduledir"
CFG_YANGMODELS="$yangmodelsdir"
-CFG_LIBYANG_PLUGINS="$libyang_pluginsdir"
for I in 1 2 3 4 5 6 7 8 9 10; do
eval CFG_SYSCONF="\"$CFG_SYSCONF\""
eval CFG_SBIN="\"$CFG_SBIN\""
eval CFG_STATE="\"$CFG_STATE\""
eval CFG_MODULE="\"$CFG_MODULE\""
eval CFG_YANGMODELS="\"$CFG_YANGMODELS\""
- eval CFG_LIBYANG_PLUGINS="\"$CFG_LIBYANG_PLUGINS\""
done
AC_SUBST([CFG_SYSCONF])
AC_SUBST([CFG_SBIN])
AC_SUBST([CFG_STATE])
AC_SUBST([CFG_MODULE])
AC_SUBST([CFG_YANGMODELS])
-AC_SUBST([CFG_LIBYANG_PLUGINS])
AC_DEFINE_UNQUOTED([MODULE_PATH], ["$CFG_MODULE"], [path to modules])
AC_DEFINE_UNQUOTED([YANG_MODELS_PATH], ["$CFG_YANGMODELS"], [path to YANG data models])
-AC_DEFINE_UNQUOTED([LIBYANG_PLUGINS_PATH], ["$CFG_LIBYANG_PLUGINS"], [path to libyang plugins])
AC_DEFINE_UNQUOTED([WATCHFRR_SH_PATH], ["${CFG_SBIN%/}/watchfrr.sh"], [path to watchfrr.sh])
dnl various features
diff --git a/doc/developer/building-frr-for-omnios.rst b/doc/developer/building-frr-for-omnios.rst
index ffc7a078e5..3a69279b0c 100644
--- a/doc/developer/building-frr-for-omnios.rst
+++ b/doc/developer/building-frr-for-omnios.rst
@@ -60,7 +60,7 @@ Add pytest:
::
- pip install pytest
+ pip install "pytest<5"
Install Sphinx:::
diff --git a/doc/developer/building-libyang.rst b/doc/developer/building-libyang.rst
index ed3e029908..f50b8cf72d 100644
--- a/doc/developer/building-libyang.rst
+++ b/doc/developer/building-libyang.rst
@@ -10,7 +10,7 @@ The FRR project builds binary ``libyang`` packages, which we offer for download
.. warning::
- ``libyang`` version 0.16.74 or newer is required to build FRR.
+ ``libyang`` version 0.16.105 or newer is required to build FRR.
.. note::
diff --git a/doc/developer/topotests-jsontopo.rst b/doc/developer/topotests-jsontopo.rst
new file mode 100644
index 0000000000..65bdcbe9cf
--- /dev/null
+++ b/doc/developer/topotests-jsontopo.rst
@@ -0,0 +1,475 @@
+*************************************
+FRRouting Topology Tests with Mininet
+*************************************
+
+Overview
+########
+
+The following enhancements have been made on top of the current topotests framework:
+
+
+#.
+   Creating the topology and assigning IPs to the routers' interfaces dynamically.
+   This is achieved by using a JSON file, in which the user specifies the number of
+   routers, the links to each router, the interfaces for the routers and the protocol
+   configurations for all routers.
+
+#.
+   Creating the configurations dynamically. This is achieved by using the
+   /usr/lib/frr/frr-reload.py utility, which takes the running configuration and the
+   newly created configuration for any particular router, creates a delta
+   file (diff file) and loads it onto the router.
+
+
+Logging of test case executions
+###############################
+
+
+#. The user can enable logging of test case execution messages into a log file by
+   adding "frrtest_log_dir = /tmp/topotests/" to the pytest.ini file
+#. The router's current configuration can be displayed on the console or sent to the
+   logs by adding "show_router_config = True" to the pytest.ini file
+
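+A minimal pytest.ini sketch with both options enabled is shown below (assuming the
+options live in the "[topogen]" section already used by topotests; the log directory
+is only an example and can be any writable location):
+
+.. code-block::
+
+    [topogen]
+    # Save test case execution logs under this directory
+    frrtest_log_dir = /tmp/topotests/
+    # Dump each router's current configuration to the console/logs
+    show_router_config = True
+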
+Log file name will be displayed when we start execution:
+
+.. code-block::
+
+    root@test:~/topotests/example-topojson-test/test_topo_json_single_link# python test_topo_json_single_link.py
+    Logs will be sent to logfile: /tmp/topotests/test_topo_json_single_link_11:57:01.353797
+
+Note: the directory "/tmp/topotests/" is created by topotests by default; the same
+directory is used to save the execution logs.
+
+
+Guidelines
+##########
+
+Writing New Tests
+=================
+
+
+This section will guide you through all the recommended steps to produce a standard
+topology test.
+
+This is the recommended test writing routine:
+
+
+* Create a JSON file, which will contain the routers and protocol configurations
+* Create topology from json
+* Create configuration from json
+* Write the tests
+* Create a Pull Request
+
+File Hierarchy
+==============
+
+Before starting to write any tests one must know the file hierarchy. The
+repository hierarchy looks like this:
+
+.. code-block::
+
+ $ cd path/to/topotests
+ $ find ./*
+ ...
+ ./example-topojson-test # the basic example test topology-1
+ ./example-topojson-test/test_example_topojson.json # input json file, having
+ topology, interfaces, bgp and other configuration
+ ./example-topojson-test/test_example_topojson.py # test script to write and
+ execute testcases
+ ...
+ ./lib # shared test/topology functions
+ ./lib/topojson.py # library to create topology and configurations dynamically
+ from json file
+ ./lib/common_config.py # library to create protocol's common configurations ex-
+ static_routes, prefix_lists, route_maps etc.
+ ./lib/bgp.py # library to create only bgp configurations
+
+Defining the Topology and initial configuration in JSON file
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The first step in writing a new test is to define the topology and the initial
+configuration in a JSON file. Here are some example JSON files.
+
+.. code-block::
+
+    BGP neighborship with single phy-link, sample JSON file:
+ {
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "192.168.0.0", "v4mask": 30, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ...
+
+
+    BGP neighborship with loopback interface, sample JSON file:
+ {
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "192.168.0.0", "v4mask": 30, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback",
+ "add_static_route":"yes"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "1.0.2.17/32",
+                    "next_hop": "192.168.0.1"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback",
+ "add_static_route":"yes"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "192.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "192.168.0.1",
+ "tag": 4001
+ }
+                ]
+ }
+ ...
+
+ BGP neighborship with Multiple phy-links, sample JSON file:
+ {
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "192.168.0.0", "v4mask": 30, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "64512",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ...
+
+
+JSON file explained
+"""""""""""""""""""
+
+Mandatory keywords/options in JSON:
+
+
+* "ipv4base" : base ipv4 address to generate ips, ex - 192.168.0.0
+* "ipv4mask" : mask for ipv4 address, ex - 30
+* "ipv6base" : base ipv6 address to generate ips, ex - fd00:
+* "ipv6mask" : mask for ipv6 address, ex - 64
+* "link_ip_start" : physical interface base ipv4 and ipv6 address
+* "lo_prefix" : loopback interface base ipv4 and ipv6 address
+* "routers" : user can add number of routers as per topology, router's name
+ can be any logical name, ex- r1 or a0.
+* "r1" : name of the router
+* "lo" : loopback interface dict, ipv4 and/or ipv6 addresses generated automatically
+* "type" : type of interface, to identify loopback interface
+* "links" : physical interfaces dict, ipv4 and/or ipv6 addresses generated
+ automatically
+* "r2-link1" : it will be used when routers have multiple links. 'r2' is router
+ name, 'link' is any logical name, '1' is to identify link number,
+ router name and link must be seperated by hyphen ("-"), ex- a0-peer1
+
+Optional keywords/options in JSON:
+
+* "bgp" : bgp configuration
+* "local_as" : Local AS number
+* "unicast" : All SAFI configuration
+* "neighbor": All neighbor details
+* "dest_link" : Destination link to which router will connect
+* "router_id" : bgp router-id
+* "source_link" : if user wants to establish bgp neighborship with loopback
+ interface, add "source_link": "lo"
+* "keepalivetimer" : Keep alive timer for BGP neighbor
+* "holddowntimer" : Hold down timer for BGP neighbor
+* "static_routes" : create static routes for routers
+* "redistribute" : redistribute static and/or connected routes
+* "prefix_lists" : create Prefix-lists for routers
+
+Building topology and configurations
+""""""""""""""""""""""""""""""""""""
+
+Topology and initial configuration will be created in setup_module(). Following
+is the sample code:
+
+.. code-block::
+
+ class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+ def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+        # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ def teardown_module(mod):
+ tgen = get_topogen()
+
+        # Stop topology and remove tmp files
+ stop_topology(tgen)
+
+
+* Note: The topology will be created in the setup module, but the routers will not
+  be started until zebra.conf and bgpd.conf are loaded onto them. For every router a
+  directory will be created in /tmp/topotests/<test_folder_name>/<router_name>, and
+  empty zebra.conf and bgpd.conf files will be created and loaded onto the routers.
+  All folders and files are deleted in the teardown module.
+
+Creating configuration files
+""""""""""""""""""""""""""""
+
+A router's configuration is saved in the config file frr_json.conf. Common
+configurations such as static routes, prefix lists and route maps can be reused
+as-is by any other protocol. The BGP config is specific to BGP protocol testing.
+
+* The JSON file is passed to the API build_config_from_json(), which looks for
+  configuration tags in the JSON file.
+* If a tag is found in the JSON, the configuration is created as per the input and
+  written to the file frr_json.conf.
+* Once JSON parsing is over, frr_json.conf is loaded onto the respective router.
+  Config loading is done using 'vtysh -f <file>'. The initial config at this point
+  is also saved to frr_json_initial.conf. This file can be used to reset the
+  configuration on a router during the course of execution.
+* Resetting the configuration is done using the FRR "reload.py" utility, which
+  calculates the difference between the router's running config and the user's
+  config and loads the delta file onto the router. API used -
+  reset_config_on_routers(), as sketched below.
+
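+The sketch below shows how a test case typically uses this API. It mirrors the
+example tests added in this change; the static route values are illustrative only:
+
+.. code-block::
+
+    from lib.topogen import get_topogen
+    from lib.common_config import reset_config_on_routers, create_static_routes
+
+    def test_something(request):
+        tgen = get_topogen()
+
+        # Roll all routers back to the saved frr_json_initial.conf
+        reset_config_on_routers(tgen)
+
+        # Apply the configuration needed by this test case only
+        input_dict = {
+            "r1": {
+                "static_routes": [{
+                    "network": "10.0.20.1/32",
+                    "next_hop": "10.0.0.2"
+                }]
+            }
+        }
+        result = create_static_routes(tgen, input_dict)
+        assert result is True, "Failed to configure static routes: {}".format(result)
+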
+Writing Tests
+"""""""""""""
+
+Test topologies should always be bootstrapped from the
+example-test/test_example.py, because it contains important boilerplate code
+that can't be avoided, like:
+
+imports: os, sys, pytest, topotest/topogen and mininet topology class
+
+The global variable CWD (current working directory), which is most likely going
+to be used to reference the routers' configuration file location
+
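+For instance, the example tests in this change derive CWD and read the topology
+JSON relative to it (the JSON file name below is illustrative):
+
+.. code-block::
+
+    import os
+    import sys
+    import json
+
+    # Save the current working directory to find configuration files
+    CWD = os.path.dirname(os.path.realpath(__file__))
+    sys.path.append(os.path.join(CWD, '../'))
+
+    # Read the topology and initial configuration from the JSON file
+    jsonFile = "{}/example_topojson.json".format(CWD)
+    with open(jsonFile, 'r') as topoJson:
+        topo = json.load(topoJson)
+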
+Example:
+
+
+* The topology class that inherits from Mininet Topo class
+
+.. code-block::
+
+ class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+ # topology build code
+
+
+* pytest setup_module() and teardown_module() to start the topology
+
+.. code-block::
+
+ def setup_module(_m):
+ tgen = Topogen(TemplateTopo)
+
+ # Starting topology, create tmp files which are loaded to routers
+        # to start daemons and then start routers
+ start_topology(tgen, CWD)
+
+ def teardown_module(_m):
+ tgen = get_topogen()
+
+        # Stop topology and remove tmp files
+ stop_topology(tgen, CWD)
+
+
+* __main__ initialization code (to support running the script directly)
+
+.. code-block::
+
+    if __name__ == '__main__':
+        sys.exit(pytest.main(["-s"]))
+
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index e12bc37256..a0a574a79c 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -22,7 +22,7 @@ Installing Mininet Infrastructure
apt-get install python-pip
apt-get install iproute
pip install ipaddr
- pip install pytest
+ pip install "pytest<5"
pip install exabgp==3.4.17 (Newer 4.0 version of exabgp is not yet
supported)
useradd -d /var/run/exabgp/ -s /bin/false exabgp
diff --git a/doc/user/installation.rst b/doc/user/installation.rst
index 64949fc8ea..b45c83ca1c 100644
--- a/doc/user/installation.rst
+++ b/doc/user/installation.rst
@@ -331,22 +331,6 @@ options to the configuration script.
Look for YANG modules in `dir` [`prefix`/share/yang]. Note that the FRR
YANG modules will be installed here.
-.. option:: --with-libyang-pluginsdir <dir>
-
- Look for libyang plugins in `dir` [`prefix`/lib/frr/libyang_plugins].
- Note that the FRR libyang plugins will be installed here.
-
- This option is meaningless with libyang 0.16.74 or newer and will be
- removed once support for older libyang versions is dropped.
-
-When it's desired to run FRR without installing it in the system, it's possible
-to configure it as follows to look for YANG modules and libyang plugins in the
-compile directory:
-.. code-block:: shell
-
- ./configure --with-libyang-pluginsdir="`pwd`/yang/libyang_plugins/.libs" \
- --with-yangmodelsdir="`pwd`/yang"
-
Python dependency, documentation and tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/eigrpd/eigrp_vty.c b/eigrpd/eigrp_vty.c
index 1a1634ca91..d51faaac59 100644
--- a/eigrpd/eigrp_vty.c
+++ b/eigrpd/eigrp_vty.c
@@ -326,7 +326,7 @@ DEFUN (eigrp_timers_active,
"timers active-time <(1-65535)|disabled>",
"Adjust routing timers\n"
"Time limit for active state\n"
- "Active state time limit in minutes\n"
+ "Active state time limit in seconds\n"
"Disable time limit for active state\n")
{
// struct eigrp *eigrp = vty->index;
@@ -341,7 +341,7 @@ DEFUN (no_eigrp_timers_active,
NO_STR
"Adjust routing timers\n"
"Time limit for active state\n"
- "Active state time limit in minutes\n"
+ "Active state time limit in seconds\n"
"Disable time limit for active state\n")
{
// struct eigrp *eigrp = vty->index;
diff --git a/lib/northbound.c b/lib/northbound.c
index b2ae1f66cb..48b450e969 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -1386,19 +1386,12 @@ int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
*/
ly_errno = 0;
dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
- LYD_PATH_OPT_UPDATE);
- if (!dnode && ly_errno) {
+ LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
+ if (!dnode) {
flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
__func__);
return NB_ERR;
}
- /*
- * We can remove the following two lines once we depend on
- * libyang-v0.16-r2, which has the LYD_PATH_OPT_NOPARENTRET flag for
- * lyd_new_path().
- */
- dnode = yang_dnode_get(dnode, xpath);
- assert(dnode);
/*
* Create a linked list to sort the data nodes starting from the root.
diff --git a/lib/northbound.h b/lib/northbound.h
index 8f6753506b..69d7c8e0ee 100644
--- a/lib/northbound.h
+++ b/lib/northbound.h
@@ -337,6 +337,27 @@ struct nb_callbacks {
*/
void (*cli_show)(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
+
+ /*
+ * Optional callback to show the CLI node end for lists or containers.
+ *
+ * vty
+ * The vty terminal to dump the configuration to.
+ *
+ * dnode
+ * libyang data node that should be shown in the form of a CLI
+ * command.
+ */
+ void (*cli_show_end)(struct vty *vty, struct lyd_node *dnode);
};
/*
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index 7b7b526af0..c691bb27aa 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -434,10 +434,29 @@ static int nb_cli_candidate_load_transaction(struct vty *vty,
return CMD_SUCCESS;
}
+/*
+ * ly_iter_next_up: detects when the iteration is about to move up in the yang model.
+ *
+ * If the next step of the iteration moves upwards, return the parent node,
+ * otherwise return NULL.
+ */
+static struct lyd_node *ly_iter_next_up(const struct lyd_node *elem)
+{
+ /* Are we going downwards? Is this still not a leaf? */
+ if (!(elem->schema->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA)))
+ return NULL;
+
+ /* Are there still leaves in this branch? */
+ if (elem->next != NULL)
+ return NULL;
+
+ return elem->parent;
+}
+
void nb_cli_show_dnode_cmds(struct vty *vty, struct lyd_node *root,
bool with_defaults)
{
- struct lyd_node *next, *child;
+ struct lyd_node *next, *child, *parent;
LY_TREE_DFS_BEGIN (root, next, child) {
struct nb_node *nb_node;
@@ -452,6 +471,19 @@ void nb_cli_show_dnode_cmds(struct vty *vty, struct lyd_node *root,
(*nb_node->cbs.cli_show)(vty, child, with_defaults);
next:
+ /*
+ * When transiting upwards in the yang model we should
+ * give the previous container/list node a chance to
+ * print its close vty output (e.g. "!" or "end-family"
+ * etc...).
+ */
+ parent = ly_iter_next_up(child);
+ if (parent != NULL) {
+ nb_node = parent->schema->priv;
+ if (nb_node->cbs.cli_show_end)
+ (*nb_node->cbs.cli_show_end)(vty, parent);
+ }
+
LY_TREE_DFS_END(root, next, child);
}
}
diff --git a/lib/pbr.h b/lib/pbr.h
index 1425e679c5..ecd50447e5 100644
--- a/lib/pbr.h
+++ b/lib/pbr.h
@@ -121,6 +121,7 @@ struct pbr_rule {
#define MATCH_PKT_LEN_INVERSE_SET (1 << 8)
#define MATCH_FRAGMENT_INVERSE_SET (1 << 9)
#define MATCH_ICMP_SET (1 << 10)
+#define MATCH_PROTOCOL_SET (1 << 11)
extern int zapi_pbr_rule_encode(uint8_t cmd, struct stream *s,
struct pbr_rule *zrule);
diff --git a/tests/topotests/bgp-basic-functionality-topo1/__init__.py b/tests/topotests/bgp-basic-functionality-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp-basic-functionality-topo1/__init__.py
diff --git a/tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json b/tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json
new file mode 100644
index 0000000000..c778ae4bed
--- /dev/null
+++ b/tests/topotests/bgp-basic-functionality-topo1/bgp_basic_functionality.json
@@ -0,0 +1,172 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
new file mode 100755
index 0000000000..095ebe3344
--- /dev/null
+++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
@@ -0,0 +1,595 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test BGP basic functionality:
+
+Test steps
+- Create topology (setup module)
+  Create a 4-router topology; r1, r2 and r3 are in IBGP and
+  r3 and r4 are in EBGP
+- Bring up topology
+- Verify for bgp to converge
+- Modify/Delete and verify router-id
+- Modify and verify bgp timers
+- Create and verify static routes
+- Modify and verify admin distance for existing static routes
+- Test advertise network using network command
+- Verify clear bgp
+- Test bgp convergence with loopback interface
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../lib/'))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, reset_config_on_routers, create_static_routes,
+ verify_rib, verify_admin_distance_for_static_routes
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp, verify_router_id,
+ modify_as_number, verify_as_numbers, clear_bgp_and_verify,
+ verify_bgp_timers_and_functionality
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/bgp_basic_functionality.json".format(CWD)
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+
+class CreateTopo(Topo):
+ """
+ Test BasicTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function"""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global BGP_CONVERGENCE
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}". \
+ format(BGP_CONVERGENCE)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: {}".
+ format(time.asctime(time.localtime(time.time()))))
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_modify_and_delete_router_id(request):
+ """ Test to modify, delete and verify router-id. """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Modify router id
+ input_dict = {
+ 'r1': {
+ "bgp": {
+ 'router_id': '12.12.12.12'
+ }
+ },
+ 'r2': {
+ "bgp": {
+ 'router_id': '22.22.22.22'
+ }
+ },
+ 'r3': {
+ "bgp": {
+ 'router_id': '33.33.33.33'
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".\
+ format(tc_name, result)
+
+ # Verifying router id once modified
+ result = verify_router_id(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".\
+ format(tc_name, result)
+
+ # Delete router id
+ input_dict = {
+ 'r1': {
+ "bgp": {
+ 'del_router_id': True
+ }
+ },
+ 'r2': {
+ "bgp": {
+ 'del_router_id': True
+ }
+ },
+ 'r3': {
+ "bgp": {
+ 'del_router_id': True
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying router id once deleted
+ # Once router-id is deleted, highest interface ip should become
+ # router-id
+ result = verify_router_id(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_config_with_4byte_as_number(request):
+ """
+ Configure BGP with 4 byte ASN and verify it works fine
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 131079
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 131079
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 131080
+ }
+ }
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ result = verify_as_numbers(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_timers_functionality(request):
+ """
+ Test to modify bgp timers and verify timers functionality.
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+    # API call to modify BGP timers
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link":{
+ "r1": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, deepcopy(input_dict))
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Api call to clear bgp, so timer modification would take place
+ clear_bgp_and_verify(tgen, topo, 'r1')
+
+ # Verifying bgp timers functionality
+ result = verify_bgp_timers_and_functionality(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+
+
+def test_static_routes(request):
+ """ Test to create and verify static routes. """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to create static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Api call to redistribute static routes
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying RIB routes
+ dut = 'r3'
+ protocol = 'bgp'
+ next_hop = '10.0.0.2'
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop,
+ protocol=protocol)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_admin_distance_for_existing_static_routes(request):
+ """ Test to modify and verify admin distance for existing static routes."""
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "admin_distance": 10,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying admin distance once modified
+ result = verify_admin_distance_for_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_advertise_network_using_network_command(request):
+ """ Test advertise networks using network command."""
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "20.0.0.0/32",
+ "no_of_network": 10
+ },
+ {
+ "network": "30.0.0.0/32",
+ "no_of_network": 10
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Verifying RIB routes
+ dut = 'r2'
+ protocol = "bgp"
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_clear_bgp_and_verify(request):
+ """
+    Create a few static routes and verify that all routes are learned via BGP,
+    then clear BGP and verify that all routes are still intact
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # clear ip bgp
+ result = clear_bgp_and_verify(tgen, topo, 'r1')
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_with_loopback_interface(request):
+ """
+ Test BGP with loopback interface
+
+    Add the key:value pairs "dest_link": "lo" and "source_link": "lo" to the
+    peer dict of the input JSON file for all routers, so the configuration is
+    created using the loopback interface. Once BGP neighborship is up, verify
+    BGP convergence.
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ for routerN in sorted(topo['routers'].keys()):
+ for bgp_neighbor in \
+ topo['routers'][routerN]['bgp']['address_family']['ipv4'][
+ 'unicast']['neighbor'].keys():
+
+ # Adding ['source_link'] = 'lo' key:value pair
+ topo['routers'][routerN]['bgp']['address_family']['ipv4'][
+ 'unicast']['neighbor'][bgp_neighbor]["dest_link"] = {
+ 'lo': {
+ "source_link": "lo",
+ }
+ }
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.2"
+ },
+ {
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.6"
+ }
+ ]
+ },
+ "r2": {
+ "static_routes": [{
+ "network": "1.0.1.17/32",
+ "next_hop": "10.0.0.1"
+ },
+ {
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.10"
+ }
+ ]
+ },
+ "r3": {
+ "static_routes": [{
+ "network": "1.0.1.17/32",
+ "next_hop": "10.0.0.5"
+ },
+ {
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.9"
+ },
+ {
+ "network": "1.0.4.17/32",
+ "next_hop": "10.0.0.14"
+ }
+ ]
+ },
+ "r4": {
+ "static_routes": [{
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.13"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ # Api call verify whether BGP is converged
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-path-attributes-topo1/__init__.py b/tests/topotests/bgp-path-attributes-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp-path-attributes-topo1/__init__.py
diff --git a/tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json b/tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json
new file mode 100644
index 0000000000..15b7ec13be
--- /dev/null
+++ b/tests/topotests/bgp-path-attributes-topo1/bgp_path_attributes.json
@@ -0,0 +1,220 @@
+{
+ "ipv4base":"10.0.0.0",
+ "ipv4mask":30,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"10.0.0.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"555",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"555",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r5":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"555",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r6": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "666",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r7": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"666",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ },
+ "r7": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r6":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"},
+ "r7": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"777",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r6": {}
+ }
+ },
+ "r7": {
+ "dest_link": {
+ "r6": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r7":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r5": {"ipv4": "auto", "ipv6": "auto"},
+ "r6": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp":{
+ "local_as":"888",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r5": {
+ "dest_link": {
+ "r7": {}
+ }
+ },
+ "r6": {
+ "dest_link": {
+ "r7": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
new file mode 100755
index 0000000000..abd6b396d1
--- /dev/null
+++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
@@ -0,0 +1,1078 @@
+#!/usr/bin/env python
+
+#
+# Modified work Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Original work Copyright (c) 2018 by Network Device Education
+# Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test AS-Path functionality:
+
+Setup module:
+- Create topology (setup module)
+- Bring up topology
+- Verify BGP convergence
+
+Test cases:
+1. Test next_hop attribute and verify best path is installed as per
+ reachable next_hop
+2. Test aspath attribute and verify best path is installed as per
+ shortest AS-Path
+3. Test localpref attribute and verify best path is installed as per
+   highest local-preference
+4. Test weight attribute and verify best path is installed as per
+ highest weight
+5. Test origin attribute and verify best path is installed as per
+ IGP>EGP>INCOMPLETE rule
+6. Test med attribute and verify best path is installed as per lowest
+ med value
+7. Test admin distance and verify best path is installed as per lowest
+ admin distance
+
+Teardown module:
+- Bring down the topology
+- Stop routers
+
+"""
+
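+# Background: the attribute tests below rely on FRR's standard BGP best-path
+# selection order, roughly: highest weight, then highest local-preference,
+# then shortest AS_PATH, then lowest origin (IGP < EGP < INCOMPLETE), then
+# lowest MED, with eBGP preferred over iBGP. A minimal sketch of that
+# comparison as a Python sort key (illustrative only, not used by the tests;
+# the attribute names are hypothetical):
+#
+#     def best_path_key(path):
+#         origin_rank = {"IGP": 0, "EGP": 1, "INCOMPLETE": 2}[path["origin"]]
+#         return (-path["weight"], -path["localpref"], len(path["aspath"]),
+#                 origin_rank, path["med"])
+#
+#     best = min(candidate_paths, key=best_path_key)
+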
+import os
+import sys
+import json
+import time
+import inspect
+import ipaddress
+from time import sleep
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, reset_config_on_routers,
+ verify_rib, create_static_routes,
+ create_prefix_lists, verify_prefix_lists,
+ create_route_maps
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify, verify_best_path_as_per_bgp_attribute,
+ verify_best_path_as_per_admin_distance
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/bgp_path_attributes.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+
+class CreateTopo(Topo):
+ """
+ Test CreateTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Building topology and configuration from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: %s", testsuite_run_time)
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Checking BGP convergence
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, ("setup_module :Failed \n Error:"
+ " {}".format(result))
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: %s",
+ time.asctime(time.localtime(time.time())))
+ logger.info("=" * 40)
+
+
+#####################################################
+##
+## Testcases
+##
+#####################################################
+def test_next_hop_attribute(request):
+ """
+    Verify that routes are not installed while the next_hop is unreachable,
+    then make the next hop reachable using the next_hop_self command and
+    verify that the routes are installed.
+ """
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp":{
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
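+    # The routes advertised by r7 are expected to be absent from r1's RIB at
+    # this point, because their eBGP next hop is still unreachable from r1
+    # (see the docstring); the relaxed assert below tolerates that outcome.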
+ # Verifying RIB routes
+ dut = "r1"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: %s", result)
+
+ # Configure next-hop-self to bgp neighbor
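+    # "next_hop_self": True is assumed to translate to FRR's
+    # "neighbor <peer> next-hop-self" under "address-family ipv4 unicast",
+    # so the iBGP peers r2/r3 rewrite the eBGP next hop towards r1.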
+ input_dict_1 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r1"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_aspath_attribute(request):
+ " Verifying AS_PATH attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "aspath"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_localpref_attribute(request):
+ " Verifying LOCAL PREFERENCE attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Prefix list
+ input_dict_2 = {
+ "r2": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_1": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create route map
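+    # The dict below is assumed to be rendered by create_route_maps() into
+    # standard FRR CLI along the lines of:
+    #   route-map RMAP_LOCAL_PREF permit 10
+    #    match ip address prefix-list pf_ls_1
+    #    set local-preference 1000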
+ input_dict_3 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_LOCAL_PREF": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_1"
+ }
+ },
+ "set": {
+ "localpref": 1000
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {"name": "RMAP_LOCAL_PREF",
+ "direction": "in"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "localpref"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_weight_attribute(request):
+ " Verifying WEIGHT attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_1": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create route map
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMAP_WEIGHT": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_1"
+ }
+ },
+ "set": {
+ "weight": 500
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {"name": "RMAP_WEIGHT",
+ "direction": "in"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "weight"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_origin_attribute(request):
+ " Verifying ORIGIN attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to create static routes
+ input_dict_3 = {
+ "r5": {
+ "static_routes": [
+ {
+ "network": "200.50.2.0/32",
+ "next_hop": "10.0.0.26"
+ },
+ {
+ "network": "200.60.2.0/32",
+ "next_hop": "10.0.0.26"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Configure next-hop-self to bgp neighbor
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "origin"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_med_attribute(request):
+ " Verifying MED attribute functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to advertise networks
+ input_dict = {
+ "r4": {
+ "bgp":{
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to advertise networks
+
+
+ # Configure next-hop-self to bgp neighbor
+
+
+ # Create Prefix list
+ input_dict_3 = {
+ "r2": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_r2": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ },
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_r3": [{
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create route map
+ input_dict_3 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_MED_R2": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_r2"
+ }
+ },
+ "set": {
+ "med": 100
+ }
+ }]
+ }
+ },
+ "r3": {
+ "route_maps": {
+ "RMAP_MED_R3": [{
+ "action": "permit",
+ "match": {
+ "ipv4": {
+ "prefix_lists": "pf_ls_r3"
+ }
+ },
+ "set": {
+ "med": 10
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r2-link1": {
+ "route_maps": [
+ {"name": "RMAP_MED_R2",
+ "direction": "in"}
+ ]
+ }
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r2": {"next_hop_self": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {"next_hop_self": True}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {"name": "RMAP_MED_R3",
+ "direction": "in"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "med"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+    logger.info("Testcase %s :Passed \n", tc_name)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+def test_admin_distance(request):
+ " Verifying admin distance functionality"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Api call to create static routes
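+    # Two static routes to the same prefix with different admin distances;
+    # presumably rendered as "ip route 200.50.2.0/32 10.0.0.14 80" and
+    # "ip route 200.50.2.0/32 10.0.0.18 60" (assumption about the helper's
+    # CLI mapping), so the distance-60 path should be preferred.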
+ input_dict = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "200.50.2.0/32",
+ "admin_distance": 80,
+ "next_hop": "10.0.0.14"
+ },
+ {
+ "network": "200.50.2.0/32",
+ "admin_distance": 60,
+ "next_hop": "10.0.0.18"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+    result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying best path
+ dut = "r1"
+ attribute = "admin_distance"
+ result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut,
+ input_dict, attribute)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-prefix-list-topo1/__init__.py b/tests/topotests/bgp-prefix-list-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp-prefix-list-topo1/__init__.py
diff --git a/tests/topotests/bgp-prefix-list-topo1/prefix_lists.json b/tests/topotests/bgp-prefix-list-topo1/prefix_lists.json
new file mode 100644
index 0000000000..3bb07ad994
--- /dev/null
+++ b/tests/topotests/bgp-prefix-list-topo1/prefix_lists.json
@@ -0,0 +1,123 @@
+{
+ "address_types": ["ipv4"],
+ "ipv4base":"10.0.0.0",
+ "ipv4mask":30,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"10.0.0.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r4":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback", "add_static_route":"yes"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
new file mode 100755
index 0000000000..25a346f20d
--- /dev/null
+++ b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
@@ -0,0 +1,1450 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test prefix-list functionality:
+
+Test steps
+- Create topology (setup module)
+  Creating a 4-router topology in which r1, r2 and r3 are in IBGP and
+  r3 and r4 are in EBGP
+- Bring up topology
+- Verify for bgp to converge
+
+IP prefix-list tests
+- Test ip prefix-lists IN permit
+- Test ip prefix-lists OUT permit
+- Test ip prefix-lists IN deny and permit any
+- Test delete ip prefix-lists
+- Test ip prefix-lists OUT deny and permit any
+- Test modify ip prefix-lists IN permit to deny
+- Test modify ip prefix-lists IN deny to permit
+- Test modify ip prefix-lists OUT permit to deny
+- Test modify prefix-lists OUT deny to permit
+- Test ip prefix-lists implicit deny
+"""
+
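+# The prefix-list dicts used below are assumed to be rendered by
+# create_prefix_lists() into standard FRR CLI of the form
+#   ip prefix-list <name> seq <n> (permit|deny) (<prefix> [le <len>] | any)
+# e.g. "ip prefix-list pf_list_1 seq 10 permit 10.0.0.0/8 le 32".
+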
+import sys
+import json
+import time
+import os
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, reset_config_on_routers,
+ verify_rib, create_static_routes,
+ create_prefix_lists, verify_prefix_lists
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/prefix_lists.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+BGP_CONVERGENCE = False
+
+
+class BGPPrefixListTopo(Topo):
+ """
+ Test BGPPrefixListTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("="*40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(BGPPrefixListTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global BGP_CONVERGENCE
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: {}".
+ format(time.asctime(time.localtime(time.time()))))
+ logger.info("="*40)
+
+#####################################################
+#
+# Tests starting
+#
+#####################################################
+
+
+def test_ip_prefix_lists_in_permit(request):
+ """
+ Create ip prefix list and test permit prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": 10,
+ "network": "any",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure bgp neighbor with prefix list
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_out_permit(request):
+ """
+ Create ip prefix list and test permit prefixes out direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Static routes
+ input_dict_1 = {
+ "r1": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_5 = {
+ "r3": {
+ "static_routes": [{
+ "network": "10.0.0.2/30",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.9"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_5)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": 10,
+ "network": "20.0.20.1/32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ # Configure bgp neighbor with prefix list
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_in_deny_and_permit_any(request):
+ """
+ Create ip prefix list and test permit/deny prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 1,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ # Create ip prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.20.1/32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure bgp neighbor with prefix list
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ # Configure prefix list to bgp neighbor
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_delete_prefix_lists(request):
+ """
+ Delete ip prefix list
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.20.1/32",
+ "action": "deny"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_prefix_lists(tgen, input_dict_2)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ logger.info(result)
+
+ # Delete prefix list
+ input_dict_2 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.20.1/32",
+ "action": "deny",
+ "delete": True
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_out_deny_and_permit_any(request):
+ """
+ Create ip prefix list and test deny/permit any prefixes OUT direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Static Routes
+ input_dict_1 = {
+ "r2": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.1"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_in_permit_to_deny(request):
+ """
+ Modify ip prefix list and test permit to deny prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link":{
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
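+    # The inbound prefix-list was changed on an established session, so the
+    # test clears BGP here to make sure the updated inbound policy is applied
+    # to routes that were already received.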
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_in_deny_to_permit(request):
+ """
+ Modify ip prefix list and test deny to permit prefixes IN direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_out_permit_to_deny(request):
+ """
+ Modify ip prefix list and test permit to deny prefixes OUT direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+
+ # Create ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_prefix_lists_out_deny_to_permit(request):
+ """
+ Modify ip prefix list and test deny to permit prefixes OUT direction
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to redistribute static routes
+ # Create ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "deny"
+ },
+ {
+ "seqid": "11",
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_2 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link":{
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Modify ip prefix list
+ input_dict_1 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Api call to clear bgp, so config changes would be reflected
+ dut = "r3"
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ip_prefix_lists_implicit_deny(request):
+ """
+ Create ip prefix list and test implicit deny
+ """
+
+ tgen = get_topogen()
+ if BGP_CONVERGENCE is not True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ # Create Static Routes
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Create Static Routes
+ input_dict_1 = {
+ "r2": {
+ "static_routes": [{
+ "network": "20.0.20.1/32",
+ "no_of_ip": 9,
+ "next_hop": "10.0.0.1"
+ }]
+ }
+ }
+    result = create_static_routes(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+    # Create ip prefix list
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1": [{
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit"
+ }]
+ }
+ }
+ }
+
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Configure prefix list to bgp neighbor
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "out"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+    # Verifying RIB routes; routes hitting the implicit deny should not be present
+ dut = "r4"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/example-topojson-test/__init__.py b/tests/topotests/example-topojson-test/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json
new file mode 100644
index 0000000000..3968348b1f
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/example_topojson_multiple_links.json
@@ -0,0 +1,152 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "10.0.0.1/30",
+ "next_hop": "10.0.0.5"
+ }
+ ]
+ }
+ }
+}
+
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
new file mode 100755
index 0000000000..8e794b9946
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<example>.py: Test <example tests>.
+"""
+
+import os
+import sys
+import json
+import time
+import inspect
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, verify_rib
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/example_topojson_multiple_links.json".format(CWD)
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+bgp_convergence = False
+input_dict = {}
+
+
+class TemplateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+        # The only purpose of this function is to create the topology
+        # as defined in the input json file.
+        #
+        # Example
+        #
+        # Creating 2 routers with 2 links between them,
+        # one of which is used to establish BGP neighborship
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+    # The only purpose of this function is to create the configuration
+    # as defined in the input json file.
+ #
+ # Example
+ #
+ # Creating configuration defined in input JSON
+ # file, example, BGP config, interface config, static routes
+ # config, prefix list config
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def test_bgp_convergence(request):
+ " Test BGP daemon convergence "
+
+ tgen = get_topogen()
+ global bgp_convergence
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    # API call to verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "test_bgp_convergence failed.. \n" \
+ " Error: {}".format(bgp_convergence)
+
+ logger.info("BGP is converged successfully \n")
+ write_test_footer(tc_name)
+
+
+def test_static_routes(request):
+ " Test to create and verify static routes. "
+
+ tgen = get_topogen()
+ if bgp_convergence is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Static routes are created as part of initial configuration,
+ # verifying RIB
+ dut = 'r3'
+ protocol = 'bgp'
+ next_hop = '10.0.0.1'
+ input_dict = {"r1": topo["routers"]["r1"]}
+
+ # Uncomment below to debug
+ # tgen.mininet_cli()
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json b/tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json
new file mode 100644
index 0000000000..629d2d6d78
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/example_topojson.json
@@ -0,0 +1,153 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "10.0.0.1/30",
+ "next_hop": "10.0.0.5"
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
new file mode 100755
index 0000000000..315c7b3f2d
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<example>.py: Test <example tests>.
+"""
+
+import os
+import sys
+import time
+import json
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, verify_rib
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/example_topojson.json".format(CWD)
+
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+bgp_convergence = False
+input_dict = {}
+
+class TemplateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+        # The only purpose of this function is to create the topology
+        # as defined in the input json file.
+        #
+        # Example
+        #
+        # Creating 2 routers with a single link between them,
+        # which is used to establish BGP neighborship
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("="*40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+    # The only purpose of this function is to create the configuration
+    # as defined in the input json file.
+ #
+ # Example
+ #
+ # Creating configuration defined in input JSON
+ # file, example, BGP config, interface config, static routes
+ # config, prefix list config
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def test_bgp_convergence(request):
+ " Test BGP daemon convergence "
+
+ tgen = get_topogen()
+ global bgp_convergence
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    # API call to verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\
+ " Error: {}".format(bgp_convergence)
+
+ logger.info("BGP is converged successfully \n")
+ write_test_footer(tc_name)
+
+
+def test_static_routes(request):
+ " Test to create and verify static routes. "
+
+ tgen = get_topogen()
+ if bgp_convergence is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Static routes are created as part of initial configuration,
+ # verifying RIB
+ dut = 'r3'
+ next_hop = '10.0.0.1'
+ input_dict = {"r1": topo["routers"]["r1"]}
+
+ # Uncomment below to debug
+ # tgen.mininet_cli()
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/__init__.py
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json
new file mode 100644
index 0000000000..c76c6264be
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/example_topojson.json
@@ -0,0 +1,161 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.2"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ },
+ {
+ "network": "1.0.1.17/32",
+ "next_hop": "10.0.0.1"
+ },
+ {
+ "network": "1.0.3.17/32",
+ "next_hop": "10.0.0.6"
+ }
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "source_link": "lo"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "1.0.2.17/32",
+ "next_hop": "10.0.0.5"
+ },
+ {
+ "network": "10.0.0.1/30",
+ "next_hop": "10.0.0.5"
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
new file mode 100755
index 0000000000..b794b96a63
--- /dev/null
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<example>.py: Test <example tests>.
+"""
+
+import os
+import sys
+import time
+import json
+import inspect
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer, verify_rib
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/example_topojson.json".format(CWD)
+
+try:
+ with open(jsonFile, 'r') as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+bgp_convergence = False
+input_dict = {}
+
+
+class TemplateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+        # The only purpose of this function is to create the topology
+        # as defined in the input json file.
+        #
+        # Example
+        #
+        # Creating 2 routers with a single link between them,
+        # which is used to establish BGP neighborship
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("="*40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+    # The only purpose of this function is to create the configuration
+    # as defined in the input json file.
+ #
+ # Example
+ #
+ # Creating configuration defined in input JSON
+ # file, example, BGP config, interface config, static routes
+ # config, prefix list config
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def test_bgp_convergence(request):
+ " Test BGP daemon convergence "
+
+ tgen = get_topogen()
+ global bgp_convergence
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    # API call to verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\
+ " Error: {}".format(bgp_convergence)
+
+ logger.info("BGP is converged successfully \n")
+ write_test_footer(tc_name)
+
+
+def test_static_routes(request):
+ " Test to create and verify static routes. "
+
+ tgen = get_topogen()
+ if bgp_convergence is not True:
+ pytest.skip('skipped because of BGP Convergence failure')
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Static routes are created as part of initial configuration,
+ # verifying RIB
+ dut = 'r3'
+ next_hop = '10.0.0.1'
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1"
+ }
+ ]
+ }
+ }
+ # Uncomment below to debug
+ # tgen.mininet_cli()
+ result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
new file mode 100644
index 0000000000..13f8824976
--- /dev/null
+++ b/tests/topotests/lib/bgp.py
@@ -0,0 +1,1521 @@
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+from copy import deepcopy
+from time import sleep
+import traceback
+import ipaddr
+from lib import topotest
+
+from lib.topolog import logger
+
+# Import common_config to use commonly used APIs
+from lib.common_config import (create_common_configuration,
+ InvalidCLIError,
+ load_config_to_router,
+ check_address_types,
+ generate_ips,
+ find_interface_with_greater_ip)
+
+BGP_CONVERGENCE_TIMEOUT = 10
+
+
+def create_router_bgp(tgen, topo, input_dict=None, build=False):
+ """
+ API to configure bgp on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": "200",
+ "router_id": "22.22.22.22",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "advertise_networks": [
+ {
+ "network": "20.0.0.0/32",
+ "no_of_network": 10
+ },
+ {
+ "network": "30.0.0.0/32",
+ "no_of_network": 10
+ }
+ ],
+ "neighbor": {
+ "r3": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "dest_link": {
+ "r4": {
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ],
+ "route_maps": [
+ {"name": "RMAP_MED_R3",
+ "direction": "in"}
+ ],
+ "next_hop_self": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: create_router_bgp()")
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ for router in input_dict.keys():
+ if "bgp" not in input_dict[router]:
+ logger.debug("Router %s: 'bgp' not present in input_dict", router)
+ continue
+
+ result = __create_bgp_global(tgen, input_dict, router, build)
+ if result is True:
+ bgp_data = input_dict[router]["bgp"]
+
+ bgp_addr_data = bgp_data.setdefault("address_family", {})
+
+ if not bgp_addr_data:
+ logger.debug("Router %s: 'address_family' not present in "
+ "input_dict for BGP", router)
+ else:
+
+ ipv4_data = bgp_addr_data.setdefault("ipv4", {})
+ ipv6_data = bgp_addr_data.setdefault("ipv6", {})
+
+ neigh_unicast = True if ipv4_data.setdefault("unicast", {}) \
+ or ipv6_data.setdefault("unicast", {}) else False
+
+ if neigh_unicast:
+ result = __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router, build)
+
+ logger.debug("Exiting lib API: create_router_bgp()")
+ return result
+
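+# Illustrative usage sketch (comment only; mirrors the Usage section above and
+# the testcases that call this API): a testcase builds an input_dict and passes
+# it to create_router_bgp(), e.g. to enable redistribution of static routes on
+# r1:
+#
+#   input_dict = {
+#       "r1": {
+#           "bgp": {
+#               "address_family": {
+#                   "ipv4": {
+#                       "unicast": {
+#                           "redistribute": [{"redist_type": "static"}]
+#                       }
+#                   }
+#               }
+#           }
+#       }
+#   }
+#   result = create_router_bgp(tgen, topo, input_dict)
+#   assert result is True, result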
+
+def __create_bgp_global(tgen, input_dict, router, build=False):
+ """
+ Helper API to create bgp global configuration.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ True or False
+ """
+
+ result = False
+ logger.debug("Entering lib API: __create_bgp_global()")
+ try:
+
+ bgp_data = input_dict[router]["bgp"]
+ del_bgp_action = bgp_data.setdefault("delete", False)
+ if del_bgp_action:
+ config_data = ["no router bgp"]
+ result = create_common_configuration(tgen, router, config_data,
+ "bgp", build=build)
+ return result
+
+ config_data = []
+
+ if "local_as" not in bgp_data and build:
+ logger.error("Router %s: 'local_as' not present in input_dict"
+ "for BGP", router)
+ return False
+
+ local_as = bgp_data.setdefault("local_as", "")
+ cmd = "router bgp {}".format(local_as)
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {}".format(cmd, vrf_id)
+
+ config_data.append(cmd)
+
+ router_id = bgp_data.setdefault("router_id", None)
+ del_router_id = bgp_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no bgp router-id")
+ if router_id:
+ config_data.append("bgp router-id {}".format(
+ router_id))
+
+ aggregate_address = bgp_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ network = aggregate_address.setdefault("network", None)
+ if not network:
+ logger.error("Router %s: 'network' not present in "
+ "input_dict for BGP", router)
+ else:
+ cmd = "aggregate-address {}".format(network)
+
+ as_set = aggregate_address.setdefault("as_set", False)
+ summary = aggregate_address.setdefault("summary", False)
+ del_action = aggregate_address.setdefault("delete", False)
+ if as_set:
+ cmd = "{} {}".format(cmd, "as-set")
+ if summary:
+ cmd = "{} {}".format(cmd, "summary")
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ result = create_common_configuration(tgen, router, config_data,
+ "bgp", build=build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_bgp_global()")
+ return result
+
+
+def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
+ """
+ Helper API to create configuration for address-family unicast
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+ """
+
+ result = False
+ logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
+ try:
+ config_data = ["router bgp"]
+ bgp_data = input_dict[router]["bgp"]["address_family"]
+
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict:
+ continue
+
+ if not check_address_types(addr_type):
+ continue
+
+ config_data.append("address-family {} unicast".format(
+ addr_type
+ ))
+ addr_data = addr_dict["unicast"]
+ advertise_network = addr_data.setdefault("advertise_networks",
+ [])
+ for advertise_network_dict in advertise_network:
+ network = advertise_network_dict["network"]
+ if type(network) is not list:
+ network = [network]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
+
+ del_action = advertise_network_dict.setdefault("delete",
+ False)
+
+ # Generating IPs for verification
+ prefix = str(
+ ipaddr.IPNetwork(unicode(network[0])).prefixlen)
+ network_list = generate_ips(network, no_of_network)
+ for ip in network_list:
+ ip = str(ipaddr.IPNetwork(unicode(ip)).network)
+
+ cmd = "network {}/{}\n".format(ip, prefix)
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ max_paths = addr_data.setdefault("maximum_paths", {})
+ if max_paths:
+ ibgp = max_paths.setdefault("ibgp", None)
+ ebgp = max_paths.setdefault("ebgp", None)
+ if ibgp:
+ config_data.append("maximum-paths ibgp {}".format(
+ ibgp
+ ))
+ if ebgp:
+ config_data.append("maximum-paths {}".format(
+ ebgp
+ ))
+
+ aggregate_address = addr_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ ip = aggregate_address("network", None)
+ attribute = aggregate_address("attribute", None)
+ if ip:
+ cmd = "aggregate-address {}".format(ip)
+ if attribute:
+ cmd = "{} {}".format(cmd, attribute)
+
+ config_data.append(cmd)
+
+ redistribute_data = addr_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.error("Router %s: 'redist_type' not present in "
+ "input_dict", router)
+ else:
+ cmd = "redistribute {}".format(
+ redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute",
+ None)
+ if redist_attr:
+ cmd = "{} {}".format(cmd, redist_attr)
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "neighbor" in addr_data:
+ neigh_data = __create_bgp_neighbor(topo, input_dict,
+ router, addr_type)
+ config_data.extend(neigh_data)
+
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict or not check_address_types(addr_type):
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if "neighbor" in addr_data:
+ neigh_addr_data = __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type)
+
+ config_data.extend(neigh_addr_data)
+
+ result = create_common_configuration(tgen, router, config_data,
+ None, build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
+ return result
+
+
+def __create_bgp_neighbor(topo, input_dict, router, addr_type):
+ """
+ Helper API to create neighbor specific configuration
+
+ Parameters
+ ----------
+    * `topo` : json file data
+    * `input_dict` : Input dict data, required when configuring from testcase
+    * `router` : router id to be configured
+    * `addr_type` : address family type, ipv4/ipv6
+ """
+
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_neighbor()")
+
+ bgp_data = input_dict[router]["bgp"]["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+
+ for name, peer_dict in neigh_data.iteritems():
+ for dest_link, peer in peer_dict["dest_link"].iteritems():
+ nh_details = topo[name]
+ remote_as = nh_details["bgp"]["local_as"]
+ update_source = None
+
+ if dest_link in nh_details["links"].keys():
+ ip_addr = \
+ nh_details["links"][dest_link][addr_type].split("/")[0]
+ # Loopback interface
+ if "source_link" in peer and peer["source_link"] == "lo":
+ update_source = topo[router]["links"]["lo"][
+ addr_type].split("/")[0]
+
+ neigh_cxt = "neighbor {}".format(ip_addr)
+
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+ if addr_type == "ipv6":
+ config_data.append("address-family ipv6 unicast")
+ config_data.append("{} activate".format(neigh_cxt))
+
+ disable_connected = peer.setdefault("disable_connected_check",
+ False)
+ keep_alive = peer.setdefault("keep_alive", 60)
+ hold_down = peer.setdefault("hold_down", 180)
+ password = peer.setdefault("password", None)
+ max_hop_limit = peer.setdefault("ebgp_multihop", 1)
+
+ if update_source:
+ config_data.append("{} update-source {}".format(
+ neigh_cxt, update_source))
+ if disable_connected:
+ config_data.append("{} disable-connected-check".format(
+                    neigh_cxt))
+ if int(keep_alive) != 60 and int(hold_down) != 180:
+ config_data.append(
+ "{} timers {} {}".format(neigh_cxt, keep_alive,
+ hold_down))
+ if password:
+ config_data.append(
+ "{} password {}".format(neigh_cxt, password))
+
+ if max_hop_limit > 1:
+ config_data.append("{} ebgp-multihop {}".format(neigh_cxt,
+ max_hop_limit))
+ config_data.append("{} enforce-multihop".format(neigh_cxt))
+
+ logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
+ return config_data
+
+
+def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
+ """
+    Helper API to create neighbor address-family specific configuration
+    (activate, prefix lists, route maps, next-hop-self, send-community).
+
+    Parameters
+    ----------
+    * `topo` : json file data
+    * `input_dict` : Input dict data, required when configuring from testcase
+    * `router` : router id to be configured
+    * `addr_type` : address family type, ipv4/ipv6
+ """
+
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
+
+ bgp_data = input_dict[router]["bgp"]["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+
+ for name, peer_dict in deepcopy(neigh_data).iteritems():
+ for dest_link, peer in peer_dict["dest_link"].iteritems():
+ deactivate = None
+ nh_details = topo[name]
+ # Loopback interface
+ if "source_link" in peer and peer["source_link"] == "lo":
+ for destRouterLink, data in sorted(nh_details["links"].
+ iteritems()):
+ if "type" in data and data["type"] == "loopback":
+ if dest_link == destRouterLink:
+ ip_addr = \
+ nh_details["links"][destRouterLink][
+ addr_type].split("/")[0]
+
+ # Physical interface
+ else:
+ if dest_link in nh_details["links"].keys():
+
+ ip_addr = nh_details["links"][dest_link][
+ addr_type].split("/")[0]
+ if addr_type == "ipv4" and bgp_data["ipv6"]:
+ deactivate = nh_details["links"][
+ dest_link]["ipv6"].split("/")[0]
+
+ neigh_cxt = "neighbor {}".format(ip_addr)
+ config_data.append("address-family {} unicast".format(
+ addr_type
+ ))
+ if deactivate:
+ config_data.append(
+ "no neighbor {} activate".format(deactivate))
+
+ next_hop_self = peer.setdefault("next_hop_self", None)
+ send_community = peer.setdefault("send_community", None)
+ prefix_lists = peer.setdefault("prefix_lists", {})
+ route_maps = peer.setdefault("route_maps", {})
+
+ # next-hop-self
+ if next_hop_self:
+ config_data.append("{} next-hop-self".format(neigh_cxt))
+ # no_send_community
+ if send_community:
+ config_data.append("{} send-community".format(neigh_cxt))
+
+ if prefix_lists:
+ for prefix_list in prefix_lists:
+ name = prefix_list.setdefault("name", {})
+ direction = prefix_list.setdefault("direction", "in")
+ del_action = prefix_list.setdefault("delete", False)
+ if not name:
+ logger.info("Router %s: 'name' not present in "
+ "input_dict for BGP neighbor prefix lists",
+ router)
+ else:
+ cmd = "{} prefix-list {} {}".format(neigh_cxt, name,
+ direction)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if route_maps:
+ for route_map in route_maps:
+ name = route_map.setdefault("name", {})
+ direction = route_map.setdefault("direction", "in")
+ del_action = route_map.setdefault("delete", False)
+ if not name:
+ logger.info("Router %s: 'name' not present in "
+ "input_dict for BGP neighbor route name",
+ router)
+ else:
+ cmd = "{} route-map {} {}".format(neigh_cxt, name,
+ direction)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ return config_data
+
+
+#############################################
+# Verification APIs
+#############################################
+def verify_router_id(tgen, topo, input_dict):
+ """
+ Running command "show ip bgp json" for DUT and reading router-id
+ from input_dict and verifying with command output.
+    1. Statically modified router-id should take effect
+ 2. When static router-id is deleted highest loopback should
+ become router-id
+    3. When loopback intf is down then highest physical intf
+ should become router-id
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: input dictionary, have details of Device Under Test, for
+ which user wants to test the data
+ Usage
+ -----
+ # Verify if router-id for r1 is 12.12.12.12
+ input_dict = {
+ "r1":{
+ "router_id": "12.12.12.12"
+ }
+    }
+    # Verify that router-id for r1 is highest interface ip
+ input_dict = {
+ "routers": ["r1"]
+ }
+ result = verify_router_id(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_router_id()")
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ del_router_id = input_dict[router]["bgp"].setdefault(
+ "del_router_id", False)
+
+ logger.info("Checking router %s router-id", router)
+ show_bgp_json = rnode.vtysh_cmd("show ip bgp json",
+ isjson=True)
+ router_id_out = show_bgp_json["routerId"]
+ router_id_out = ipaddr.IPv4Address(unicode(router_id_out))
+
+ # Once router-id is deleted, highest interface ip should become
+ # router-id
+ if del_router_id:
+ router_id = find_interface_with_greater_ip(topo, router)
+ else:
+ router_id = input_dict[router]["bgp"]["router_id"]
+ router_id = ipaddr.IPv4Address(unicode(router_id))
+
+ if router_id == router_id_out:
+ logger.info("Found expected router-id %s for router %s",
+ router_id, router)
+ else:
+ errormsg = "Router-id for router:{} mismatch, expected:" \
+ " {} but found:{}".format(router, router_id,
+ router_id_out)
+ return errormsg
+
+ logger.info("Exiting lib API: verify_router_id()")
+ return True
+
+
+def verify_bgp_convergence(tgen, topo):
+ """
+    API will verify if BGP is converged within the given time frame.
+    Running "show bgp summary json" command and verifying bgp neighbor
+    state is Established.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+
+ Usage
+ -----
+    # To verify if BGP is converged for all the routers used in the
+    # topology
+    results = verify_bgp_convergence(tgen, topo)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_bgp_confergence()")
+ for router, rnode in tgen.routers().iteritems():
+ logger.info("Verifying BGP Convergence on router %s:", router)
+
+ for retry in range(1, 11):
+ show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ total_peer = 0
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ for addr_type in bgp_addr_type.keys():
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ no_of_peer = 0
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ neighbor_ip = \
+ data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s", router)
+ break
+ else:
+ logger.warning("BGP is not yet Converged for router %s",
+ router)
+ sleeptime = 2 * retry
+ if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on"
+ " router %s...", sleeptime, router)
+ sleep(sleeptime)
+ else:
+ show_bgp_summary = rnode.vtysh_cmd("show bgp summary")
+ errormsg = "TIMEOUT!! BGP is not converged in {} " \
+ "seconds for router {} \n {}".format(
+ BGP_CONVERGENCE_TIMEOUT, router,
+ show_bgp_summary)
+ return errormsg
+
+ logger.info("Exiting API: verify_bgp_confergence()")
+ return True
+
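+# Illustrative usage sketch (comment only; follows the pattern used by the
+# topotests that import this library): the convergence result is cached in a
+# module-level flag so later testcases can skip early if BGP never converged,
+# e.g.
+#
+#   BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+#   assert BGP_CONVERGENCE is True, ("setup failed: {}"
+#                                    .format(BGP_CONVERGENCE))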
+
+def modify_as_number(tgen, topo, input_dict):
+ """
+ API reads local_as and remote_as from user defined input_dict and
+ modify router"s ASNs accordingly. Router"s config is modified and
+ recent/changed config is loadeded to router.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : defines for which router ASNs needs to be modified
+
+ Usage
+ -----
+ To modify ASNs for router r1
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ }
+    }
+    result = modify_as_number(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: modify_as_number()")
+ try:
+
+ new_topo = deepcopy(topo["routers"])
+ router_dict = {}
+ for router in input_dict.keys():
+ # Remove bgp configuration
+
+ router_dict.update({
+ router: {
+ "bgp": {
+ "delete": True
+ }
+ }
+ })
+
+ new_topo[router]["bgp"]["local_as"] = \
+ input_dict[router]["bgp"]["local_as"]
+
+ logger.info("Removing bgp configuration")
+ create_router_bgp(tgen, topo, router_dict)
+
+ logger.info("Applying modified bgp configuration")
+ create_router_bgp(tgen, new_topo)
+
+ except Exception as e:
+ # handle any exception
+ logger.error("Error %s occured. Arguments %s.", e.message, e.args)
+
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.info("Exiting lib API: modify_as_number()")
+
+ return True
+
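+# Illustrative usage sketch (comment only, values are the same hypothetical
+# ones as in the docstring above): a change made with modify_as_number() can
+# be checked afterwards with verify_as_numbers() using the same input_dict,
+# e.g.
+#
+#   input_dict = {"r1": {"bgp": {"local_as": 131079}}}
+#   result = modify_as_number(tgen, topo, input_dict)
+#   assert result is True, result
+#   result = verify_as_numbers(tgen, topo, input_dict)
+#   assert result is True, result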
+
+def verify_as_numbers(tgen, topo, input_dict):
+ """
+ This API is to verify AS numbers for given DUT by running
+ "show ip bgp neighbor json" command. Local AS and Remote AS
+    will be verified against input_dict data and command output.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: defines - for which router, AS numbers needs to be verified
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ }
+ }
+    result = verify_as_numbers(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_as_numbers()")
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ logger.info("Verifying AS numbers for dut %s:", router)
+
+ show_ip_bgp_neighbor_json = rnode.vtysh_cmd(
+ "show ip bgp neighbor json", isjson=True)
+ local_as = input_dict[router]["bgp"]["local_as"]
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ for addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"][
+ "neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"]
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ neighbor_ip = None
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type]. \
+ split("/")[0]
+ neigh_data = show_ip_bgp_neighbor_json[neighbor_ip]
+ # Verify Local AS for router
+ if neigh_data["localAs"] != local_as:
+ errormsg = "Failed: Verify local_as for dut {}," \
+ " found: {} but expected: {}".format(
+ router, neigh_data["localAs"],
+ local_as)
+ return errormsg
+ else:
+ logger.info("Verified local_as for dut %s, found"
+ " expected: %s", router, local_as)
+
+ # Verify Remote AS for neighbor
+ if neigh_data["remoteAs"] != remote_as:
+ errormsg = "Failed: Verify remote_as for dut " \
+ "{}'s neighbor {}, found: {} but " \
+ "expected: {}".format(
+ router, bgp_neighbor,
+ neigh_data["remoteAs"], remote_as)
+ return errormsg
+ else:
+ logger.info("Verified remote_as for dut %s's "
+ "neighbor %s, found expected: %s",
+ router, bgp_neighbor, remote_as)
+
+ logger.info("Exiting lib API: verify_AS_numbers()")
+ return True
+
+
+def clear_bgp_and_verify(tgen, topo, router):
+ """
+    This API is to clear bgp neighborship and verify that the neighborship
+    comes back up (BGP is converged) using the "show bgp summary json"
+    command. It also verifies that every neighbor's uptime differs before
+    and after the clear, since the uptime must change once bgp sessions are
+    cleared with the "clear ip bgp *"/"clear bgp ipv6 *" commands.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `router`: device under test
+
+ Usage
+ -----
+    result = clear_bgp_and_verify(tgen, topo, dut)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: clear_bgp_and_verify()")
+
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ peer_uptime_before_clear_bgp = {}
+ # Verifying BGP convergence before bgp clear command
+ for retry in range(1, 11):
+ sleeptime = 2 * retry
+ if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
+ " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
+ return errormsg
+
+ show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+ logger.info(show_bgp_json)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type:
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+
+ # Peer up time dictionary
+ peer_uptime_before_clear_bgp[bgp_neighbor] = \
+ ipv4_data[neighbor_ip]["peerUptime"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ # Peer up time dictionary
+ peer_uptime_before_clear_bgp[bgp_neighbor] = \
+ ipv6_data[neighbor_ip]["peerUptime"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s before bgp"
+ " clear", router)
+ break
+ else:
+ logger.warning("BGP is not yet Converged for router %s "
+ "before bgp clear", router)
+
+ # Clearing BGP
+ logger.info("Clearing BGP neighborship for router %s..", router)
+ for addr_type in bgp_addr_type.keys():
+ if addr_type == "ipv4":
+ rnode.vtysh_cmd("clear ip bgp *")
+ elif addr_type == "ipv6":
+ rnode.vtysh_cmd("clear bgp ipv6 *")
+
+ peer_uptime_after_clear_bgp = {}
+ # Verifying BGP convergence after bgp clear command
+ for retry in range(1, 11):
+ sleeptime = 2 * retry
+ if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
+ " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
+ return errormsg
+
+ show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type:
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].\
+ split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ peer_uptime_after_clear_bgp[bgp_neighbor] = \
+ ipv4_data[neighbor_ip]["peerUptime"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+ # Peer up time dictionary
+ peer_uptime_after_clear_bgp[bgp_neighbor] = \
+ ipv6_data[neighbor_ip]["peerUptime"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s after bgp clear",
+ router)
+ break
+ else:
+ logger.warning("BGP is not yet Converged for router %s after"
+ " bgp clear", router)
+
+    # Comparing peerUptime dictionaries
+ if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
+ logger.info("BGP neighborship is reset after clear BGP on router %s",
+ router)
+ else:
+ errormsg = "BGP neighborship is not reset after clear bgp on router" \
+ " {}".format(router)
+ return errormsg
+
+ logger.info("Exiting lib API: clear_bgp_and_verify()")
+ return True
+
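+# Illustrative usage sketch (comment only; mirrors the prefix-list testcases
+# that call this API): after modifying outbound policy on a router, the
+# testcase clears BGP on that router so the change takes effect, e.g.
+#
+#   result = clear_bgp_and_verify(tgen, topo, "r3")
+#   assert result is True, result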
+
+def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
+ """
+ To verify BGP timer config, execute "show ip bgp neighbor json" command
+ and verify bgp timers with input_dict data.
+    To verify bgp timers functionality, shut down the peer interface
+ and verify BGP neighborship status.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: defines for which router, bgp timers needs to be verified
+
+ Usage:
+ # To verify BGP timers for neighbor r2 of router r1
+    input_dict = {
+        "r1": {
+            "bgp": {
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {
+                                    "dest_link": {
+                                        "r1": {
+                                            "keepalivetimer": 5,
+                                            "holddowntimer": 15
+                    }}}}}}}}}}
+    result = verify_bgp_timers_and_functionality(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_bgp_timers_and_functionality()")
+ sleep(5)
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+
+ logger.info("Verifying bgp timers functionality, DUT is %s:",
+ router)
+
+ show_ip_bgp_neighbor_json = \
+ rnode.vtysh_cmd("show ip bgp neighbor json", isjson=True)
+
+ bgp_addr_type = input_dict[router]["bgp"]["address_family"]
+
+ for addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"][
+ "neighbor"]
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link, peer_dict in peer_data["dest_link"].iteritems():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ keepalivetimer = peer_dict["keepalivetimer"]
+ holddowntimer = peer_dict["holddowntimer"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type]. \
+ split("/")[0]
+ neighbor_intf = data[dest_link]["interface"]
+
+ # Verify HoldDownTimer for neighbor
+ bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[
+ neighbor_ip]["bgpTimerHoldTimeMsecs"]
+ if bgpHoldTimeMsecs != holddowntimer * 1000:
+ errormsg = "Verifying holddowntimer for bgp " \
+ "neighbor {} under dut {}, found: {} " \
+ "but expected: {}".format(
+ neighbor_ip, router,
+ bgpHoldTimeMsecs,
+ holddowntimer * 1000)
+ return errormsg
+
+ # Verify KeepAliveTimer for neighbor
+ bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[
+ neighbor_ip]["bgpTimerKeepAliveIntervalMsecs"]
+ if bgpKeepAliveTimeMsecs != keepalivetimer * 1000:
+ errormsg = "Verifying keepalivetimer for bgp " \
+ "neighbor {} under dut {}, found: {} " \
+ "but expected: {}".format(
+ neighbor_ip, router,
+ bgpKeepAliveTimeMsecs,
+ keepalivetimer * 1000)
+ return errormsg
+
+ ####################
+ # Shutting down peer interface after keepalive time and
+ # after some time bringing up peer interface.
+                    # Verify that the BGP neighborship stays up within
+                    # (holddown - keepalive) time; it should not go down.
+ ####################
+
+ # Wait till keep alive time
+ logger.info("=" * 20)
+ logger.info("Scenario 1:")
+ logger.info("Shutdown and bring up peer interface: %s "
+ "in keep alive time : %s sec and verify "
+ " BGP neighborship is intact in %s sec ",
+ neighbor_intf, keepalivetimer,
+ (holddowntimer - keepalivetimer))
+ logger.info("=" * 20)
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+
+                    # Shutting down peer interface
+ logger.info("Shutting down interface %s on router %s",
+ neighbor_intf, bgp_neighbor)
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf,
+ ifaceaction=False)
+
+ # Bringing up peer interface
+ sleep(5)
+ logger.info("Bringing up interface %s on router %s..",
+ neighbor_intf, bgp_neighbor)
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf,
+ ifaceaction=True)
+
+ # Verifying BGP neighborship is intact in
+ # (holddown - keepalive) time
+ for timer in range(keepalivetimer, holddowntimer,
+ int(holddowntimer / 3)):
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+ sleep(2)
+ show_bgp_json = \
+ rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if timer == \
+ (holddowntimer - keepalivetimer):
+ if nh_state != "Established":
+ errormsg = "BGP neighborship has not gone " \
+ "down in {} sec for neighbor {}\n" \
+ "show_bgp_json: \n {} ".format(
+ timer, bgp_neighbor,
+ show_bgp_json)
+ return errormsg
+ else:
+ logger.info("BGP neighborship is intact in %s"
+ " sec for neighbor %s \n "
+ "show_bgp_json : \n %s",
+ timer, bgp_neighbor,
+ show_bgp_json)
+
+ ####################
+ # Shutting down peer interface and verifying that BGP
+ # neighborship is going down in holddown time
+ ####################
+ logger.info("=" * 20)
+ logger.info("Scenario 2:")
+ logger.info("Shutdown peer interface: %s and verify BGP"
+ " neighborship has gone down in hold down "
+ "time %s sec", neighbor_intf, holddowntimer)
+ logger.info("=" * 20)
+
+ logger.info("Shutting down interface %s on router %s..",
+ neighbor_intf, bgp_neighbor)
+ topotest.interface_set_status(router_list[bgp_neighbor],
+ neighbor_intf,
+ ifaceaction=False)
+
+ # Verifying BGP neighborship is going down in holddown time
+ for timer in range(keepalivetimer,
+ (holddowntimer + keepalivetimer),
+ int(holddowntimer / 3)):
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+ sleep(2)
+ show_bgp_json = \
+ rnode.vtysh_cmd("show bgp summary json",
+ isjson=True)
+
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if timer == holddowntimer:
+ if nh_state == "Established":
+ errormsg = "BGP neighborship has not gone " \
+ "down in {} sec for neighbor {}\n" \
+ "show_bgp_json: \n {} ".format(
+ timer, bgp_neighbor,
+ show_bgp_json)
+ return errormsg
+ else:
+ logger.info("BGP neighborship has gone down in"
+ " %s sec for neighbor %s \n"
+ "show_bgp_json : \n %s",
+ timer, bgp_neighbor,
+ show_bgp_json)
+
+ logger.info("Exiting lib API: verify_bgp_timers_and_functionality()")
+ return True
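+
+# Example usage (sketch): assumes `tgen`/`topo` from a topojson-based test and
+# an `input_dict` shaped like the docstring above, with keepalivetimer and
+# holddowntimer configured under the neighbor's "dest_link".
+#
+#     result = verify_bgp_timers_and_functionality(tgen, topo, input_dict)
+#     assert result is True, result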
+
+
+def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
+ attribute):
+ """
+    API to verify the best path according to BGP attributes for given routes.
+    "show bgp ipv4/6 json" command will be run and the best path verified
+    according to shortest as-path, highest local-preference, highest weight,
+    lowest med and route origin IGP>EGP>INCOMPLETE.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+    * `router` : router for which the best path is to be verified
+ * `attribute` : calculate best path using this attribute
+ * `input_dict`: defines different routes to calculate for which route
+ best path is selected
+
+ Usage
+ -----
+ # To verify best path for routes 200.50.2.0/32 and 200.60.2.0/32 from
+    router r7 to router r1(DUT) as per highest local-preference attribute
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ attribute = "localpref"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut, \
+ input_dict, attribute)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: verify_best_path_as_per_bgp_attribute()")
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ # TODO get addr_type from address
+ # Verifying show bgp json
+ command = "show bgp {} json".format(addr_type)
+
+ sleep(2)
+ logger.info("Verifying router %s RIB for best path:", router)
+ sh_ip_bgp_json = rnode.vtysh_cmd(command, isjson=True)
+
+ for route_val in input_dict.values():
+ net_data = route_val["bgp"]["address_family"]["ipv4"]["unicast"]
+ networks = net_data["advertise_networks"]
+ for network in networks:
+ route = network["network"]
+
+ route_attributes = sh_ip_bgp_json["routes"][route]
+ _next_hop = None
+ compare = None
+ attribute_dict = {}
+ for route_attribute in route_attributes:
+ next_hops = route_attribute["nexthops"]
+ for next_hop in next_hops:
+ next_hop_ip = next_hop["ip"]
+ attribute_dict[next_hop_ip] = route_attribute[attribute]
+
+ # AS_PATH attribute
+ if attribute == "aspath":
+                # Find next_hop for the route that has the shortest as_path
+ _next_hop = min(attribute_dict, key=lambda x: len(set(
+ attribute_dict[x])))
+ compare = "SHORTEST"
+
+ # LOCAL_PREF attribute
+ elif attribute == "localpref":
+                # Find next_hop for the route that has the highest local preference
+ _next_hop = max(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "HIGHEST"
+
+ # WEIGHT attribute
+ elif attribute == "weight":
+                # Find next_hop for the route that has the highest weight
+ _next_hop = max(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "HIGHEST"
+
+ # ORIGIN attribute
+ elif attribute == "origin":
+                # Find next_hop for the route that has IGP as origin;
+                # preference rule is IGP>EGP>INCOMPLETE
+ _next_hop = [key for (key, value) in
+ attribute_dict.iteritems()
+ if value == "IGP"][0]
+ compare = ""
+
+ # MED attribute
+ elif attribute == "med":
+                # Find next_hop for the route that has the lowest MED
+ _next_hop = min(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "LOWEST"
+
+ # Show ip route
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+
+ rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if not bool(rib_routes_json):
+ errormsg = "No route found in RIB of router {}..". \
+ format(router)
+ return errormsg
+
+ st_found = False
+ nh_found = False
+            # Verify that the best path is installed in the RIB
+ if route in rib_routes_json:
+ st_found = True
+ # Verify next_hop in rib_routes_json
+ if rib_routes_json[route][0]["nexthops"][0]["ip"] == \
+ _next_hop:
+ nh_found = True
+ else:
+ errormsg = "Incorrect Nexthop for BGP route {} in " \
+ "RIB of router {}, Expected: {}, Found:" \
+ " {}\n".format(route, router,
+ rib_routes_json[route][0][
+ "nexthops"][0]["ip"],
+ _next_hop)
+ return errormsg
+
+ if st_found and nh_found:
+ logger.info(
+ "Best path for prefix: %s with next_hop: %s is "
+ "installed according to %s %s: (%s) in RIB of "
+ "router %s", route, _next_hop, compare,
+ attribute, attribute_dict[_next_hop], router)
+
+ logger.debug("Exiting lib API: verify_best_path_as_per_bgp_attribute()")
+ return True
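+
+# Example usage (sketch): "r1" is the DUT whose RIB is checked; the routes in
+# `input_dict` and the attribute name are illustrative.
+#
+#     result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", "r1",
+#                                                    input_dict, "localpref")
+#     assert result is True, result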
+
+
+def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
+ attribute):
+ """
+    API to verify the best path according to admin distance for a given
+    route. "show ip/ipv6 route json" command will be run and the best
+    path verified according to the lowest admin distance.
+
+ Parameters
+ ----------
+ * `addr_type` : ip type, ipv4/ipv6
+    * `router`: Device Under Test (DUT)
+ * `tgen` : topogen object
+ * `attribute` : calculate best path using admin distance
+ * `input_dict`: defines different routes with different admin distance
+ to calculate for which route best path is selected
+ Usage
+ -----
+ # To verify best path for route 200.50.2.0/32 from router r2 to
+    router r1(DUT) as per the lowest admin distance, which is 60.
+ input_dict = {
+ "r2": {
+ "static_routes": [{"network": "200.50.2.0/32", \
+ "admin_distance": 80, "next_hop": "10.0.0.14"},
+ {"network": "200.50.2.0/32", \
+ "admin_distance": 60, "next_hop": "10.0.0.18"}]
+ }}
+ attribute = "localpref"
+ result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut, \
+ input_dict, attribute):
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_best_path_as_per_admin_distance()")
+ router_list = tgen.routers()
+ if router not in router_list:
+ return False
+
+ rnode = tgen.routers()[router]
+
+ sleep(2)
+ logger.info("Verifying router %s RIB for best path:", router)
+
+ # Show ip route cmd
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+
+ for routes_from_router in input_dict.keys():
+ sh_ip_route_json = router_list[routes_from_router].vtysh_cmd(
+ command, isjson=True)
+ networks = input_dict[routes_from_router]["static_routes"]
+ for network in networks:
+ route = network["network"]
+
+ route_attributes = sh_ip_route_json[route]
+ _next_hop = None
+ compare = None
+ attribute_dict = {}
+ for route_attribute in route_attributes:
+ next_hops = route_attribute["nexthops"]
+ for next_hop in next_hops:
+ next_hop_ip = next_hop["ip"]
+ attribute_dict[next_hop_ip] = route_attribute["distance"]
+
+ # Find next_hop for the route have LOWEST Admin Distance
+ _next_hop = min(attribute_dict, key=(lambda k:
+ attribute_dict[k]))
+ compare = "LOWEST"
+
+ # Show ip route
+ rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if not bool(rib_routes_json):
+ errormsg = "No route found in RIB of router {}..".format(router)
+ return errormsg
+
+ st_found = False
+ nh_found = False
+            # Verify that the best path is installed in the RIB
+ if route in rib_routes_json:
+ st_found = True
+ # Verify next_hop in rib_routes_json
+ if rib_routes_json[route][0]["nexthops"][0]["ip"] == \
+ _next_hop:
+ nh_found = True
+ else:
+ errormsg = ("Nexthop {} is Missing for BGP route {}"
+ " in RIB of router {}\n".format(_next_hop,
+ route, router))
+ return errormsg
+
+ if st_found and nh_found:
+ logger.info("Best path for prefix: %s is installed according"
+ " to %s %s: (%s) in RIB of router %s", route,
+ compare, attribute,
+ attribute_dict[_next_hop], router)
+
+ logger.info(
+ "Exiting lib API: verify_best_path_as_per_admin_distance()")
+ return True
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
new file mode 100644
index 0000000000..d2c1d82430
--- /dev/null
+++ b/tests/topotests/lib/common_config.py
@@ -0,0 +1,1391 @@
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+from collections import OrderedDict
+from datetime import datetime
+from time import sleep
+from subprocess import call
+from subprocess import STDOUT as SUB_STDOUT
+import StringIO
+import os
+import ConfigParser
+import traceback
+import socket
+import ipaddr
+
+from lib.topolog import logger, logger_config
+from lib.topogen import TopoRouter
+
+
+FRRCFG_FILE = "frr_json.conf"
+FRRCFG_BKUP_FILE = "frr_json_initial.conf"
+
+ERROR_LIST = ["Malformed", "Failure", "Unknown"]
+
+####
+CD = os.path.dirname(os.path.realpath(__file__))
+PYTESTINI_PATH = os.path.join(CD, "../pytest.ini")
+
+# Creating tmp dir with testsuite name to avoid conflict condition when
+# multiple testsuites run together. All temporary files would be created
+# in this dir and this dir would be removed once testsuite run is
+# completed
+LOGDIR = "/tmp/topotests/"
+TMPDIR = None
+
+# NOTE: to save execution logs to log file frrtest_log_dir must be configured
+# in `pytest.ini`.
+config = ConfigParser.ConfigParser()
+config.read(PYTESTINI_PATH)
+
+config_section = "topogen"
+
+if config.has_option("topogen", "verbosity"):
+ loglevel = config.get("topogen", "verbosity")
+ loglevel = loglevel.upper()
+else:
+ loglevel = "INFO"
+
+if config.has_option("topogen", "frrtest_log_dir"):
+ frrtest_log_dir = config.get("topogen", "frrtest_log_dir")
+ time_stamp = datetime.time(datetime.now())
+ logfile_name = "frr_test_bgp_"
+ frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
+ print("frrtest_log_file..", frrtest_log_file)
+
+ logger = logger_config.get_logger(name="test_execution_logs",
+ log_level=loglevel,
+ target=frrtest_log_file)
+ print("Logs will be sent to logfile: {}".format(frrtest_log_file))
+
+if config.has_option("topogen", "show_router_config"):
+ show_router_config = config.get("topogen", "show_router_config")
+else:
+ show_router_config = False
+
+# env variable for setting what address type to test
+ADDRESS_TYPES = os.environ.get("ADDRESS_TYPES")
+
+
+# Saves sequence id numbers
+SEQ_ID = {
+ "prefix_lists": {},
+ "route_maps": {}
+}
+
+
+def get_seq_id(obj_type, router, obj_name):
+ """
+ Generates and saves sequence number in interval of 10
+
+ Parameters
+ ----------
+ * `obj_type`: prefix_lists or route_maps
+ * `router`: router name
+    * `obj_name`: name of the prefix-list or route-map
+
+ Returns
+ --------
+ Sequence number generated
+ """
+
+ router_data = SEQ_ID[obj_type].setdefault(router, {})
+ obj_data = router_data.setdefault(obj_name, {})
+ seq_id = obj_data.setdefault("seq_id", 0)
+
+ seq_id = int(seq_id) + 10
+ obj_data["seq_id"] = seq_id
+
+ return seq_id
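+
+# Example (sketch): sequence ids are generated per (router, object) pair in
+# steps of 10, so repeated calls for the same prefix-list yield 10, 20, 30...
+#
+#     get_seq_id("prefix_lists", "r1", "pf_list_1")   # -> 10
+#     get_seq_id("prefix_lists", "r1", "pf_list_1")   # -> 20
+#     get_seq_id("route_maps", "r1", "rmap_1")        # -> 10 (separate counter)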
+
+
+def set_seq_id(obj_type, router, id, obj_name):
+ """
+    Saves the sequence number when it is given by the user instead of being
+    auto-generated
+
+ Parameters
+ ----------
+ * `obj_type`: prefix_lists or route_maps
+ * `router`: router name
+    * `obj_name`: name of the prefix-list or route-map
+ """
+ router_data = SEQ_ID[obj_type].setdefault(router, {})
+ obj_data = router_data.setdefault(obj_name, {})
+ seq_id = obj_data.setdefault("seq_id", 0)
+
+ seq_id = int(seq_id) + int(id)
+ obj_data["seq_id"] = seq_id
+
+
+class InvalidCLIError(Exception):
+ """Raise when the CLI command is wrong"""
+ pass
+
+
+def create_common_configuration(tgen, router, data, config_type=None,
+ build=False):
+ """
+ API to create object of class FRRConfig and also create frr_json.conf
+ file. It will create interface and common configurations and save it to
+ frr_json.conf and load to router
+
+ Parameters
+ ----------
+    * `tgen`: topogen object
+    * `data`: Configuration data saved in a list.
+ * `router` : router id to be configured.
+ * `config_type` : Syntactic information while writing configuration. Should
+      be one of the values mentioned in the config_map below.
+ * `build` : Only for initial setup phase this is set as True
+
+ Returns
+ -------
+ True or False
+ """
+ TMPDIR = os.path.join(LOGDIR, tgen.modname)
+
+ fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE)
+
+ config_map = OrderedDict({
+ "general_config": "! FRR General Config\n",
+ "interface_config": "! Interfaces Config\n",
+ "static_route": "! Static Route Config\n",
+ "prefix_list": "! Prefix List Config\n",
+ "route_maps": "! Route Maps Config\n",
+ "bgp": "! BGP Config\n"
+ })
+
+ if build:
+ mode = "a"
+ else:
+ mode = "w"
+
+ try:
+ frr_cfg_fd = open(fname, mode)
+ if config_type:
+ frr_cfg_fd.write(config_map[config_type])
+ for line in data:
+ frr_cfg_fd.write("{} \n".format(str(line)))
+
+ except IOError as err:
+ logger.error("Unable to open FRR Config File. error(%s): %s" %
+ (err.errno, err.strerror))
+ return False
+ finally:
+ frr_cfg_fd.close()
+
+    # If configuration is applied during build, it will be loaded at the end
+ if not build:
+ load_config_to_router(tgen, router)
+
+ return True
+
+
+def reset_config_on_routers(tgen, routerName=None):
+ """
+ Resets configuration on routers to the snapshot created using input JSON
+ file. It replaces existing router configuration with FRRCFG_BKUP_FILE
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routerName` : router config is to be reset
+ """
+
+ logger.debug("Entering API: reset_config_on_routers")
+
+ router_list = tgen.routers()
+ for rname, router in router_list.iteritems():
+ if routerName and routerName != rname:
+ continue
+
+ cfg = router.run("vtysh -c 'show running'")
+ fname = "{}/{}/frr.sav".format(TMPDIR, rname)
+ dname = "{}/{}/delta.conf".format(TMPDIR, rname)
+ f = open(fname, "w")
+ for line in cfg.split("\n"):
+ line = line.strip()
+
+ if (line == "Building configuration..." or
+ line == "Current configuration:" or
+ not line):
+ continue
+ f.write(line)
+ f.write("\n")
+
+ f.close()
+
+ command = "/usr/lib/frr/frr-reload.py --input {}/{}/frr.sav" \
+ " --test {}/{}/frr_json_initial.conf > {}". \
+ format(TMPDIR, rname, TMPDIR, rname, dname)
+ result = call(command, shell=True, stderr=SUB_STDOUT)
+
+ # Assert if command fail
+ if result > 0:
+ errormsg = ("Command:{} is failed due to non-zero exit"
+ " code".format(command))
+ return errormsg
+
+ f = open(dname, "r")
+ delta = StringIO.StringIO()
+ delta.write("configure terminal\n")
+ t_delta = f.read()
+ for line in t_delta.split("\n"):
+ line = line.strip()
+ if (line == "Lines To Delete" or
+ line == "===============" or
+ line == "Lines To Add" or
+ line == "============" or
+ not line):
+ continue
+ delta.write(line)
+ delta.write("\n")
+
+ delta.write("end\n")
+ output = router.vtysh_multicmd(delta.getvalue(),
+ pretty_output=False)
+ logger.info("New configuration for router {}:".format(rname))
+ delta.close()
+ delta = StringIO.StringIO()
+ cfg = router.run("vtysh -c 'show running'")
+ for line in cfg.split("\n"):
+ line = line.strip()
+ delta.write(line)
+ delta.write("\n")
+
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ logger.info(delta.getvalue())
+ delta.close()
+
+ logger.debug("Exting API: reset_config_on_routers")
+ return True
+
+
+def load_config_to_router(tgen, routerName, save_bkup=False):
+ """
+ Loads configuration on router from the file FRRCFG_FILE.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routerName` : router for which configuration to be loaded
+ * `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
+ """
+
+ logger.debug("Entering API: load_config_to_router")
+
+ router_list = tgen.routers()
+ for rname, router in router_list.iteritems():
+ if rname == routerName:
+ try:
+ frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
+ frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
+ FRRCFG_BKUP_FILE)
+ with open(frr_cfg_file, "r") as cfg:
+ data = cfg.read()
+ if save_bkup:
+ with open(frr_cfg_bkup, "w") as bkup:
+ bkup.write(data)
+
+ output = router.vtysh_multicmd(data, pretty_output=False)
+ for out_err in ERROR_LIST:
+ if out_err.lower() in output.lower():
+ raise InvalidCLIError("%s" % output)
+ except IOError as err:
+ errormsg = ("Unable to open config File. error(%s):"
+ " %s", (err.errno, err.strerror))
+ return errormsg
+
+ logger.info("New configuration for router {}:".format(rname))
+ new_config = router.run("vtysh -c 'show running'")
+
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ logger.info(new_config)
+
+ logger.debug("Exting API: load_config_to_router")
+ return True
+
+
+def start_topology(tgen):
+ """
+    Start the topology, create tmp files which are loaded to routers
+    to start daemons, and then start the routers
+ * `tgen` : topogen object
+ """
+
+ global TMPDIR
+ # Starting topology
+ tgen.start_topology()
+
+    # Starting daemons
+ router_list = tgen.routers()
+ TMPDIR = os.path.join(LOGDIR, tgen.modname)
+
+ for rname, router in router_list.iteritems():
+ try:
+ os.chdir(TMPDIR)
+
+            # Creating router named dir and empty zebra.conf, bgpd.conf files
+ # inside the current directory
+
+ if os.path.isdir('{}'.format(rname)):
+ os.system("rm -rf {}".format(rname))
+ os.mkdir('{}'.format(rname))
+ os.system('chmod -R go+rw {}'.format(rname))
+ os.chdir('{}/{}'.format(TMPDIR, rname))
+ os.system('touch zebra.conf bgpd.conf')
+ else:
+ os.mkdir('{}'.format(rname))
+ os.system('chmod -R go+rw {}'.format(rname))
+ os.chdir('{}/{}'.format(TMPDIR, rname))
+ os.system('touch zebra.conf bgpd.conf')
+
+ except IOError as (errno, strerror):
+ logger.error("I/O error({0}): {1}".format(errno, strerror))
+
+        # Loading empty zebra.conf file to router, to start the zebra daemon
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ '{}/{}/zebra.conf'.format(TMPDIR, rname)
+ # os.path.join(tmpdir, '{}/zebra.conf'.format(rname))
+ )
+        # Loading empty bgpd.conf file to router, to start the bgp daemon
+ router.load_config(
+ TopoRouter.RD_BGP,
+ '{}/{}/bgpd.conf'.format(TMPDIR, rname)
+ # os.path.join(tmpdir, '{}/bgpd.conf'.format(rname))
+ )
+
+ # Starting routers
+ logger.info("Starting all routers once topology is created")
+ tgen.start_router()
+
+
+def number_to_row(routerName):
+ """
+    Returns the row number for the router.
+ Calculation based on name a0 = row 0, a1 = row 1, b2 = row 2, z23 = row 23
+ etc
+ """
+ return int(routerName[1:])
+
+
+def number_to_column(routerName):
+ """
+    Returns the column number for the router.
+    Calculation based on name: a0 = column 0, a1 = column 0, b2 = column 1,
+    z23 = column 25 etc
+ """
+ return ord(routerName[0]) - 97
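+
+# Example (sketch): row/column are derived purely from the router name, e.g.
+#
+#     number_to_row("a3")      # -> 3
+#     number_to_column("a3")   # -> 0
+#     number_to_column("b2")   # -> 1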
+
+
+#############################################
+# Common APIs, will be used by all protocols
+#############################################
+
+def validate_ip_address(ip_address):
+ """
+ Validates the type of ip address
+
+ Parameters
+ ----------
+ * `ip_address`: IPv4/IPv6 address
+
+ Returns
+ -------
+ Type of address as string
+ """
+
+ if "/" in ip_address:
+ ip_address = ip_address.split("/")[0]
+
+ v4 = True
+ v6 = True
+ try:
+ socket.inet_aton(ip_address)
+ except socket.error as error:
+ logger.debug("Not a valid IPv4 address")
+ v4 = False
+ else:
+ return "ipv4"
+
+ try:
+ socket.inet_pton(socket.AF_INET6, ip_address)
+ except socket.error as error:
+ logger.debug("Not a valid IPv6 address")
+ v6 = False
+ else:
+ return "ipv6"
+
+ if not v4 and not v6:
+ raise Exception("InvalidIpAddr", "%s is neither valid IPv4 or IPv6"
+ " address" % ip_address)
+
+
+def check_address_types(addr_type):
+ """
+ Checks environment variable set and compares with the current address type
+ """
+ global ADDRESS_TYPES
+ if ADDRESS_TYPES is None:
+ ADDRESS_TYPES = "dual"
+
+ if ADDRESS_TYPES == "dual":
+ ADDRESS_TYPES = ["ipv4", "ipv6"]
+ elif ADDRESS_TYPES == "ipv4":
+ ADDRESS_TYPES = ["ipv4"]
+ elif ADDRESS_TYPES == "ipv6":
+ ADDRESS_TYPES = ["ipv6"]
+
+ if addr_type not in ADDRESS_TYPES:
+ logger.error("{} not in supported/configured address types {}".
+ format(addr_type, ADDRESS_TYPES))
+ return False
+
+ return ADDRESS_TYPES
+
+
+def generate_ips(network, no_of_ips):
+ """
+    Returns a list of IPs based on start_ip and no_of_ips.
+
+    * `network` : network/prefix from which IP generation starts; start_ip
+      will be the first ip
+ * `no_of_ips` : these many IPs will be generated
+
+ Limitation: It will generate IPs only for ip_mask 32
+
+ """
+ ipaddress_list = []
+ if type(network) is not list:
+ network = [network]
+
+ for start_ipaddr in network:
+ if "/" in start_ipaddr:
+ start_ip = start_ipaddr.split("/")[0]
+ mask = int(start_ipaddr.split("/")[1])
+
+ addr_type = validate_ip_address(start_ip)
+ if addr_type == "ipv4":
+ start_ip = ipaddr.IPv4Address(unicode(start_ip))
+ step = 2 ** (32 - mask)
+ if addr_type == "ipv6":
+ start_ip = ipaddr.IPv6Address(unicode(start_ip))
+ step = 2 ** (128 - mask)
+
+ next_ip = start_ip
+ count = 0
+ while count < no_of_ips:
+ ipaddress_list.append("{}/{}".format(next_ip, mask))
+ if addr_type == "ipv6":
+ next_ip = ipaddr.IPv6Address(int(next_ip) + step)
+ else:
+ next_ip += step
+ count += 1
+
+ return ipaddress_list
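+
+# Example (sketch): generation steps by the prefix size, so for a /32 start
+# address consecutive host routes are produced.
+#
+#     generate_ips("100.0.20.1/32", 3)
+#     # -> ["100.0.20.1/32", "100.0.20.2/32", "100.0.20.3/32"]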
+
+
+def find_interface_with_greater_ip(topo, router, loopback=True,
+ interface=True):
+ """
+ Returns highest interface ip for ipv4/ipv6. If loopback is there then
+ it will return highest IP from loopback IPs otherwise from physical
+ interface IPs.
+
+ * `topo` : json file data
+    * `router` : router for which the highest interface IP should be calculated
+ """
+
+ link_data = topo["routers"][router]["links"]
+ lo_list = []
+ interfaces_list = []
+ lo_exists = False
+ for destRouterLink, data in sorted(link_data.iteritems()):
+ if loopback:
+ if "type" in data and data["type"] == "loopback":
+ lo_exists = True
+ ip_address = topo["routers"][router]["links"][
+ destRouterLink]["ipv4"].split("/")[0]
+ lo_list.append(ip_address)
+ if interface:
+ ip_address = topo["routers"][router]["links"][
+ destRouterLink]["ipv4"].split("/")[0]
+ interfaces_list.append(ip_address)
+
+ if lo_exists:
+ return sorted(lo_list)[-1]
+
+ return sorted(interfaces_list)[-1]
+
+
+def write_test_header(tc_name):
+ """ Display message at beginning of test case"""
+ count = 20
+ logger.info("*"*(len(tc_name)+count))
+ logger.info("START -> Testcase : %s", tc_name)
+ logger.info("*"*(len(tc_name)+count))
+
+
+def write_test_footer(tc_name):
+ """ Display message at end of test case"""
+ count = 21
+ logger.info("="*(len(tc_name)+count))
+ logger.info("PASSED -> Testcase : %s", tc_name)
+ logger.info("="*(len(tc_name)+count))
+
+
+#############################################
+# These APIs will be used by testcases
+#############################################
+def create_interfaces_cfg(tgen, topo, build=False):
+ """
+ Create interface configuration for created topology. Basic Interface
+ configuration is provided in input json file.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ True or False
+ """
+ result = False
+
+ try:
+ for c_router, c_data in topo.iteritems():
+ interface_data = []
+ for destRouterLink, data in sorted(c_data["links"].iteritems()):
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+ interface_data.append("interface {}\n".format(
+ str(interface_name)
+ ))
+ if "ipv4" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv4"]
+ interface_data.append("ip address {}\n".format(
+ intf_addr
+ ))
+ if "ipv6" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv6"]
+ interface_data.append("ipv6 address {}\n".format(
+ intf_addr
+ ))
+ result = create_common_configuration(tgen, c_router,
+ interface_data,
+ "interface_config",
+ build=build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ return result
+
+
+def create_static_routes(tgen, input_dict, build=False):
+ """
+ Create static routes for given router as defined in input_dict
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict should be in the format below:
+ # static_routes: list of all routes
+ # network: network address
+    # no_of_ip: number of network prefixes to be generated, starting from "network"
+ # admin_distance: admin distance for route/routes.
+ # next_hop: starting next-hop address
+ # tag: tag id for static routes
+ # delete: True if config to be removed. Default False.
+
+ Example:
+ "routers": {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1",
+ "tag": 4001
+ "delete": true
+ }
+ ]
+ }
+ }
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ result = False
+ logger.debug("Entering lib API: create_static_routes()")
+ try:
+ for router in input_dict.keys():
+ if "static_routes" not in input_dict[router]:
+ errormsg = "static_routes not present in input_dict"
+ logger.info(errormsg)
+ continue
+
+ static_routes_list = []
+
+ static_routes = input_dict[router]["static_routes"]
+ for static_route in static_routes:
+ del_action = static_route.setdefault("delete", False)
+ # No of IPs
+ no_of_ip = static_route.setdefault("no_of_ip", 1)
+ admin_distance = static_route.setdefault("admin_distance",
+ None)
+ tag = static_route.setdefault("tag", None)
+ if "next_hop" not in static_route or \
+ "network" not in static_route:
+ errormsg = "'next_hop' or 'network' missing in" \
+ " input_dict"
+ return errormsg
+
+ next_hop = static_route["next_hop"]
+ network = static_route["network"]
+ ip_list = generate_ips([network], no_of_ip)
+ for ip in ip_list:
+ addr_type = validate_ip_address(ip)
+ if addr_type == "ipv4":
+ cmd = "ip route {} {}".format(ip, next_hop)
+ else:
+ cmd = "ipv6 route {} {}".format(ip, next_hop)
+
+ if tag:
+ cmd = "{} {}".format(cmd, str(tag))
+ if admin_distance:
+ cmd = "{} {}".format(cmd, admin_distance)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ static_routes_list.append(cmd)
+
+ result = create_common_configuration(tgen, router,
+ static_routes_list,
+ "static_route",
+ build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_static_routes()")
+ return result
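+
+# Example usage (sketch): configures 9 consecutive /32 routes on "r1" via the
+# given next hop; values mirror the docstring above and are illustrative.
+#
+#     input_dict = {
+#         "r1": {
+#             "static_routes": [{
+#                 "network": "100.0.20.1/32",
+#                 "no_of_ip": 9,
+#                 "next_hop": "10.0.0.1"
+#             }]
+#         }
+#     }
+#     result = create_static_routes(tgen, input_dict)
+#     assert result is True, result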
+
+
+def create_prefix_lists(tgen, input_dict, build=False):
+ """
+ Create ip prefix lists as per the config provided in input
+ JSON or input_dict
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+    # pf_list_1: name of prefix-list, user defined
+ # seqid: prefix-list seqid, auto-generated if not given by user
+ # network: criteria for applying prefix-list
+ # action: permit/deny
+ # le: less than or equal number of bits
+ # ge: greater than or equal number of bits
+
+ Example
+ -------
+ input_dict = {
+ "r1": {
+ "prefix_lists":{
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": 10,
+ "network": "any",
+ "action": "permit",
+ "le": "32",
+ "ge": "30",
+ "delete": True
+ }
+ ]
+ }
+ }
+ }
+ }
+
+ Returns
+ -------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: create_prefix_lists()")
+ result = False
+ try:
+ for router in input_dict.keys():
+ if "prefix_lists" not in input_dict[router]:
+ errormsg = "prefix_lists not present in input_dict"
+ logger.info(errormsg)
+ continue
+
+ config_data = []
+ prefix_lists = input_dict[router]["prefix_lists"]
+ for addr_type, prefix_data in prefix_lists.iteritems():
+ if not check_address_types(addr_type):
+ continue
+
+ for prefix_name, prefix_list in prefix_data.iteritems():
+ for prefix_dict in prefix_list:
+ if "action" not in prefix_dict or \
+ "network" not in prefix_dict:
+ errormsg = "'action' or network' missing in" \
+ " input_dict"
+ return errormsg
+
+ network_addr = prefix_dict["network"]
+ action = prefix_dict["action"]
+ le = prefix_dict.setdefault("le", None)
+ ge = prefix_dict.setdefault("ge", None)
+ seqid = prefix_dict.setdefault("seqid", None)
+ del_action = prefix_dict.setdefault("delete", False)
+ if seqid is None:
+ seqid = get_seq_id("prefix_lists", router,
+ prefix_name)
+ else:
+ set_seq_id("prefix_lists", router, seqid,
+ prefix_name)
+
+ if addr_type == "ipv4":
+ protocol = "ip"
+ else:
+ protocol = "ipv6"
+
+ cmd = "{} prefix-list {} seq {} {} {}".format(
+ protocol, prefix_name, seqid, action, network_addr
+ )
+ if le:
+ cmd = "{} le {}".format(cmd, le)
+ if ge:
+ cmd = "{} ge {}".format(cmd, ge)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+ result = create_common_configuration(tgen, router,
+ config_data,
+ "prefix_list",
+ build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_prefix_lists()")
+ return result
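+
+# Example usage (sketch): creates an ipv4 prefix-list on "r1"; the seqid is
+# auto-generated when omitted (see get_seq_id above).
+#
+#     input_dict = {
+#         "r1": {
+#             "prefix_lists": {
+#                 "ipv4": {
+#                     "pf_list_1": [{
+#                         "network": "10.0.20.1/32",
+#                         "action": "permit"
+#                     }]
+#                 }
+#             }
+#         }
+#     }
+#     result = create_prefix_lists(tgen, input_dict)
+#     assert result is True, result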
+
+
+def create_route_maps(tgen, input_dict, build=False):
+ """
+ Create route-map on the devices as per the arguments passed
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ # route_maps: key, value pair for route-map name and its attribute
+ # rmap_match_prefix_list_1: user given name for route-map
+ # action: PERMIT/DENY
+    # match: key,value pair for match criteria. prefix_lists, community-list,
+       large-community-list or tag. Only one option at a time.
+    # prefix_lists: name of prefix list
+    # large-community-list: name of large community list
+    # community-list: name of community list
+ # tag: tag id for static routes
+ # set: key, value pair for modifying route attributes
+ # localpref: preference value for the network
+ # med: metric value advertised for AS
+ # aspath: set AS path value
+ # weight: weight for the route
+ # community: standard community value to be attached
+ # large_community: large community value to be attached
+ # community_additive: if set to "additive", adds community/large-community
+ value to the existing values of the network prefix
+
+ Example:
+ --------
+    input_dict = {
+        "r1": {
+            "route_maps": {
+                "rmap_match_prefix_list_1": [
+                    {
+                        "action": "PERMIT",
+                        "match": {
+                            "ipv4": {
+                                "prefix_lists": "pf_list_1"
+                            },
+                            "ipv6": {
+                                "prefix_lists": "pf_list_1"
+                            },
+                            "large-community-list": {
+                                "id": "community_1",
+                                "exact_match": True
+                            },
+                            "community-list": {
+                                "id": "community_2",
+                                "exact_match": True
+                            },
+                            "tag": "tag_id"
+                        },
+                        "set": {
+                            "localpref": 150,
+                            "med": 30,
+                            "aspath": {
+                                "as_num": 20000,
+                                "as_action": "prepend"
+                            },
+                            "weight": 500,
+                            "community": {
+                                "num": "1:2 2:3",
+                                "action": "additive"
+                            },
+                            "large_community": {
+                                "num": "1:2:3 4:5:6",
+                                "action": "additive"
+                            }
+                        }
+                    }
+                ]
+            }
+        }
+    }
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ result = False
+ logger.debug("Entering lib API: create_route_maps()")
+
+ try:
+ for router in input_dict.keys():
+ if "route_maps" not in input_dict[router]:
+ errormsg = "route_maps not present in input_dict"
+ logger.info(errormsg)
+ continue
+ rmap_data = []
+ for rmap_name, rmap_value in \
+ input_dict[router]["route_maps"].iteritems():
+
+ for rmap_dict in rmap_value:
+ del_action = rmap_dict.setdefault("delete", False)
+
+ if del_action:
+ rmap_data.append("no route-map {}".format(rmap_name))
+ continue
+
+ if "action" not in rmap_dict:
+ errormsg = "action not present in input_dict"
+ logger.error(errormsg)
+ return False
+
+ rmap_action = rmap_dict.setdefault("action", "deny")
+
+ seq_id = rmap_dict.setdefault("seq_id", None)
+ if seq_id is None:
+ seq_id = get_seq_id("route_maps", router, rmap_name)
+ else:
+ set_seq_id("route_maps", router, seq_id, rmap_name)
+
+ rmap_data.append("route-map {} {} {}".format(
+ rmap_name, rmap_action, seq_id
+ ))
+
+ # Verifying if SET criteria is defined
+ if "set" in rmap_dict:
+ set_data = rmap_dict["set"]
+
+ local_preference = set_data.setdefault("localpref",
+ None)
+ metric = set_data.setdefault("med", None)
+ as_path = set_data.setdefault("aspath", {})
+ weight = set_data.setdefault("weight", None)
+ community = set_data.setdefault("community", {})
+ large_community = set_data.setdefault(
+ "large_community", {})
+ set_action = set_data.setdefault("set_action", None)
+
+ # Local Preference
+ if local_preference:
+ rmap_data.append("set local-preference {}".
+ format(local_preference))
+
+ # Metric
+ if metric:
+ rmap_data.append("set metric {} \n".format(metric))
+
+ # AS Path Prepend
+ if as_path:
+ as_num = as_path.setdefault("as_num", None)
+ as_action = as_path.setdefault("as_action", None)
+ if as_action and as_num:
+ rmap_data.append("set as-path {} {}".
+ format(as_action, as_num))
+
+ # Community
+ if community:
+ num = community.setdefault("num", None)
+ comm_action = community.setdefault("action", None)
+ if num:
+ cmd = "set community {}".format(num)
+ if comm_action:
+ cmd = "{} {}".format(cmd, comm_action)
+ rmap_data.append(cmd)
+ else:
+ logger.error("In community, AS Num not"
+ " provided")
+ return False
+
+ if large_community:
+ num = large_community.setdefault("num", None)
+ comm_action = large_community.setdefault("action",
+ None)
+ if num:
+ cmd = "set large-community {}".format(num)
+ if comm_action:
+ cmd = "{} {}".format(cmd, comm_action)
+
+ rmap_data.append(cmd)
+ else:
+                                logger.error("In large_community, 'num' not"
+                                             " provided")
+ return False
+
+ # Weight
+ if weight:
+ rmap_data.append("set weight {} \n".format(
+ weight))
+
+ # Adding MATCH and SET sequence to RMAP if defined
+ if "match" in rmap_dict:
+ match_data = rmap_dict["match"]
+ ipv4_data = match_data.setdefault("ipv4", {})
+ ipv6_data = match_data.setdefault("ipv6", {})
+ community = match_data.setdefault("community-list",
+ {})
+ large_community = match_data.setdefault(
+ "large-community-list", {}
+ )
+ tag = match_data.setdefault("tag", None)
+
+ if ipv4_data:
+ prefix_name = ipv4_data.setdefault("prefix_lists",
+ None)
+ if prefix_name:
+ rmap_data.append("match ip address prefix-list"
+ " {}".format(prefix_name))
+ if ipv6_data:
+ prefix_name = ipv6_data.setdefault("prefix_lists",
+ None)
+ if prefix_name:
+ rmap_data.append("match ipv6 address "
+ "prefix-list {}".
+ format(prefix_name))
+ if tag:
+ rmap_data.append("match tag {}".format(tag))
+
+ if community:
+ if "id" not in community:
+ logger.error("'id' is mandatory for "
+ "community-list in match"
+ " criteria")
+ return False
+ cmd = "match community {}".format(community["id"])
+ exact_match = community.setdefault("exact_match",
+ False)
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+
+ rmap_data.append(cmd)
+
+ if large_community:
+ if "id" not in large_community:
+ logger.error("'num' is mandatory for "
+ "large-community-list in match "
+ "criteria")
+ return False
+ cmd = "match large-community {}".format(
+ large_community["id"])
+ exact_match = large_community.setdefault(
+ "exact_match", False)
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+
+ rmap_data.append(cmd)
+
+ result = create_common_configuration(tgen, router,
+ rmap_data,
+ "route_maps",
+ build=build)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_prefix_lists()")
+ return result
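+
+# Example usage (sketch): a minimal route-map that matches the ipv4 prefix-list
+# created above and sets local-preference; names are illustrative.
+#
+#     input_dict = {
+#         "r1": {
+#             "route_maps": {
+#                 "rmap_match_pf_1": [{
+#                     "action": "PERMIT",
+#                     "match": {"ipv4": {"prefix_lists": "pf_list_1"}},
+#                     "set": {"localpref": 150}
+#                 }]
+#             }
+#         }
+#     }
+#     result = create_route_maps(tgen, input_dict)
+#     assert result is True, result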
+
+
+#############################################
+# Verification APIs
+#############################################
+def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
+ """
+    Data will be read from input_dict or the input JSON file. The API will
+    generate the same prefixes that were configured by either
+    create_static_routes() or advertise_networks_using_network_command() and
+    will verify that each prefix/route and its next_hop is present in the
+    "show ip/ipv6 route {bgp/static} json" command output.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test, for which user wants to test the data
+ * `input_dict` : input dict, has details of static routes
+ * `next_hop`[optional]: next_hop which needs to be verified,
+ default: static
+ * `protocol`[optional]: protocol, default = None
+
+ Usage
+ -----
+ # RIB can be verified for static routes OR network advertised using
+ network command. Following are input_dicts to create static routes
+ and advertise networks using network command. Any one of the input_dict
+ can be passed to verify_rib() to verify routes in DUT"s RIB.
+
+ # Creating static routes for r1
+ input_dict = {
+ "r1": {
+ "static_routes": [{"network": "10.0.20.1/32", "no_of_ip": 9, \
+ "admin_distance": 100, "next_hop": "10.0.0.2", "tag": 4001}]
+ }}
+ # Advertising networks using network command in router r1
+ input_dict = {
+ "r1": {
+ "advertise_networks": [{"start_ip": "20.0.0.0/32",
+ "no_of_network": 10},
+ {"start_ip": "30.0.0.0/32"}]
+ }}
+ # Verifying ipv4 routes in router r1 learned via BGP
+ dut = "r2"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol = protocol)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_rib()")
+
+ router_list = tgen.routers()
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.iteritems():
+ if router != dut:
+ continue
+
+ # Verifying RIB routes
+ if addr_type == "ipv4":
+ if protocol:
+ command = "show ip route {} json".format(protocol)
+ else:
+ command = "show ip route json"
+ else:
+ if protocol:
+ command = "show ipv6 route {} json".format(protocol)
+ else:
+ command = "show ipv6 route json"
+
+ sleep(2)
+ logger.info("Checking router %s RIB:", router)
+ rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if bool(rib_routes_json) is False:
+ errormsg = "No {} route found in rib of router {}..". \
+ format(protocol, router)
+ return errormsg
+
+ if "static_routes" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["static_routes"]
+ st_found = False
+ nh_found = False
+ found_routes = []
+ missing_routes = []
+ for static_route in static_routes:
+ network = static_route["network"]
+ if "no_of_ip" in static_route:
+ no_of_ip = static_route["no_of_ip"]
+ else:
+ no_of_ip = 0
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ for st_rt in ip_list:
+ st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
+
+ if st_rt in rib_routes_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ found_hops = [rib_r["ip"] for rib_r in
+ rib_routes_json[st_rt][0][
+ "nexthops"]]
+ for nh in next_hop:
+ nh_found = False
+ if nh and nh in found_hops:
+ nh_found = True
+ else:
+ errormsg = ("Nexthop {} is Missing for {}"
+ " route {} in RIB of router"
+ " {}\n".format(next_hop,
+ protocol,
+ st_rt, dut))
+
+ return errormsg
+ else:
+ missing_routes.append(st_rt)
+ if nh_found:
+ logger.info("Found next_hop %s for all routes in RIB of"
+ " router %s\n", next_hop, dut)
+
+ if not st_found and len(missing_routes) > 0:
+ errormsg = "Missing route in RIB of router {}, routes: " \
+ "{}\n".format(dut, missing_routes)
+ return errormsg
+
+ logger.info("Verified routes in router %s RIB, found routes"
+ " are: %s\n", dut, found_routes)
+
+ advertise_network = input_dict[routerInput].setdefault(
+ "advertise_networks", {})
+ if advertise_network:
+ found_routes = []
+ missing_routes = []
+ found = False
+ for advertise_network_dict in advertise_network:
+ start_ip = advertise_network_dict["network"]
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 0
+
+ # Generating IPs for verification
+ ip_list = generate_ips(start_ip, no_of_network)
+ for st_rt in ip_list:
+ st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
+
+ if st_rt in rib_routes_json:
+ found = True
+ found_routes.append(st_rt)
+ else:
+ missing_routes.append(st_rt)
+
+ if not found and len(missing_routes) > 0:
+ errormsg = "Missing route in RIB of router {}, are: {}" \
+ " \n".format(dut, missing_routes)
+ return errormsg
+
+ logger.info("Verified routes in router %s RIB, found routes"
+ " are: %s", dut, found_routes)
+
+ logger.info("Exiting lib API: verify_rib()")
+ return True
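+
+# Example usage (sketch): verifies on DUT "r2" that the static routes
+# configured on "r1" were learned via BGP; `input_dict` is the same one passed
+# to create_static_routes().
+#
+#     result = verify_rib(tgen, "ipv4", "r2", input_dict, protocol="bgp")
+#     assert result is True, result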
+
+
+def verify_admin_distance_for_static_routes(tgen, input_dict):
+ """
+ API to verify admin distance for static routes as defined in input_dict/
+ input JSON by running show ip/ipv6 route json command.
+
+ Parameter
+ ---------
+ * `tgen` : topogen object
+ * `input_dict`: having details like - for which router and static routes
+                    admin distance needs to be verified
+ Usage
+ -----
+ # To verify admin distance is 10 for prefix 10.0.20.1/32 having next_hop
+ 10.0.0.2 in router r1
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "admin_distance": 10,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = verify_admin_distance_for_static_routes(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_admin_distance_for_static_routes()")
+
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ for static_route in input_dict[router]["static_routes"]:
+ addr_type = validate_ip_address(static_route["network"])
+ # Command to execute
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+ show_ip_route_json = rnode.vtysh_cmd(command, isjson=True)
+
+ logger.info("Verifying admin distance for static route %s"
+ " under dut %s:", static_route, router)
+ network = static_route["network"]
+ next_hop = static_route["next_hop"]
+ admin_distance = static_route["admin_distance"]
+            if network in show_ip_route_json:
+                route_data = show_ip_route_json[network][0]
+                if route_data["nexthops"][0]["ip"] == next_hop:
+ if route_data["distance"] != admin_distance:
+ errormsg = ("Verification failed: admin distance"
+ " for static route {} under dut {},"
+ " found:{} but expected:{}".
+ format(static_route, router,
+ route_data["distance"],
+ admin_distance))
+ return errormsg
+ else:
+ logger.info("Verification successful: admin"
+ " distance for static route %s under"
+ " dut %s, found:%s", static_route,
+ router, route_data["distance"])
+
+ else:
+ errormsg = ("Static route {} not found in "
+ "show_ip_route_json for dut {}".
+ format(network, router))
+ return errormsg
+
+ logger.info("Exiting lib API: verify_admin_distance_for_static_routes()")
+ return True
+
+
+def verify_prefix_lists(tgen, input_dict):
+ """
+ Running "show ip prefix-list" command and verifying given prefix-list
+ is present in router.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `input_dict`: data to verify prefix lists
+
+ Usage
+ -----
+    # To verify pf_list_1 is deleted from router r1
+ input_dict = {
+ "r1": {
+ "prefix_lists": ["pf_list_1"]
+ }}
+ result = verify_prefix_lists("ipv4", input_dict, tgen)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.info("Entering lib API: verify_prefix_lists()")
+
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ # Show ip prefix list
+ show_prefix_list = rnode.vtysh_cmd("show ip prefix-list")
+
+ # Verify Prefix list is deleted
+ prefix_lists_addr = input_dict[router]["prefix_lists"]
+ for addr_type in prefix_lists_addr:
+ if not check_address_types(addr_type):
+ continue
+
+ for prefix_list in prefix_lists_addr[addr_type].keys():
+ if prefix_list in show_prefix_list:
+ errormsg = ("Prefix list {} is not deleted from router"
+ " {}".format(prefix_list, router))
+ return errormsg
+
+ logger.info("Prefix list %s is/are deleted successfully"
+ " from router %s", prefix_list, router)
+
+ logger.info("Exiting lib API: verify_prefix_lissts()")
+ return True
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
new file mode 100644
index 0000000000..4130451d2e
--- /dev/null
+++ b/tests/topotests/lib/topojson.py
@@ -0,0 +1,193 @@
+#
+# Modified work Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Original work Copyright (c) 2018 by Network Device Education
+# Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+from collections import OrderedDict
+from json import dumps as json_dumps
+import ipaddr
+import pytest
+
+# Import topogen and topotest helpers
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ number_to_row, number_to_column,
+ load_config_to_router,
+ create_interfaces_cfg,
+ create_static_routes,
+ create_prefix_lists,
+ create_route_maps,
+)
+
+from lib.bgp import create_router_bgp
+
+def build_topo_from_json(tgen, topo):
+ """
+ Reads configuration from JSON file. Adds routers, creates interface
+ names dynamically and link routers as defined in JSON to create
+ topology. Assigns IPs dynamically to all interfaces of each router.
+
+ * `tgen`: Topogen object
+ * `topo`: json file data
+ """
+
+ listRouters = []
+ for routerN in sorted(topo['routers'].iteritems()):
+ logger.info('Topo: Add router {}'.format(routerN[0]))
+ tgen.add_router(routerN[0])
+ listRouters.append(routerN[0])
+
+ listRouters.sort()
+ if 'ipv4base' in topo:
+ ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4'])
+ ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask'])
+ if topo['link_ip_start']['v4mask'] < 32:
+ ipv4Next += 1
+ if 'ipv6base' in topo:
+ ipv6Next = ipaddr.IPv6Address(topo['link_ip_start']['ipv6'])
+ ipv6Step = 2 ** (128 - topo['link_ip_start']['v6mask'])
+ if topo['link_ip_start']['v6mask'] < 127:
+ ipv6Next += 1
+ for router in listRouters:
+ topo['routers'][router]['nextIfname'] = 0
+
+ while listRouters != []:
+ curRouter = listRouters.pop(0)
+ # Physical Interfaces
+ if 'links' in topo['routers'][curRouter]:
+ def link_sort(x):
+ if x == 'lo':
+ return 0
+ elif 'link' in x:
+ return int(x.split('-link')[1])
+ else:
+ return int(x.split('r')[1])
+ for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \
+ iteritems(),
+ key=lambda x: link_sort(x[0])):
+ currRouter_lo_json = \
+ topo['routers'][curRouter]['links'][destRouterLink]
+ # Loopback interfaces
+ if 'type' in data and data['type'] == 'loopback':
+ if 'ipv4' in currRouter_lo_json and \
+ currRouter_lo_json['ipv4'] == 'auto':
+ currRouter_lo_json['ipv4'] = '{}{}.{}/{}'. \
+ format(topo['lo_prefix']['ipv4'], number_to_row(curRouter), \
+ number_to_column(curRouter), topo['lo_prefix']['v4mask'])
+ if 'ipv6' in currRouter_lo_json and \
+ currRouter_lo_json['ipv6'] == 'auto':
+ currRouter_lo_json['ipv6'] = '{}{}:{}/{}'. \
+ format(topo['lo_prefix']['ipv6'], number_to_row(curRouter), \
+ number_to_column(curRouter), topo['lo_prefix']['v6mask'])
+
+ if "-" in destRouterLink:
+                    # Splitting and storing destRouterLink data in tempList
+ tempList = destRouterLink.split("-")
+
+ # destRouter
+ destRouter = tempList.pop(0)
+
+ # Current Router Link
+ tempList.insert(0, curRouter)
+ curRouterLink = "-".join(tempList)
+ else:
+ destRouter = destRouterLink
+ curRouterLink = curRouter
+
+ if destRouter in listRouters:
+ currRouter_link_json = \
+ topo['routers'][curRouter]['links'][destRouterLink]
+ destRouter_link_json = \
+ topo['routers'][destRouter]['links'][curRouterLink]
+
+ # Assigning name to interfaces
+ currRouter_link_json['interface'] = \
+ '{}-{}-eth{}'.format(curRouter, destRouter, topo['routers'] \
+ [curRouter]['nextIfname'])
+ destRouter_link_json['interface'] = \
+ '{}-{}-eth{}'.format(destRouter, curRouter, topo['routers'] \
+ [destRouter]['nextIfname'])
+
+ topo['routers'][curRouter]['nextIfname'] += 1
+ topo['routers'][destRouter]['nextIfname'] += 1
+
+ # Linking routers to each other as defined in JSON file
+ tgen.gears[curRouter].add_link(tgen.gears[destRouter],
+ topo['routers'][curRouter]['links'][destRouterLink] \
+ ['interface'], topo['routers'][destRouter]['links'] \
+ [curRouterLink]['interface'])
+
+ # IPv4
+ if 'ipv4' in currRouter_link_json:
+ if currRouter_link_json['ipv4'] == 'auto':
+ currRouter_link_json['ipv4'] = \
+ '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \
+ 'v4mask'])
+ destRouter_link_json['ipv4'] = \
+ '{}/{}'.format(ipv4Next + 1, topo['link_ip_start'][ \
+ 'v4mask'])
+ ipv4Next += ipv4Step
+ # IPv6
+ if 'ipv6' in currRouter_link_json:
+ if currRouter_link_json['ipv6'] == 'auto':
+ currRouter_link_json['ipv6'] = \
+ '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \
+ 'v6mask'])
+ destRouter_link_json['ipv6'] = \
+ '{}/{}'.format(ipv6Next + 1, topo['link_ip_start'][ \
+ 'v6mask'])
+ ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step)
+
+ logger.debug("Generated link data for router: %s\n%s", curRouter,
+ json_dumps(topo["routers"][curRouter]["links"],
+ indent=4, sort_keys=True))
+
+
+def build_config_from_json(tgen, topo, save_bkup=True):
+ """
+    Reads the initial configuration from JSON for each router, builds
+    the configuration and loads it to the router.
+
+ * `tgen`: Topogen object
+ * `topo`: json file data
+ """
+
+ func_dict = OrderedDict([
+ ("links", create_interfaces_cfg),
+ ("static_routes", create_static_routes),
+ ("prefix_lists", create_prefix_lists),
+ ("route_maps", create_route_maps),
+ ("bgp", create_router_bgp)
+ ])
+
+ data = topo["routers"]
+ for func_type in func_dict.keys():
+ logger.info('Building configuration for {}'.format(func_type))
+
+ func_dict.get(func_type)(tgen, data, build=True)
+
+ for router in sorted(topo['routers'].keys()):
+ logger.info('Configuring router {}...'.format(router))
+
+ result = load_config_to_router(tgen, router, save_bkup)
+ if not result:
+ logger.info("Failed while configuring {}".format(router))
+ pytest.exit(1)
+
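For illustration, the sequential link-address allocation implemented above can be reduced to a small standalone sketch. The step value and sample topology below are assumptions for the example only; the real helper takes its inputs from the topology JSON's link_ip_start block.

    # Minimal sketch (assumed values) of the auto-addressing scheme used in
    # build_topo_from_json: every router-to-router link takes the next
    # address pair from a running counter that advances by a fixed step.
    import ipaddress  # Python 3 stdlib; the library above uses the older 'ipaddr'

    link_ip_start = {"ipv4": "10.0.0.0", "v4mask": 30}   # hypothetical values
    ipv4_step = 2 ** (32 - link_ip_start["v4mask"])      # assumed per-link step

    ipv4_next = ipaddress.IPv4Address(link_ip_start["ipv4"])
    for cur, dst in [("r1", "r2"), ("r1", "r3"), ("r2", "r3")]:
        cur_addr = "{}/{}".format(ipv4_next, link_ip_start["v4mask"])
        dst_addr = "{}/{}".format(ipv4_next + 1, link_ip_start["v4mask"])
        print("{} <-> {}: {}  {}".format(cur, dst, cur_addr, dst_addr))
        ipv4_next += ipv4_step                           # move to the next block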
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index 119ab93857..7ea38491d8 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -10,6 +10,13 @@ norecursedirs = .git example-test lib docker
# value is 'info', but can be changed to 'debug' to provide more details.
#verbosity = info
+# Save logs to a log file; by default, logs are displayed on the console
+#frrtest_log_dir = /tmp/topotests/
+
+# Display the router's current configuration during test execution;
+# by default, the configuration is not shown
+show_router_config = True
+
# Default daemons binaries path.
#frrdir = /usr/lib/frr
#quaggadir = /usr/lib/quagga
diff --git a/vrrpd/vrrp_zebra.c b/vrrpd/vrrp_zebra.c
index 7503034de3..c15c250bdf 100644
--- a/vrrpd/vrrp_zebra.c
+++ b/vrrpd/vrrp_zebra.c
@@ -208,6 +208,8 @@ static int vrrp_zebra_if_address_del(int command, struct zclient *client,
vrrp_if_address_del(c->ifp);
+ if_set_index(c->ifp, IFINDEX_INTERNAL);
+
return 0;
}
diff --git a/yang/frr-bfdd.yang b/yang/frr-bfdd.yang
new file mode 100644
index 0000000000..24ca8f68a8
--- /dev/null
+++ b/yang/frr-bfdd.yang
@@ -0,0 +1,387 @@
+module frr-bfdd {
+ yang-version 1.1;
+ namespace "http://frrouting.org/yang/bfdd";
+ prefix frr-bfdd;
+
+ import ietf-inet-types {
+ prefix inet;
+ }
+ import ietf-yang-types {
+ prefix yang;
+ }
+ import frr-interface {
+ prefix frr-interface;
+ }
+ import frr-route-types {
+ prefix frr-route-types;
+ }
+
+ organization "Free Range Routing";
+ contact
+ "FRR Users List: <mailto:frog@lists.frrouting.org>
+ FRR Development List: <mailto:dev@lists.frrouting.org>";
+ description
+ "This module defines a model for managing FRR bfdd daemon.";
+
+ revision 2019-05-09 {
+ description "Initial revision.";
+ reference
+ "RFC 5880: Bidirectional Forwarding Detection (BFD).
+ RFC 5881: Bidirectional Forwarding Detection (BFD)
+ for IPv4 and IPv6 (Single Hop).
+ RFC 5882: Bidirectional Forwarding Detection (BFD) for Multihop Paths.";
+ }
+
+
+ /*
+ * BFD types declaration.
+ */
+ typedef multiplier {
+ description "Detection multiplier";
+ type uint8 {
+ range 2..255;
+ }
+ }
+
+ typedef discriminator {
+ description "BFD session identification";
+ type uint32 {
+ range 1..4294967295;
+ }
+ }
+
+ typedef state {
+ description "BFD session state";
+ type enumeration {
+ enum admin-down {
+ value 0;
+ description "Administratively down";
+ }
+ enum down {
+ value 1;
+ description "Down";
+ }
+ enum init {
+ value 2;
+ description "Initializing";
+ }
+ enum up {
+ value 3;
+ description "Up";
+ }
+ }
+ }
+
+ typedef diagnostic {
+ description "BFD session diagnostic";
+ type enumeration {
+ enum ok {
+ value 0;
+ description "Ok";
+ }
+ enum control-expired {
+ value 1;
+ description "Control timer expired";
+ }
+ enum echo-failed {
+ value 2;
+ description "Echo function failed";
+ }
+ enum neighbor-down {
+ value 3;
+ description "Neighbor signaled session down";
+ }
+ enum forwarding-reset {
+ value 4;
+ description "Forwarding plane reset";
+ }
+ enum path-down {
+ value 5;
+ description "Path down";
+ }
+ enum concatenated-path-down {
+ value 6;
+ description "Concatenated path down";
+ }
+ enum administratively-down {
+ value 7;
+ description "Administratively down";
+ }
+ enum reverse-concat-path-down {
+ value 8;
+ description "Reverse concatenated path down";
+ }
+ }
+ }
+
+ /*
+ * Shared BFD items.
+ */
+ grouping session-common {
+ description "Common BFD session settings";
+
+ leaf detection-multiplier {
+ type multiplier;
+ default 3;
+ description "Local session detection multiplier";
+ }
+
+ leaf desired-transmission-interval {
+ type uint32;
+ units microseconds;
+ default 300000;
+ description "Minimum desired control packet transmission interval";
+ }
+
+ leaf required-receive-interval {
+ type uint32;
+ units microseconds;
+ default 300000;
+ description "Minimum required control packet receive interval";
+ }
+
+ leaf administrative-down {
+ type boolean;
+ default true;
+ description "Disables or enables the session administratively";
+ }
+ }
+
+ grouping session-echo {
+ description "BFD session echo settings";
+
+ leaf echo-mode {
+ type boolean;
+ default false;
+ description "Use echo packets to detect failures";
+ }
+
+ leaf desired-echo-transmission-interval {
+ type uint32;
+ units microseconds;
+ default 50000;
+      description "Minimum desired echo packet transmission interval";
+ }
+ }
+
+ grouping session-states {
+ /*
+ * Local settings.
+ */
+ leaf local-discriminator {
+ type discriminator;
+ description "Local session identifier";
+ }
+
+ leaf local-state {
+ type state;
+ description "Local session state";
+ }
+
+ leaf local-diagnostic {
+ type diagnostic;
+ description "Local session diagnostic";
+ }
+
+ leaf local-multiplier {
+ type multiplier;
+ description "Local session current multiplier";
+ }
+
+ /*
+ * Remote settings.
+ */
+ leaf remote-discriminator {
+ type discriminator;
+ description "Remote session identifier";
+ }
+
+ leaf remote-state {
+ type state;
+ description "Remote session state";
+ }
+
+ leaf remote-diagnostic {
+ type diagnostic;
+      description "Remote session diagnostic";
+ }
+
+ leaf remote-multiplier {
+ type multiplier;
+ description "Remote session detection multiplier";
+ }
+
+ /*
+ * Negotiated settings.
+ */
+ leaf negotiated-transmission-interval {
+ description "Negotiated transmit interval";
+ type uint32;
+ units microseconds;
+ }
+
+ leaf negotiated-receive-interval {
+ description "Negotiated receive interval";
+ type uint32;
+ units microseconds;
+ }
+
+ leaf detection-mode {
+ description "Detection mode";
+
+ type enumeration {
+ enum async-with-echo {
+ value "1";
+ description "Async with echo";
+ }
+ enum async-without-echo {
+ value "2";
+ description "Async without echo";
+ }
+ enum demand-with-echo {
+ value "3";
+ description "Demand with echo";
+ }
+ enum demand-without-echo {
+ value "4";
+ description "Demand without echo";
+ }
+ }
+ }
+
+ /*
+ * Statistics.
+ */
+ leaf last-down-time {
+ type yang:date-and-time;
+      description "Time and date of the last time the session was down";
+ }
+
+ leaf last-up-time {
+ type yang:date-and-time;
+      description "Time and date of the last time the session was up";
+ }
+
+ leaf session-down-count {
+ type uint32;
+      description "Number of times the session went down";
+ }
+
+ leaf session-up-count {
+ type uint32;
+      description "Number of times the session went up";
+ }
+
+ leaf control-packet-input-count {
+ type uint64;
+      description "Number of control packets received";
+ }
+
+ leaf control-packet-output-count {
+ type uint64;
+      description "Number of control packets sent";
+ }
+
+ /*
+ * Echo mode operational data.
+ */
+ leaf negotiated-echo-transmission-interval {
+ type uint32;
+ units microseconds;
+ description "Negotiated echo transmit interval";
+ }
+
+ /*
+ * Statistics.
+ */
+ leaf echo-packet-input-count {
+ type uint64;
+      description "Number of echo packets received";
+ }
+
+ leaf echo-packet-output-count {
+ type uint64;
+      description "Number of echo packets sent";
+ }
+ }
+
+ /*
+ * BFD operational.
+ */
+ container bfdd {
+ container bfd {
+ presence "Present if the BFD protocol is enabled";
+
+ container sessions {
+ list single-hop {
+ key "dest-addr interface vrf";
+ description "List of single hop sessions";
+
+ leaf dest-addr {
+ type inet:ip-address;
+ description "IP address of the peer";
+ }
+
+ leaf interface {
+ type string {
+ length "0..16";
+ }
+ description "Interface to use to contact peer";
+ }
+
+ leaf vrf {
+ type string;
+ description "Virtual Routing Domain name";
+ }
+
+ leaf source-addr {
+ type inet:ip-address;
+ description "Local IP address";
+ }
+
+ uses session-common;
+ uses session-echo;
+
+ container stats {
+ uses session-states;
+ config false;
+ }
+ }
+
+ list multi-hop {
+ key "source-addr dest-addr interface vrf";
+ description "List of multi hop sessions";
+
+ leaf source-addr {
+ type inet:ip-address;
+ description "Local IP address";
+ }
+
+ leaf dest-addr {
+ type inet:ip-address;
+ description "IP address of the peer";
+ }
+
+ leaf interface {
+ type string {
+ length "0..16";
+ }
+ description "Interface to use to contact peer";
+ }
+
+ leaf vrf {
+ type string;
+ description "Virtual Routing Domain name";
+ }
+
+ uses session-common;
+
+ container stats {
+ uses session-states;
+ config false;
+ }
+ }
+ }
+ }
+ }
+}
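As a rough illustration of how the single-hop session list above is keyed (dest-addr, interface, vrf), a configuration instance might look like the following. The JSON-style encoding, top-level naming, and values are assumptions for illustration only; they are not an encoding defined by this change.

    # Hypothetical instance data for one single-hop BFD session, shaped after
    # the "single-hop" list above (key leaves: dest-addr, interface, vrf).
    single_hop_session = {
        "frr-bfdd:bfdd": {
            "bfd": {
                "sessions": {
                    "single-hop": [{
                        "dest-addr": "192.0.2.1",          # key leaf
                        "interface": "eth0",               # key leaf
                        "vrf": "default",                  # key leaf
                        "source-addr": "192.0.2.2",
                        "detection-multiplier": 3,         # default from session-common
                        "desired-transmission-interval": 300000,
                        "required-receive-interval": 300000,
                        "administrative-down": False,
                    }]
                }
            }
        }
    }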
diff --git a/yang/frr-eigrpd.yang b/yang/frr-eigrpd.yang
new file mode 100644
index 0000000000..26de7a71ae
--- /dev/null
+++ b/yang/frr-eigrpd.yang
@@ -0,0 +1,336 @@
+module frr-eigrpd {
+ yang-version 1.1;
+ namespace "http://frrouting.org/yang/eigrpd";
+ prefix frr-eigrpd;
+
+ import ietf-inet-types {
+ prefix inet;
+ }
+ import ietf-yang-types {
+ prefix yang;
+ }
+ import frr-interface {
+ prefix frr-interface;
+ }
+ import frr-route-types {
+ prefix frr-route-types;
+ }
+
+ organization "Free Range Routing";
+ contact
+ "FRR Users List: <mailto:frog@lists.frrouting.org>
+ FRR Development List: <mailto:dev@lists.frrouting.org>";
+ description
+ "This module defines a model for managing FRR eigrpd daemon.";
+
+ revision 2019-06-19 {
+ description "Initial revision.";
+ reference
+ "RFC 7868: Cisco's Enhanced Interior Gateway Routing Protocol (EIGRP).";
+ }
+
+ /*
+ * Types specification.
+ */
+ typedef autonomous-system {
+ description "Administrative domain identification for a network";
+ type uint16 {
+ range 1..65535;
+ }
+ }
+
+ typedef authentication-type {
+ description "Authentication types";
+ type enumeration {
+ enum none {
+ description "Don't authenticate";
+ value 0;
+ }
+
+ enum text {
+ description "User defined text";
+ value 1;
+ }
+
+ enum md5 {
+ description "MD5 algorithm";
+ value 2;
+ }
+
+ enum hmac-sha2 {
+ description "HMAC SHA256 algorithm";
+ value 3;
+ }
+ }
+ }
+
+ /*
+ * EIGRP operational data.
+ */
+ container eigrpd {
+ list instance {
+ key "asn vrf";
+ description "EIGRP autonomous system instance";
+
+ leaf asn {
+ description "Autonomous System Number";
+ type autonomous-system;
+ }
+
+ leaf vrf {
+ description "Virtual Routing Domain name";
+ type string {
+ length "0..16";
+ }
+ }
+
+ /*
+ * Configurations.
+ */
+ leaf router-id {
+ description "Router identification";
+ type inet:ipv4-address;
+ }
+
+ leaf-list passive-interface {
+ description "List of suppressed interfaces";
+ type string {
+ length "1..16";
+ }
+ }
+
+ leaf active-time {
+ description "ACTIVE time limit in seconds (0 disables limit)";
+ type uint16 {
+ range "0..65535";
+ }
+ units seconds;
+ default 180;
+ }
+
+ leaf variance {
+ description "Control load balance variance";
+ type uint8 {
+ range "1..128";
+ }
+ }
+
+ leaf maximum-paths {
+      description "Maximum number of paths to forward packets over";
+ type uint8 {
+ range "1..32";
+ }
+ }
+
+ container metric-weights {
+ description
+ "Metrics and parameters for advertisement.
+
+ EIGRP calculates the composite metric with the following formula:
+
+ metric = 256 * ({(K1*BW) + [(K2*BW)/(256-LOAD)] + (K3*DELAY)} *
+ (K5/(REL+K4)))
+
+ Composite calculation:
+
+           metric = [(K1*Net-Throughput) + Latency + (K6*ExtAttr)] *
+                      (K5/(K4+Rel))
+
+ RFC 7868 Sections 5.5 and 5.6.2.5.";
+
+ leaf K1 {
+ description "Bandwidth coefficient.";
+ type uint8 {
+ range "0..255";
+ }
+ }
+
+ leaf K2 {
+ description "Bandwidth on load coefficient.";
+ type uint8 {
+ range "0..255";
+ }
+ }
+
+ leaf K3 {
+ description "Delay or latency-based coefficient.";
+ type uint8 {
+ range "0..255";
+ }
+ }
+
+ leaf K4 {
+ description "Link quality coefficient.";
+ type uint8 {
+ range "0..255";
+ }
+ }
+
+ leaf K5 {
+ description "Packet loss coefficient.";
+ type uint8 {
+ range "0..255";
+ }
+ }
+
+ leaf K6 {
+ description "Jitter coefficient.";
+ type uint8 {
+ range "0..255";
+ }
+ }
+ }
+
+ leaf-list network {
+ description "Enable EIGRP on the specific network";
+ type inet:ipv4-prefix;
+ }
+
+ leaf-list neighbor {
+ description "Specific EIGRP neighbor";
+ type inet:ipv4-prefix;
+ }
+
+ list redistribute {
+ description "Redistribute routes learned from other routing protocols";
+
+ key "protocol";
+
+ leaf protocol {
+ description "Routing protocol";
+ type frr-route-types:frr-route-types-v4;
+ must '. != "eigrp"';
+ }
+
+ leaf route-map {
+ description
+ "Applies the conditions of the specified route-map to
+ routes that are redistributed into the EIGRP routing
+ instance";
+ type string {
+ length "1..max";
+ }
+ }
+
+ container metrics {
+ description "Metric for the redistributed routes";
+
+ leaf bandwidth {
+ description "Bandwidth metric in Kbits per second";
+ type uint32 {
+ range "1..4294967295";
+ }
+ }
+
+ leaf delay {
+ description "Delay metric";
+ units microseconds;
+ type uint32 {
+ range "0..4294967295";
+ }
+ }
+
+ leaf reliability {
+ description "Reliability metric";
+ type uint32 {
+ range "0..255";
+ }
+ }
+
+ leaf load {
+ description "Effective bandwidth usage";
+ type uint32 {
+ range "1..255";
+ }
+ }
+
+ leaf mtu {
+ description "Path Maximum Transmission Unit";
+ type uint32 {
+ range "1..65535";
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ * EIGRP interface configurations.
+ */
+ augment "/frr-interface:lib/frr-interface:interface" {
+ container eigrp {
+ description "EIGRP interface parameters";
+
+ leaf delay {
+ description "Throughput delay";
+ type uint32 {
+ range "1..16777215";
+ }
+ default 10;
+ }
+
+ leaf bandwidth {
+ description "Interface bandwidth value";
+ type uint32 {
+ range "1..10000000";
+ }
+ default 100000;
+ }
+
+ leaf hello-interval {
+ description "Hello packet interval";
+ type uint16 {
+ range "1..65535";
+ }
+ units seconds;
+ default 5;
+ }
+
+ leaf hold-time {
+      description "Time to wait before considering the neighbor down";
+ type uint16 {
+ range "1..65535";
+ }
+ units seconds;
+ default 15;
+ }
+
+ leaf split-horizon {
+      description "Use the split horizon loop-prevention technique";
+ type boolean;
+ default true;
+ }
+
+ /*
+ * Per AS configuration.
+ */
+ list instance {
+ description "Autonomous System specific configuration";
+
+ key "asn";
+
+ leaf asn {
+ description "Autonomous System Number";
+ type autonomous-system;
+ }
+
+ leaf-list summarize-addresses {
+        description "Perform address summarization";
+ type inet:ipv4-prefix;
+ }
+
+ leaf authentication {
+ description "Authentication digest algorithm";
+ type authentication-type;
+ default "none";
+ }
+
+ leaf keychain {
+ description "FRR key chain name to use with authentication";
+ type string;
+ }
+ }
+ }
+ }
+}
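For the metric-weights coefficients defined above, a short sketch of the classic composite-metric formula quoted in the description may help. The input scaling into EIGRP bandwidth/delay units is not shown, the sample values are hypothetical, and the K5 == 0 convention follows RFC 7868.

    # Sketch of the classic EIGRP composite metric from the metric-weights
    # description (RFC 7868, Section 5.5); inputs are assumed to be pre-scaled.
    def eigrp_classic_metric(bw, delay, load, rel, k1, k2, k3, k4, k5):
        metric = (k1 * bw) + (k2 * bw) // (256 - load) + (k3 * delay)
        if k5 != 0:                  # reliability term applies only when K5 != 0
            metric = metric * k5 // (rel + k4)
        return 256 * metric

    # Default coefficients K1 = K3 = 1, K2 = K4 = K5 = 0 reduce this to
    # 256 * (BW + DELAY):
    print(eigrp_classic_metric(bw=256, delay=100, load=1, rel=255,
                               k1=1, k2=0, k3=1, k4=0, k5=0))   # -> 91136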
diff --git a/yang/subdir.am b/yang/subdir.am
index c95ec4dbff..4b3baeea9d 100644
--- a/yang/subdir.am
+++ b/yang/subdir.am
@@ -24,6 +24,10 @@ dist_yangmodels_DATA += yang/frr-test-module.yang
dist_yangmodels_DATA += yang/frr-interface.yang
dist_yangmodels_DATA += yang/frr-route-types.yang
+if BFDD
+dist_yangmodels_DATA += yang/frr-bfdd.yang
+endif
+
if RIPD
dist_yangmodels_DATA += yang/frr-ripd.yang
endif
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 61200806ba..9a638f8e7f 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2432,6 +2432,7 @@ static inline void zread_iptable(ZAPI_HANDLER_ARGS)
STREAM_GETW(s, zpi.tcp_mask_flags);
STREAM_GETC(s, zpi.dscp_value);
STREAM_GETC(s, zpi.fragment);
+ STREAM_GETC(s, zpi.protocol);
STREAM_GETL(s, zpi.nb_interface);
zebra_pbr_iptable_update_interfacelist(s, &zpi);
diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c
index a82dd4c24a..f95a4ff950 100644
--- a/zebra/zebra_pbr.c
+++ b/zebra/zebra_pbr.c
@@ -373,6 +373,7 @@ uint32_t zebra_pbr_iptable_hash_key(const void *arg)
key = jhash_1word(iptable->tcp_flags, key);
key = jhash_1word(iptable->tcp_mask_flags, key);
key = jhash_1word(iptable->dscp_value, key);
+ key = jhash_1word(iptable->protocol, key);
key = jhash_1word(iptable->fragment, key);
key = jhash_1word(iptable->vrf_id, key);
@@ -414,6 +415,8 @@ bool zebra_pbr_iptable_hash_equal(const void *arg1, const void *arg2)
return false;
if (r1->fragment != r2->fragment)
return false;
+ if (r1->protocol != r2->protocol)
+ return false;
return true;
}
@@ -1095,6 +1098,10 @@ static void zebra_pbr_show_iptable_unit(struct zebra_pbr_iptable *iptable,
" not" : "", lookup_msg(fragment_value_str,
iptable->fragment, val_str));
}
+ if (iptable->protocol) {
+ vty_out(vty, "\t protocol %d\n",
+ iptable->protocol);
+ }
ret = hook_call(zebra_pbr_iptable_get_stat, iptable, &pkts,
&bytes);
if (ret && pkts > 0)
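The zebra_pbr.c hunks above add the new protocol field to both the iptable hash key and the equality test. As a language-neutral illustration of why the two must stay in sync (this is not FRR code; the class and values below are made up), a field that distinguishes two rules has to participate in both operations:

    # Conceptual illustration only: a discriminating field must feed both
    # hashing and equality, as 'protocol' now does in
    # zebra_pbr_iptable_hash_key() and zebra_pbr_iptable_hash_equal().
    from dataclasses import dataclass

    @dataclass(frozen=True)          # derives __eq__ and __hash__ over all fields
    class IptableRule:
        dscp_value: int
        fragment: int
        protocol: int                # the newly added discriminating field

    tcp_rule = IptableRule(dscp_value=0, fragment=0, protocol=6)    # TCP
    udp_rule = IptableRule(dscp_value=0, fragment=0, protocol=17)   # UDP
    assert tcp_rule != udp_rule      # distinct rules land in distinct buckets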
diff --git a/zebra/zebra_pbr.h b/zebra/zebra_pbr.h
index cc1cc5acd5..fcc9c5c39a 100644
--- a/zebra/zebra_pbr.h
+++ b/zebra/zebra_pbr.h
@@ -145,6 +145,7 @@ struct zebra_pbr_iptable {
uint16_t tcp_mask_flags;
uint8_t dscp_value;
uint8_t fragment;
+ uint8_t protocol;
uint32_t nb_interface;