diff options
771 files changed, 37279 insertions, 16674 deletions
diff --git a/alpine/APKBUILD.in b/alpine/APKBUILD.in index f740a34583..d4657dfe55 100644 --- a/alpine/APKBUILD.in +++ b/alpine/APKBUILD.in @@ -2,7 +2,7 @@ pkgname=frr pkgver=@VERSION@ pkgrel=0 -pkgdesc="Free Range Routing is a fork of quagga" +pkgdesc="FRRouting is a fork of quagga" url="https://frrouting.org/" arch="x86_64" license="GPL-2.0" diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c index 7f9a13c271..5d66e51fa7 100644 --- a/babeld/babel_interface.c +++ b/babeld/babel_interface.c @@ -28,6 +28,7 @@ THE SOFTWARE. #include "vector.h" #include "distribute.h" #include "lib_errors.h" +#include "network.h" #include "babel_main.h" #include "util.h" @@ -58,11 +59,13 @@ static void babel_interface_free (babel_interface_nfo *bi); static vector babel_enable_if; /* enable interfaces (by cmd). */ -static struct cmd_node babel_interface_node = /* babeld's interface node. */ -{ - INTERFACE_NODE, - "%s(config-if)# ", - 1 /* VTYSH */ +static int interface_config_write(struct vty *vty); +static struct cmd_node babel_interface_node = { + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = interface_config_write, }; @@ -1247,7 +1250,7 @@ babel_if_init(void) babel_enable_if = vector_init (1); /* install interface node and commands */ - install_node (&babel_interface_node, interface_config_write); + install_node(&babel_interface_node); if_cmd_init(); install_element(BABEL_NODE, &babel_network_cmd); @@ -1394,7 +1397,7 @@ babel_interface_allocate (void) /* All flags are unset */ babel_ifp->bucket_time = babel_now.tv_sec; babel_ifp->bucket = BUCKET_TOKENS_MAX; - babel_ifp->hello_seqno = (random() & 0xFFFF); + babel_ifp->hello_seqno = (frr_weak_random() & 0xFFFF); babel_ifp->rtt_min = 10000; babel_ifp->rtt_max = 120000; babel_ifp->max_rtt_penalty = 150; diff --git a/babeld/babel_main.c b/babeld/babel_main.c index 6f4b905c15..e7ba29ed06 100644 --- a/babeld/babel_main.c +++ b/babeld/babel_main.c @@ 
-136,10 +136,10 @@ struct option longopts[] = { 0 } }; -static const struct frr_yang_module_info *const babeld_yang_modules[] = - { - &frr_interface_info, - }; +static const struct frr_yang_module_info *const babeld_yang_modules[] = { + &frr_interface_info, + &frr_vrf_info, +}; FRR_DAEMON_INFO(babeld, BABELD, .vty_port = BABEL_VTY_PORT, diff --git a/babeld/babeld.c b/babeld/babeld.c index a7a3481998..09955cfbef 100644 --- a/babeld/babeld.c +++ b/babeld/babeld.c @@ -30,6 +30,7 @@ THE SOFTWARE. #include "filter.h" #include "plist.h" #include "lib_errors.h" +#include "network.h" #include "babel_main.h" #include "babeld.h" @@ -69,11 +70,14 @@ static time_t expiry_time; static time_t source_expiry_time; /* Babel node structure. */ +static int babel_config_write (struct vty *vty); static struct cmd_node cmd_babel_node = { + .name = "babel", .node = BABEL_NODE, + .parent_node = CONFIG_NODE, .prompt = "%s(config-router)# ", - .vtysh = 1, + .config_write = babel_config_write, }; /* print current babel configuration on vty */ @@ -210,7 +214,7 @@ babel_read_protocol (struct thread *thread) static int babel_init_routing_process(struct thread *thread) { - myseqno = (random() & 0xFFFF); + myseqno = (frr_weak_random() & 0xFFFF); babel_get_myid(); babel_load_state_file(); debugf(BABEL_DEBUG_COMMON, "My ID is : %s.", format_eui64(myid)); @@ -719,7 +723,7 @@ void babeld_quagga_init(void) { - install_node(&cmd_babel_node, &babel_config_write); + install_node(&cmd_babel_node); install_element(CONFIG_NODE, &router_babel_cmd); install_element(CONFIG_NODE, &no_router_babel_cmd); diff --git a/babeld/net.c b/babeld/net.c index d1f6a44142..40716a701d 100644 --- a/babeld/net.c +++ b/babeld/net.c @@ -144,7 +144,7 @@ babel_send(int s, iovec[1].iov_base = buf2; iovec[1].iov_len = buflen2; memset(&msg, 0, sizeof(msg)); - msg.msg_name = (struct sockaddr*)sin; + msg.msg_name = sin; msg.msg_namelen = slen; msg.msg_iov = iovec; msg.msg_iovlen = 2; diff --git a/babeld/util.c b/babeld/util.c index 
c6606e4f0e..e99bd861dc 100644 --- a/babeld/util.c +++ b/babeld/util.c @@ -39,6 +39,8 @@ THE SOFTWARE. #include <netinet/in.h> #include <arpa/inet.h> +#include "lib/network.h" + #include "babel_main.h" #include "babeld.h" #include "util.h" @@ -51,7 +53,7 @@ roughly(int value) else if(value <= 1) return value; else - return value * 3 / 4 + random() % (value / 2); + return value * 3 / 4 + frr_weak_random() % (value / 2); } /* d = s1 - s2 */ @@ -145,7 +147,7 @@ timeval_min_sec(struct timeval *d, time_t secs) { if(d->tv_sec == 0 || d->tv_sec > secs) { d->tv_sec = secs; - d->tv_usec = random() % 1000000; + d->tv_usec = frr_weak_random() % 1000000; } } diff --git a/bfdd/bfd.c b/bfdd/bfd.c index 222bf32c94..f9e572db4d 100644 --- a/bfdd/bfd.c +++ b/bfdd/bfd.c @@ -28,6 +28,7 @@ #include <zebra.h> #include "lib/jhash.h" +#include "lib/network.h" #include "bfd.h" @@ -131,7 +132,7 @@ int bfd_session_enable(struct bfd_session *bs) if (bs->key.vrfname[0]) { vrf = vrf_lookup_by_name(bs->key.vrfname); if (vrf == NULL) { - log_error( + zlog_err( "session-enable: specified VRF doesn't exists."); return 0; } @@ -143,15 +144,15 @@ int bfd_session_enable(struct bfd_session *bs) else ifp = if_lookup_by_name_all_vrf(bs->key.ifname); if (ifp == NULL) { - log_error( - "session-enable: specified interface doesn't exists."); + zlog_err( + "session-enable: specified interface doesn't exists."); return 0; } if (bs->key.ifname[0] && !vrf) { vrf = vrf_lookup_by_id(ifp->vrf_id); if (vrf == NULL) { - log_error( - "session-enable: specified VRF doesn't exists."); + zlog_err( + "session-enable: specified VRF doesn't exists."); return 0; } } @@ -164,12 +165,14 @@ int bfd_session_enable(struct bfd_session *bs) assert(bs->vrf); if (bs->key.ifname[0] - && BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) + && CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) bs->ifp = ifp; /* Sanity check: don't leak open sockets. 
*/ if (bs->sock != -1) { - log_debug("session-enable: previous socket open"); + if (bglobal.debug_peer_event) + zlog_debug("session-enable: previous socket open"); + close(bs->sock); bs->sock = -1; } @@ -179,7 +182,7 @@ int bfd_session_enable(struct bfd_session *bs) * could use the destination port (3784) for the source * port we wouldn't need a socket per session. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) { psock = bp_peer_socket(bs); if (psock == -1) return 0; @@ -234,8 +237,8 @@ static uint32_t ptm_bfd_gen_ID(void) * random session identification numbers. */ do { - session_id = ((random() << 16) & 0xFFFF0000) - | (random() & 0x0000FFFF); + session_id = ((frr_weak_random() << 16) & 0xFFFF0000) + | (frr_weak_random() & 0x0000FFFF); } while (session_id == 0 || bfd_id_lookup(session_id) != NULL); return session_id; @@ -256,7 +259,7 @@ void ptm_bfd_start_xmt_timer(struct bfd_session *bfd, bool is_echo) * between 75% and 90%. */ maxpercent = (bfd->detect_mult == 1) ? 
16 : 26; - jitter = (xmt_TO * (75 + (random() % maxpercent))) / 100; + jitter = (xmt_TO * (75 + (frr_weak_random() % maxpercent))) / 100; /* XXX remove that division above */ if (is_echo) @@ -287,7 +290,7 @@ void ptm_bfd_echo_stop(struct bfd_session *bfd) { bfd->echo_xmt_TO = 0; bfd->echo_detect_TO = 0; - BFD_UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); + UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); bfd_echo_xmttimer_delete(bfd); bfd_echo_recvtimer_delete(bfd); @@ -318,9 +321,10 @@ void ptm_bfd_sess_up(struct bfd_session *bfd) if (old_state != bfd->ses_state) { bfd->stats.session_up++; - log_info("state-change: [%s] %s -> %s", bs_to_string(bfd), - state_list[old_state].str, - state_list[bfd->ses_state].str); + if (bglobal.debug_peer_event) + zlog_debug("state-change: [%s] %s -> %s", + bs_to_string(bfd), state_list[old_state].str, + state_list[bfd->ses_state].str); } } @@ -352,15 +356,16 @@ void ptm_bfd_sess_dn(struct bfd_session *bfd, uint8_t diag) control_notify(bfd, PTM_BFD_DOWN); /* Stop echo packet transmission if they are active */ - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) ptm_bfd_echo_stop(bfd); if (old_state != bfd->ses_state) { bfd->stats.session_down++; - log_info("state-change: [%s] %s -> %s reason:%s", - bs_to_string(bfd), state_list[old_state].str, - state_list[bfd->ses_state].str, - get_diag_str(bfd->local_diag)); + if (bglobal.debug_peer_event) + zlog_debug("state-change: [%s] %s -> %s reason:%s", + bs_to_string(bfd), state_list[old_state].str, + state_list[bfd->ses_state].str, + get_diag_str(bfd->local_diag)); } } @@ -522,8 +527,7 @@ int bfd_session_update_label(struct bfd_session *bs, const char *nlabel) return -1; } - if (pl_new(nlabel, bs) == NULL) - return -1; + pl_new(nlabel, bs); return 0; } @@ -548,19 +552,19 @@ static void _bfd_session_update(struct bfd_session *bs, { if (bpc->bpc_echo) { /* Check if echo mode is already active. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) goto skip_echo; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); /* Activate/update echo receive timeout timer. */ bs_echo_timer_handler(bs); } else { /* Check if echo mode is already disabled. */ - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) goto skip_echo; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); ptm_bfd_echo_stop(bs); } @@ -582,10 +586,10 @@ skip_echo: if (bpc->bpc_shutdown) { /* Check if already shutdown. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Disable all events. */ bfd_recvtimer_delete(bs); @@ -602,10 +606,10 @@ skip_echo: ptm_bfd_snd(bs, 0); } else { /* Check if already working. */ - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Change and notify state change. */ bs->ses_state = PTM_BFD_DOWN; @@ -616,15 +620,15 @@ skip_echo: bfd_xmttimer_update(bs, bs->xmt_TO); } if (bpc->bpc_cbit) { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) return; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); + SET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); } else { - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) return; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); } } @@ -681,10 +685,6 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc) /* Get BFD session storage with its defaults. 
*/ bfd = bfd_session_new(); - if (bfd == NULL) { - log_error("session-new: allocation failed"); - return NULL; - } /* * Store interface/VRF name in case we need to delay session @@ -703,7 +703,7 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc) /* Copy remaining data. */ if (bpc->bpc_ipv4 == false) - BFD_SET_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6); + SET_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6); bfd->key.family = (bpc->bpc_ipv4) ? AF_INET : AF_INET6; switch (bfd->key.family) { @@ -727,7 +727,7 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc) } if (bpc->bpc_mhop) - BFD_SET_FLAG(bfd->flags, BFD_SESS_FLAG_MH); + SET_FLAG(bfd->flags, BFD_SESS_FLAG_MH); bfd->key.mhop = bpc->bpc_mhop; @@ -758,7 +758,8 @@ struct bfd_session *bs_registrate(struct bfd_session *bfd) if (bfd->key.ifname[0] || bfd->key.vrfname[0] || bfd->sock == -1) bs_observer_add(bfd); - log_info("session-new: %s", bs_to_string(bfd)); + if (bglobal.debug_peer_event) + zlog_debug("session-new: %s", bs_to_string(bfd)); control_notify_config(BCM_NOTIFY_CONFIG_ADD, bfd); @@ -776,13 +777,14 @@ int ptm_bfd_sess_del(struct bfd_peer_cfg *bpc) /* This pointer is being referenced, don't let it be deleted. 
*/ if (bs->refcount > 0) { - log_error("session-delete: refcount failure: %" PRIu64 - " references", - bs->refcount); + zlog_err("session-delete: refcount failure: %" PRIu64 + " references", + bs->refcount); return -1; } - log_info("session-delete: %s", bs_to_string(bs)); + if (bglobal.debug_peer_event) + zlog_debug("session-delete: %s", bs_to_string(bs)); control_notify_config(BCM_NOTIFY_CONFIG_DELETE, bs); @@ -849,7 +851,9 @@ static void bs_down_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: unhandled neighbor state: %d", nstate); + if (bglobal.debug_peer_event) + zlog_debug("state-change: unhandled neighbor state: %d", + nstate); break; } } @@ -876,7 +880,9 @@ static void bs_init_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: unhandled neighbor state: %d", nstate); + if (bglobal.debug_peer_event) + zlog_debug("state-change: unhandled neighbor state: %d", + nstate); break; } } @@ -901,16 +907,16 @@ static void bs_neighbour_admin_down_handler(struct bfd_session *bfd, control_notify(bfd, PTM_BFD_ADM_DOWN); /* Stop echo packet transmission if they are active */ - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) ptm_bfd_echo_stop(bfd); if (old_state != bfd->ses_state) { bfd->stats.session_down++; - - log_info("state-change: [%s] %s -> %s reason:%s", - bs_to_string(bfd), state_list[old_state].str, - state_list[bfd->ses_state].str, - get_diag_str(bfd->local_diag)); + if (bglobal.debug_peer_event) + zlog_debug("state-change: [%s] %s -> %s reason:%s", + bs_to_string(bfd), state_list[old_state].str, + state_list[bfd->ses_state].str, + get_diag_str(bfd->local_diag)); } } @@ -932,7 +938,9 @@ static void bs_up_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: unhandled neighbor state: %d", nstate); + if (bglobal.debug_peer_event) + zlog_debug("state-change: unhandled neighbor state: %d", + 
nstate); break; } } @@ -954,8 +962,9 @@ void bs_state_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: [%s] is in invalid state: %d", - bs_to_string(bs), nstate); + if (bglobal.debug_peer_event) + zlog_debug("state-change: [%s] is in invalid state: %d", + bs_to_string(bs), nstate); break; } } @@ -976,14 +985,14 @@ void bs_echo_timer_handler(struct bfd_session *bs) * Section 3). * - Check that we are already at the up state. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) == 0 - || BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) == 0 + || CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) || bs->ses_state != PTM_BFD_UP) return; /* Remote peer asked to stop echo. */ if (bs->remote_timers.required_min_echo == 0) { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) ptm_bfd_echo_stop(bs); return; @@ -1002,7 +1011,7 @@ void bs_echo_timer_handler(struct bfd_session *bs) else bs->echo_xmt_TO = bs->timers.required_min_echo; - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE) == 0 + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE) == 0 || old_timer != bs->echo_xmt_TO) ptm_bfd_echo_start(bs); } @@ -1032,20 +1041,19 @@ void bs_final_handler(struct bfd_session *bs) } /* - * Calculate detection time based on new timers. + * Calculate transmission time based on new timers. * * Transmission calculation: - * We must respect the RequiredMinRxInterval from the remote - * system: if our desired transmission timer is more than the - * minimum receive rate, then we must lower it to at least the - * minimum receive interval. + * Unless specified by exceptions at the end of Section 6.8.7, the + * transmission time will be determined by the system with the + * slowest rate. * - * RFC 5880, Section 6.8.3. + * RFC 5880, Section 6.8.7. 
*/ if (bs->timers.desired_min_tx > bs->remote_timers.required_min_rx) - bs->xmt_TO = bs->remote_timers.required_min_rx; - else bs->xmt_TO = bs->timers.desired_min_tx; + else + bs->xmt_TO = bs->remote_timers.required_min_rx; /* Apply new transmission timer immediately. */ ptm_bfd_start_xmt_timer(bs, false); @@ -1104,13 +1112,13 @@ static const char *get_diag_str(int diag) return "N/A"; } -const char *satostr(struct sockaddr_any *sa) +const char *satostr(const struct sockaddr_any *sa) { #define INETSTR_BUFCOUNT 8 static char buf[INETSTR_BUFCOUNT][INET6_ADDRSTRLEN]; static int bufidx; - struct sockaddr_in *sin = &sa->sa_sin; - struct sockaddr_in6 *sin6 = &sa->sa_sin6; + const struct sockaddr_in *sin = &sa->sa_sin; + const struct sockaddr_in6 *sin6 = &sa->sa_sin6; bufidx += (bufidx + 1) % INETSTR_BUFCOUNT; buf[bufidx][0] = 0; @@ -1241,7 +1249,7 @@ const char *bs_to_string(const struct bfd_session *bs) static char buf[256]; char addr_buf[INET6_ADDRSTRLEN]; int pos; - bool is_mhop = BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH); + bool is_mhop = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH); pos = snprintf(buf, sizeof(buf), "mhop:%s", is_mhop ? 
"yes" : "no"); pos += snprintf(buf + pos, sizeof(buf) - pos, " peer:%s", @@ -1431,12 +1439,14 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) memset(&bs.key.local, 0, sizeof(bs.key.local)); bsp = hash_lookup(bfd_key_hash, &bs); if (bsp) { - char addr_buf[INET6_ADDRSTRLEN]; - - inet_ntop(bs.key.family, &key.local, addr_buf, - sizeof(addr_buf)); - log_debug(" peer %s found, but loc-addr %s ignored", - peer_buf, addr_buf); + if (bglobal.debug_peer_event) { + char addr_buf[INET6_ADDRSTRLEN]; + inet_ntop(bs.key.family, &key.local, addr_buf, + sizeof(addr_buf)); + zlog_debug( + " peer %s found, but loc-addr %s ignored", + peer_buf, addr_buf); + } return bsp; } } @@ -1447,8 +1457,9 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) memset(bs.key.ifname, 0, sizeof(bs.key.ifname)); bsp = hash_lookup(bfd_key_hash, &bs); if (bsp) { - log_debug(" peer %s found, but ifp %s ignored", - peer_buf, key.ifname); + if (bglobal.debug_peer_event) + zlog_debug(" peer %s found, but ifp %s ignored", + peer_buf, key.ifname); return bsp; } } @@ -1458,14 +1469,15 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) memset(&bs.key.local, 0, sizeof(bs.key.local)); bsp = hash_lookup(bfd_key_hash, &bs); if (bsp) { - char addr_buf[INET6_ADDRSTRLEN]; - - inet_ntop(bs.key.family, &bs.key.local, addr_buf, - sizeof(addr_buf)); - log_debug(" peer %s found, but ifp %s" - " and loc-addr %s ignored", - peer_buf, key.ifname, - addr_buf); + if (bglobal.debug_peer_event) { + char addr_buf[INET6_ADDRSTRLEN]; + inet_ntop(bs.key.family, &bs.key.local, + addr_buf, sizeof(addr_buf)); + zlog_debug( + " peer %s found, but ifp %s" + " and loc-addr %s ignored", + peer_buf, key.ifname, addr_buf); + } return bsp; } } @@ -1483,8 +1495,11 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) /* change key */ if (ctx.result) { bsp = ctx.result; - log_debug(" peer %s found, but ifp" - " and/or loc-addr params ignored", peer_buf); + if (bglobal.debug_peer_event) + zlog_debug( + " peer %s found, 
but ifp" + " and/or loc-addr params ignored", + peer_buf); } return bsp; } @@ -1644,11 +1659,11 @@ static void _bfd_session_remove_manual(struct hash_bucket *hb, struct bfd_session *bs = hb->data; /* Delete only manually configured sessions. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) return; bs->refcount--; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); /* Don't delete sessions still in use. */ if (bs->refcount != 0) @@ -1672,13 +1687,17 @@ void bfd_sessions_remove_manual(void) */ static int bfd_vrf_new(struct vrf *vrf) { - log_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id); + if (bglobal.debug_zebra) + zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id); + return 0; } static int bfd_vrf_delete(struct vrf *vrf) { - log_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id); + if (bglobal.debug_zebra) + zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id); + return 0; } @@ -1686,7 +1705,10 @@ static int bfd_vrf_update(struct vrf *vrf) { if (!vrf_is_enabled(vrf)) return 0; - log_debug("VRF update: %s(%u)", vrf->name, vrf->vrf_id); + + if (bglobal.debug_zebra) + zlog_debug("VRF update: %s(%u)", vrf->name, vrf->vrf_id); + /* a different name is given; update bfd list */ bfdd_sessions_enable_vrf(vrf); return 0; @@ -1703,7 +1725,10 @@ static int bfd_vrf_enable(struct vrf *vrf) vrf->info = (void *)bvrf; } else bvrf = vrf->info; - log_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id); + + if (bglobal.debug_zebra) + zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id); + if (vrf->vrf_id == VRF_DEFAULT || vrf_get_backend() == VRF_BACKEND_NETNS) { if (!bvrf->bg_shop) @@ -1759,7 +1784,8 @@ static int bfd_vrf_disable(struct vrf *vrf) bfdd_zclient_unregister(vrf->vrf_id); } - log_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id); + if (bglobal.debug_zebra) + zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id); 
/* Disable read/write poll triggering. */ THREAD_OFF(bvrf->bg_ev[0]); diff --git a/bfdd/bfd.h b/bfdd/bfd.h index 2ae74d7880..93873eed94 100644 --- a/bfdd/bfd.h +++ b/bfdd/bfd.h @@ -172,10 +172,6 @@ enum bfd_session_flags { BFD_SESS_FLAG_CBIT = 1 << 9, /* CBIT is set */ }; -#define BFD_SET_FLAG(field, flag) (field |= flag) -#define BFD_UNSET_FLAG(field, flag) (field &= ~flag) -#define BFD_CHECK_FLAG(field, flag) (field & flag) - /* BFD session hash keys */ struct bfd_key { uint16_t family; @@ -397,7 +393,26 @@ struct bfd_global { struct obslist bg_obslist; struct zebra_privs_t bfdd_privs; + + /* Debug options. */ + /* Show all peer state changes events. */ + bool debug_peer_event; + /* + * Show zebra message exchanges: + * - Interface add/delete. + * - Local address add/delete. + * - VRF add/delete. + */ + bool debug_zebra; + /* + * Show network level debug information: + * - Echo packets without session. + * - Unavailable peer sessions. + * - Network system call failures. + */ + bool debug_network; }; + extern struct bfd_global bglobal; extern const struct bfd_diag_str_list diag_list[]; extern const struct bfd_state_str_list state_list[]; @@ -429,15 +444,9 @@ void pl_free(struct peer_label *pl); /* * logging - alias to zebra log */ - -#define log_debug zlog_debug -#define log_info zlog_info -#define log_warning zlog_warn -#define log_error zlog_err - -#define log_fatal(msg, ...) \ +#define zlog_fatal(msg, ...) 
\ do { \ - zlog_err(msg, ## __VA_ARGS__); \ + zlog_err(msg, ##__VA_ARGS__); \ assert(!msg); \ abort(); \ } while (0) @@ -520,7 +529,7 @@ void bs_state_handler(struct bfd_session *bs, int nstate); void bs_echo_timer_handler(struct bfd_session *bs); void bs_final_handler(struct bfd_session *bs); void bs_set_slow_timers(struct bfd_session *bs); -const char *satostr(struct sockaddr_any *sa); +const char *satostr(const struct sockaddr_any *sa); const char *diag2str(uint8_t diag); int strtosa(const char *addr, struct sockaddr_any *sa); void integer2timestr(uint64_t time, char *buf, size_t buflen); diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c index 1ec761e3b8..68bdd89bb7 100644 --- a/bfdd/bfd_packet.c +++ b/bfdd/bfd_packet.c @@ -76,7 +76,7 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, ssize_t rv; int sd = -1; - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) { memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; memcpy(&sin6.sin6_addr, &bs->key.peer, sizeof(sin6.sin6_addr)); @@ -85,7 +85,7 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, sin6.sin6_port = (port) ? *port - : (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) + : (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) ? htons(BFD_DEF_MHOP_DEST_PORT) : htons(BFD_DEFDESTPORT); @@ -98,7 +98,7 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, memcpy(&sin.sin_addr, &bs->key.peer, sizeof(sin.sin_addr)); sin.sin_port = (port) ? *port - : (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) + : (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) ? 
htons(BFD_DEF_MHOP_DEST_PORT) : htons(BFD_DEFDESTPORT); @@ -112,11 +112,16 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ rv = sendto(sd, data, datalen, 0, sa, slen); if (rv <= 0) { - log_debug("packet-send: send failure: %s", strerror(errno)); + if (bglobal.debug_network) + zlog_debug("packet-send: send failure: %s", + strerror(errno)); return -1; } - if (rv < (ssize_t)datalen) - log_debug("packet-send: send partial: %s", strerror(errno)); + if (rv < (ssize_t)datalen) { + if (bglobal.debug_network) + zlog_debug("packet-send: send partial: %s", + strerror(errno)); + } return 0; } @@ -133,15 +138,15 @@ void ptm_bfd_echo_snd(struct bfd_session *bfd) if (!bvrf) return; - if (!BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) - BFD_SET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); + if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + SET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); memset(&bep, 0, sizeof(bep)); bep.ver = BFD_ECHO_VERSION; bep.len = BFD_ECHO_PKT_LEN; bep.my_discr = htonl(bfd->discrs.my_discr); - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6)) { + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6)) { sd = bvrf->bg_echov6; memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; @@ -190,13 +195,16 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s) /* Your discriminator not zero - use it to find session */ bfd = bfd_id_lookup(my_discr); if (bfd == NULL) { - log_debug("echo-packet: no matching session (id:%u)", my_discr); + if (bglobal.debug_network) + zlog_debug("echo-packet: no matching session (id:%u)", + my_discr); return -1; } - if (!BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) { - log_debug("echo-packet: echo disabled [%s] (id:%u)", - bs_to_string(bfd), my_discr); + if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) { + if (bglobal.debug_network) + zlog_debug("echo-packet: echo disabled [%s] (id:%u)", + bs_to_string(bfd), 
my_discr); return -1; } @@ -214,7 +222,7 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s) void ptm_bfd_snd(struct bfd_session *bfd, int fbit) { - struct bfd_pkt cp; + struct bfd_pkt cp = {}; /* Set fields according to section 6.5.7 */ cp.diag = bfd->local_diag; @@ -222,7 +230,7 @@ void ptm_bfd_snd(struct bfd_session *bfd, int fbit) cp.flags = 0; BFD_SETSTATE(cp.flags, bfd->ses_state); - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_CBIT)) + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_CBIT)) BFD_SETCBIT(cp.flags, BFD_CBIT); BFD_SETDEMANDBIT(cp.flags, BFD_DEF_DEMAND); @@ -291,8 +299,7 @@ ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, mlen = recvmsg(sd, &msghdr, MSG_DONTWAIT); if (mlen == -1) { if (errno != EAGAIN) - log_error("ipv4-recv: recv failed: %s", - strerror(errno)); + zlog_err("ipv4-recv: recv failed: %s", strerror(errno)); return -1; } @@ -313,7 +320,9 @@ ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval)); if (ttlval > 255) { - log_debug("ipv4-recv: invalid TTL: %u", ttlval); + if (bglobal.debug_network) + zlog_debug("ipv4-recv: invalid TTL: %u", + ttlval); return -1; } *ttl = ttlval; @@ -402,8 +411,7 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, mlen = recvmsg(sd, &msghdr6, MSG_DONTWAIT); if (mlen == -1) { if (errno != EAGAIN) - log_error("ipv6-recv: recv failed: %s", - strerror(errno)); + zlog_err("ipv6-recv: recv failed: %s", strerror(errno)); return -1; } @@ -420,7 +428,9 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, if (cm->cmsg_type == IPV6_HOPLIMIT) { memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval)); if (ttlval > 255) { - log_debug("ipv6-recv: invalid TTL: %u", ttlval); + if (bglobal.debug_network) + zlog_debug("ipv6-recv: invalid TTL: %u", + ttlval); return -1; } @@ -486,6 +496,10 @@ static void cp_debug(bool mhop, struct sockaddr_any *peer, char 
buf[512], peerstr[128], localstr[128], portstr[64], vrfstr[64]; va_list vl; + /* Don't to any processing if debug is disabled. */ + if (bglobal.debug_network == false) + return; + if (peer->sa_sin.sin_family) snprintf(peerstr, sizeof(peerstr), " peer:%s", satostr(peer)); else @@ -511,8 +525,8 @@ static void cp_debug(bool mhop, struct sockaddr_any *peer, vsnprintf(buf, sizeof(buf), fmt, vl); va_end(vl); - log_debug("control-packet: %s [mhop:%s%s%s%s%s]", buf, - mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr); + zlog_debug("control-packet: %s [mhop:%s%s%s%s%s]", buf, + mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr); } int bfd_recv_cb(struct thread *t) @@ -779,7 +793,7 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen, cmsg->cmsg_level = IPPROTO_IPV6; cmsg->cmsg_type = IPV6_HOPLIMIT; } else { -#if BFD_LINUX +#ifdef BFD_LINUX cmsg->cmsg_level = IPPROTO_IP; cmsg->cmsg_type = IP_TTL; #else @@ -796,11 +810,14 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen, /* Send echo back. 
*/ wlen = sendmsg(sd, &msg, 0); if (wlen <= 0) { - log_debug("udp-send: loopback failure: (%d) %s", errno, strerror(errno)); + if (bglobal.debug_network) + zlog_debug("udp-send: loopback failure: (%d) %s", errno, + strerror(errno)); return -1; } else if (wlen < (ssize_t)datalen) { - log_debug("udp-send: partial send: %zd expected %zu", wlen, - datalen); + if (bglobal.debug_network) + zlog_debug("udp-send: partial send: %zd expected %zu", + wlen, datalen); return -1; } @@ -821,8 +838,8 @@ int bp_set_ttl(int sd, uint8_t value) int ttl = value; if (setsockopt(sd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) == -1) { - log_warning("set-ttl: setsockopt(IP_TTL, %d): %s", value, - strerror(errno)); + zlog_warn("set-ttl: setsockopt(IP_TTL, %d): %s", value, + strerror(errno)); return -1; } @@ -834,8 +851,8 @@ int bp_set_tos(int sd, uint8_t value) int tos = value; if (setsockopt(sd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) == -1) { - log_warning("set-tos: setsockopt(IP_TOS, %d): %s", value, - strerror(errno)); + zlog_warn("set-tos: setsockopt(IP_TOS, %d): %s", value, + strerror(errno)); return -1; } @@ -847,12 +864,12 @@ static void bp_set_ipopts(int sd) int rcvttl = BFD_RCV_TTL_VAL; if (bp_set_ttl(sd, BFD_TTL_VAL) != 0) - log_fatal("set-ipopts: TTL configuration failed"); + zlog_fatal("set-ipopts: TTL configuration failed"); if (setsockopt(sd, IPPROTO_IP, IP_RECVTTL, &rcvttl, sizeof(rcvttl)) == -1) - log_fatal("set-ipopts: setsockopt(IP_RECVTTL, %d): %s", rcvttl, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt(IP_RECVTTL, %d): %s", rcvttl, + strerror(errno)); #ifdef BFD_LINUX int pktinfo = BFD_PKT_INFO_VAL; @@ -860,21 +877,21 @@ static void bp_set_ipopts(int sd) /* Figure out address and interface to do the peer matching. 
*/ if (setsockopt(sd, IPPROTO_IP, IP_PKTINFO, &pktinfo, sizeof(pktinfo)) == -1) - log_fatal("set-ipopts: setsockopt(IP_PKTINFO, %d): %s", pktinfo, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt(IP_PKTINFO, %d): %s", + pktinfo, strerror(errno)); #endif /* BFD_LINUX */ #ifdef BFD_BSD int yes = 1; /* Find out our address for peer matching. */ if (setsockopt(sd, IPPROTO_IP, IP_RECVDSTADDR, &yes, sizeof(yes)) == -1) - log_fatal("set-ipopts: setsockopt(IP_RECVDSTADDR, %d): %s", yes, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt(IP_RECVDSTADDR, %d): %s", + yes, strerror(errno)); /* Find out interface where the packet came in. */ if (setsockopt_ifindex(AF_INET, sd, yes) == -1) - log_fatal("set-ipopts: setsockopt_ipv4_ifindex(%d): %s", yes, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt_ipv4_ifindex(%d): %s", yes, + strerror(errno)); #endif /* BFD_BSD */ } @@ -887,7 +904,7 @@ static void bp_bind_ip(int sd, uint16_t port) sin.sin_addr.s_addr = htonl(INADDR_ANY); sin.sin_port = htons(port); if (bind(sd, (struct sockaddr *)&sin, sizeof(sin)) == -1) - log_fatal("bind-ip: bind: %s", strerror(errno)); + zlog_fatal("bind-ip: bind: %s", strerror(errno)); } int bp_udp_shop(const struct vrf *vrf) @@ -899,7 +916,7 @@ int bp_udp_shop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp-shop: socket: %s", strerror(errno)); + zlog_fatal("udp-shop: socket: %s", strerror(errno)); bp_set_ipopts(sd); bp_bind_ip(sd, BFD_DEFDESTPORT); @@ -915,7 +932,7 @@ int bp_udp_mhop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp-mhop: socket: %s", strerror(errno)); + zlog_fatal("udp-mhop: socket: %s", strerror(errno)); bp_set_ipopts(sd); bp_bind_ip(sd, BFD_DEF_MHOP_DEST_PORT); @@ -932,7 +949,7 @@ int bp_peer_socket(const struct bfd_session *bs) if (bs->key.ifname[0]) device_to_bind = (const char *)bs->key.ifname; - else if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) && bs->key.vrfname[0]) 
device_to_bind = (const char *)bs->key.vrfname; @@ -941,8 +958,8 @@ int bp_peer_socket(const struct bfd_session *bs) bs->vrf->vrf_id, device_to_bind); } if (sd == -1) { - log_error("ipv4-new: failed to create socket: %s", - strerror(errno)); + zlog_err("ipv4-new: failed to create socket: %s", + strerror(errno)); return -1; } @@ -965,15 +982,15 @@ int bp_peer_socket(const struct bfd_session *bs) sin.sin_len = sizeof(sin); #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ memcpy(&sin.sin_addr, &bs->key.local, sizeof(sin.sin_addr)); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) sin.sin_addr.s_addr = INADDR_ANY; pcount = 0; do { if ((++pcount) > (BFD_SRCPORTMAX - BFD_SRCPORTINIT)) { /* Searched all ports, none available */ - log_error("ipv4-new: failed to bind port: %s", - strerror(errno)); + zlog_err("ipv4-new: failed to bind port: %s", + strerror(errno)); close(sd); return -1; } @@ -999,7 +1016,7 @@ int bp_peer_socketv6(const struct bfd_session *bs) if (bs->key.ifname[0]) device_to_bind = (const char *)bs->key.ifname; - else if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) && bs->key.vrfname[0]) device_to_bind = (const char *)bs->key.vrfname; @@ -1008,8 +1025,8 @@ int bp_peer_socketv6(const struct bfd_session *bs) bs->vrf->vrf_id, device_to_bind); } if (sd == -1) { - log_error("ipv6-new: failed to create socket: %s", - strerror(errno)); + zlog_err("ipv6-new: failed to create socket: %s", + strerror(errno)); return -1; } @@ -1039,8 +1056,8 @@ int bp_peer_socketv6(const struct bfd_session *bs) do { if ((++pcount) > (BFD_SRCPORTMAX - BFD_SRCPORTINIT)) { /* Searched all ports, none available */ - log_error("ipv6-new: failed to bind port: %s", - strerror(errno)); + zlog_err("ipv6-new: failed to bind port: %s", + strerror(errno)); close(sd); return -1; } @@ -1058,8 +1075,8 @@ int bp_set_ttlv6(int sd, uint8_t value) if (setsockopt(sd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &ttl, 
sizeof(ttl)) == -1) { - log_warning("set-ttlv6: setsockopt(IPV6_UNICAST_HOPS, %d): %s", - value, strerror(errno)); + zlog_warn("set-ttlv6: setsockopt(IPV6_UNICAST_HOPS, %d): %s", + value, strerror(errno)); return -1; } @@ -1072,8 +1089,8 @@ int bp_set_tosv6(int sd, uint8_t value) if (setsockopt(sd, IPPROTO_IPV6, IPV6_TCLASS, &tos, sizeof(tos)) == -1) { - log_warning("set-tosv6: setsockopt(IPV6_TCLASS, %d): %s", value, - strerror(errno)); + zlog_warn("set-tosv6: setsockopt(IPV6_TCLASS, %d): %s", value, + strerror(errno)); return -1; } @@ -1086,22 +1103,23 @@ static void bp_set_ipv6opts(int sd) int ipv6_only = BFD_IPV6_ONLY_VAL; if (bp_set_ttlv6(sd, BFD_TTL_VAL) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_UNICAST_HOPS, %d): %s", - BFD_TTL_VAL, strerror(errno)); + zlog_fatal( + "set-ipv6opts: setsockopt(IPV6_UNICAST_HOPS, %d): %s", + BFD_TTL_VAL, strerror(errno)); if (setsockopt_ipv6_hoplimit(sd, BFD_RCV_TTL_VAL) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_HOPLIMIT, %d): %s", - BFD_RCV_TTL_VAL, strerror(errno)); + zlog_fatal("set-ipv6opts: setsockopt(IPV6_HOPLIMIT, %d): %s", + BFD_RCV_TTL_VAL, strerror(errno)); if (setsockopt_ipv6_pktinfo(sd, ipv6_pktinfo) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_PKTINFO, %d): %s", - ipv6_pktinfo, strerror(errno)); + zlog_fatal("set-ipv6opts: setsockopt(IPV6_PKTINFO, %d): %s", + ipv6_pktinfo, strerror(errno)); if (setsockopt(sd, IPPROTO_IPV6, IPV6_V6ONLY, &ipv6_only, sizeof(ipv6_only)) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_V6ONLY, %d): %s", - ipv6_only, strerror(errno)); + zlog_fatal("set-ipv6opts: setsockopt(IPV6_V6ONLY, %d): %s", + ipv6_only, strerror(errno)); } static void bp_bind_ipv6(int sd, uint16_t port) @@ -1116,7 +1134,7 @@ static void bp_bind_ipv6(int sd, uint16_t port) sin6.sin6_len = sizeof(sin6); #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ if (bind(sd, (struct sockaddr *)&sin6, sizeof(sin6)) == -1) - log_fatal("bind-ipv6: bind: %s", strerror(errno)); + zlog_fatal("bind-ipv6: bind: %s", 
strerror(errno)); } int bp_udp6_shop(const struct vrf *vrf) @@ -1128,7 +1146,7 @@ int bp_udp6_shop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp6-shop: socket: %s", strerror(errno)); + zlog_fatal("udp6-shop: socket: %s", strerror(errno)); bp_set_ipv6opts(sd); bp_bind_ipv6(sd, BFD_DEFDESTPORT); @@ -1145,7 +1163,7 @@ int bp_udp6_mhop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp6-mhop: socket: %s", strerror(errno)); + zlog_fatal("udp6-mhop: socket: %s", strerror(errno)); bp_set_ipv6opts(sd); bp_bind_ipv6(sd, BFD_DEF_MHOP_DEST_PORT); @@ -1161,7 +1179,7 @@ int bp_echo_socket(const struct vrf *vrf) s = vrf_socket(AF_INET, SOCK_DGRAM, 0, vrf->vrf_id, vrf->name); } if (s == -1) - log_fatal("echo-socket: socket: %s", strerror(errno)); + zlog_fatal("echo-socket: socket: %s", strerror(errno)); bp_set_ipopts(s); bp_bind_ip(s, BFD_DEF_ECHO_PORT); @@ -1177,7 +1195,7 @@ int bp_echov6_socket(const struct vrf *vrf) s = vrf_socket(AF_INET6, SOCK_DGRAM, 0, vrf->vrf_id, vrf->name); } if (s == -1) - log_fatal("echov6-socket: socket: %s", strerror(errno)); + zlog_fatal("echov6-socket: socket: %s", strerror(errno)); bp_set_ipv6opts(s); bp_bind_ipv6(s, BFD_DEF_ECHO_PORT); diff --git a/bfdd/bfdd.c b/bfdd/bfdd.c index 69f268ab01..39d51eb649 100644 --- a/bfdd/bfdd.c +++ b/bfdd/bfdd.c @@ -25,6 +25,7 @@ #include "bfd.h" #include "bfdd_nb.h" #include "lib/version.h" +#include "lib/command.h" /* @@ -49,8 +50,8 @@ void socket_close(int *s) return; if (close(*s) != 0) - log_error("%s: close(%d): (%d) %s", __func__, *s, errno, - strerror(errno)); + zlog_err("%s: close(%d): (%d) %s", __func__, *s, errno, + strerror(errno)); *s = -1; } @@ -112,6 +113,7 @@ static struct quagga_signal_t bfd_signals[] = { static const struct frr_yang_module_info *const bfdd_yang_modules[] = { &frr_interface_info, &frr_bfdd_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(bfdd, BFD, .vty_port = 2617, diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c index 
48dcce3ddf..7b95bd23c6 100644 --- a/bfdd/bfdd_nb_config.c +++ b/bfdd/bfdd_nb_config.c @@ -99,7 +99,7 @@ static int bfd_session_create(enum nb_event event, const struct lyd_node *dnode, /* This session was already configured by another daemon. */ if (bs != NULL) { /* Now it is configured also by CLI. */ - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); bs->refcount++; resource->ptr = bs; @@ -107,19 +107,17 @@ static int bfd_session_create(enum nb_event event, const struct lyd_node *dnode, } bs = bfd_session_new(); - if (bs == NULL) - return NB_ERR_RESOURCE; /* Fill the session key. */ bfd_session_get_key(mhop, dnode, &bs->key); /* Set configuration flags. */ bs->refcount = 1; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); if (mhop) - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_MH); + SET_FLAG(bs->flags, BFD_SESS_FLAG_MH); if (bs->key.family == AF_INET6) - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6); + SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6); resource->ptr = bs; break; @@ -164,10 +162,10 @@ static int bfd_session_destroy(enum nb_event event, case NB_EV_APPLY: bs = nb_running_unset_entry(dnode); /* CLI is not using this session anymore. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) break; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); bs->refcount--; /* There are still daemons using it. */ if (bs->refcount > 0) @@ -384,10 +382,10 @@ int bfdd_bfd_sessions_single_hop_administrative_down_modify( bs = nb_running_get_entry(dnode, NULL, true); if (!shutdown) { - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return NB_OK; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Change and notify state change. 
*/ bs->ses_state = PTM_BFD_DOWN; @@ -396,15 +394,15 @@ int bfdd_bfd_sessions_single_hop_administrative_down_modify( /* Enable all timers. */ bfd_recvtimer_update(bs); bfd_xmttimer_update(bs, bs->xmt_TO); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) { bfd_echo_recvtimer_update(bs); bfd_echo_xmttimer_update(bs, bs->echo_xmt_TO); } } else { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return NB_OK; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Disable all events. */ bfd_recvtimer_delete(bs); @@ -448,18 +446,18 @@ int bfdd_bfd_sessions_single_hop_echo_mode_modify(enum nb_event event, bs = nb_running_get_entry(dnode, NULL, true); if (!echo) { - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) return NB_OK; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); ptm_bfd_echo_stop(bs); } else { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) return NB_OK; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); /* Apply setting immediately. */ - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) bs_echo_timer_handler(bs); } diff --git a/bfdd/bfdd_nb_state.c b/bfdd/bfdd_nb_state.c index dfca3d1417..2a44d46c41 100644 --- a/bfdd/bfdd_nb_state.c +++ b/bfdd/bfdd_nb_state.c @@ -211,7 +211,7 @@ struct yang_data *bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem( * * TODO: support demand mode. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) detection_mode = 1; else detection_mode = 2; diff --git a/bfdd/bfdd_vty.c b/bfdd/bfdd_vty.c index 2a98b0fb02..71d0a49f6d 100644 --- a/bfdd/bfdd_vty.c +++ b/bfdd/bfdd_vty.c @@ -84,7 +84,7 @@ static void _display_peer_header(struct vty *vty, struct bfd_session *bs) inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf))); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) vty_out(vty, " multihop"); if (memcmp(&bs->key.local, &zero_addr, sizeof(bs->key.local))) @@ -143,7 +143,7 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs) vty_out(vty, "\t\tDiagnostics: %s\n", diag2str(bs->local_diag)); vty_out(vty, "\t\tRemote diagnostics: %s\n", diag2str(bs->remote_diag)); vty_out(vty, "\t\tPeer Type: %s\n", - BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? "configured" : "dynamic"); + CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? 
"configured" : "dynamic"); vty_out(vty, "\t\tLocal timers:\n"); vty_out(vty, "\t\t\tDetect-multiplier: %" PRIu32 "\n", @@ -235,7 +235,7 @@ static struct json_object *__display_peer_json(struct bfd_session *bs) bs->timers.required_min_rx / 1000); json_object_int_add(jo, "transmit-interval", bs->timers.desired_min_tx / 1000); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) json_object_int_add(jo, "echo-interval", bs->timers.required_min_echo / 1000); else @@ -305,7 +305,7 @@ static void _display_peer_json_iter(struct hash_bucket *hb, void *arg) jon = __display_peer_json(bs); if (jon == NULL) { - log_warning("%s: not enough memory", __func__); + zlog_warn("%s: not enough memory", __func__); return; } @@ -415,7 +415,7 @@ static void _display_peer_counter_json_iter(struct hash_bucket *hb, void *arg) jon = __display_peer_counters_json(bs); if (jon == NULL) { - log_warning("%s: not enough memory", __func__); + zlog_warn("%s: not enough memory", __func__); return; } @@ -457,7 +457,7 @@ static void _display_peer_brief(struct vty *vty, struct bfd_session *bs) { char addr_buf[INET6_ADDRSTRLEN]; - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { vty_out(vty, "%-10u", bs->discrs.my_discr); inet_ntop(bs->key.family, &bs->key.local, addr_buf, sizeof(addr_buf)); vty_out(vty, " %-40s", addr_buf); @@ -738,6 +738,42 @@ DEFPY(bfd_show_peers_brief, bfd_show_peers_brief_cmd, return CMD_SUCCESS; } +DEFPY( + bfd_debug_peer, bfd_debug_peer_cmd, + "[no] debug bfd peer", + NO_STR + DEBUG_STR + "Bidirection Forwarding Detection\n" + "Peer events debugging\n") +{ + bglobal.debug_peer_event = !no; + return CMD_SUCCESS; +} + +DEFPY( + bfd_debug_zebra, bfd_debug_zebra_cmd, + "[no] debug bfd zebra", + NO_STR + DEBUG_STR + "Bidirection Forwarding Detection\n" + "Zebra events debugging\n") +{ + bglobal.debug_zebra = !no; + return CMD_SUCCESS; +} + +DEFPY( + bfd_debug_network, 
bfd_debug_network_cmd, + "[no] debug bfd network", + NO_STR + DEBUG_STR + "Bidirection Forwarding Detection\n" + "Network layer debugging\n") +{ + bglobal.debug_network = !no; + return CMD_SUCCESS; +} + /* * Function definitions. */ @@ -842,20 +878,27 @@ DEFUN_NOSH(show_debugging_bfd, "BFD daemon\n") { vty_out(vty, "BFD debugging status:\n"); + vty_out(vty, " Peer events debugging.\n"); + vty_out(vty, " Zebra events debugging.\n"); + vty_out(vty, " Network layer debugging.\n"); return CMD_SUCCESS; } +static int bfdd_write_config(struct vty *vty); struct cmd_node bfd_node = { - BFD_NODE, - "%s(config-bfd)# ", - 1, + .name = "bfd", + .node = BFD_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-bfd)# ", + .config_write = bfdd_write_config, }; struct cmd_node bfd_peer_node = { - BFD_PEER_NODE, - "%s(config-bfd-peer)# ", - 1, + .name = "bfd peer", + .node = BFD_PEER_NODE, + .parent_node = BFD_NODE, + .prompt = "%s(config-bfd-peer)# ", }; static int bfdd_write_config(struct vty *vty) @@ -863,6 +906,21 @@ static int bfdd_write_config(struct vty *vty) struct lyd_node *dnode; int written = 0; + if (bglobal.debug_peer_event) { + vty_out(vty, "debug bfd peer\n"); + written = 1; + } + + if (bglobal.debug_zebra) { + vty_out(vty, "debug bfd zebra\n"); + written = 1; + } + + if (bglobal.debug_network) { + vty_out(vty, "debug bfd network\n"); + written = 1; + } + dnode = yang_dnode_get(running_config->dnode, "/frr-bfdd:bfdd"); if (dnode) { nb_cli_show_dnode_cmds(vty, dnode, false); @@ -882,12 +940,20 @@ void bfdd_vty_init(void) install_element(ENABLE_NODE, &bfd_show_peers_brief_cmd); install_element(ENABLE_NODE, &show_debugging_bfd_cmd); + install_element(ENABLE_NODE, &bfd_debug_peer_cmd); + install_element(ENABLE_NODE, &bfd_debug_zebra_cmd); + install_element(ENABLE_NODE, &bfd_debug_network_cmd); + + install_element(CONFIG_NODE, &bfd_debug_peer_cmd); + install_element(CONFIG_NODE, &bfd_debug_zebra_cmd); + install_element(CONFIG_NODE, &bfd_debug_network_cmd); + /* 
Install BFD node and commands. */ - install_node(&bfd_node, bfdd_write_config); + install_node(&bfd_node); install_default(BFD_NODE); /* Install BFD peer node. */ - install_node(&bfd_peer_node, NULL); + install_node(&bfd_peer_node); install_default(BFD_PEER_NODE); bfdd_cli_init(); diff --git a/bfdd/config.c b/bfdd/config.c index 4ae7bfdc08..b71670f012 100644 --- a/bfdd/config.c +++ b/bfdd/config.c @@ -92,8 +92,8 @@ static int parse_config_json(struct json_object *jo, bpc_handle h, void *arg) error += parse_list(jo_val, PLT_LABEL, h, arg); } else { sval = json_object_get_string(jo_val); - log_warning("%s:%d invalid configuration: %s", __func__, - __LINE__, sval); + zlog_warn("%s:%d invalid configuration: %s", __func__, + __LINE__, sval); error++; } } @@ -139,15 +139,15 @@ static int parse_list(struct json_object *jo, enum peer_list_type plt, switch (plt) { case PLT_IPV4: - log_debug("ipv4 peers %d:", allen); + zlog_debug("ipv4 peers %d:", allen); bpc.bpc_ipv4 = true; break; case PLT_IPV6: - log_debug("ipv6 peers %d:", allen); + zlog_debug("ipv6 peers %d:", allen); bpc.bpc_ipv4 = false; break; case PLT_LABEL: - log_debug("label peers %d:", allen); + zlog_debug("label peers %d:", allen); if (parse_peer_label_config(jo_val, &bpc) != 0) { error++; continue; @@ -156,8 +156,8 @@ static int parse_list(struct json_object *jo, enum peer_list_type plt, default: error++; - log_error("%s:%d: unsupported peer type", __func__, - __LINE__); + zlog_err("%s:%d: unsupported peer type", __func__, + __LINE__); break; } @@ -178,7 +178,7 @@ static int parse_peer_config(struct json_object *jo, struct bfd_peer_cfg *bpc) int family_type = (bpc->bpc_ipv4) ? AF_INET : AF_INET6; int error = 0; - log_debug(" peer: %s", bpc->bpc_ipv4 ? "ipv4" : "ipv6"); + zlog_debug(" peer: %s", bpc->bpc_ipv4 ? 
"ipv4" : "ipv6"); JSON_FOREACH (jo, joi, join) { key = json_object_iter_peek_name(&joi); @@ -186,41 +186,41 @@ static int parse_peer_config(struct json_object *jo, struct bfd_peer_cfg *bpc) if (strcmp(key, "multihop") == 0) { bpc->bpc_mhop = json_object_get_boolean(jo_val); - log_debug(" multihop: %s", - bpc->bpc_mhop ? "true" : "false"); + zlog_debug(" multihop: %s", + bpc->bpc_mhop ? "true" : "false"); } else if (strcmp(key, "peer-address") == 0) { sval = json_object_get_string(jo_val); if (strtosa(sval, &bpc->bpc_peer) != 0 || bpc->bpc_peer.sa_sin.sin_family != family_type) { - log_info( + zlog_debug( "%s:%d failed to parse peer-address '%s'", __func__, __LINE__, sval); error++; } - log_debug(" peer-address: %s", sval); + zlog_debug(" peer-address: %s", sval); } else if (strcmp(key, "local-address") == 0) { sval = json_object_get_string(jo_val); if (strtosa(sval, &bpc->bpc_local) != 0 || bpc->bpc_local.sa_sin.sin_family != family_type) { - log_info( + zlog_debug( "%s:%d failed to parse local-address '%s'", __func__, __LINE__, sval); error++; } - log_debug(" local-address: %s", sval); + zlog_debug(" local-address: %s", sval); } else if (strcmp(key, "local-interface") == 0) { bpc->bpc_has_localif = true; sval = json_object_get_string(jo_val); if (strlcpy(bpc->bpc_localif, sval, sizeof(bpc->bpc_localif)) > sizeof(bpc->bpc_localif)) { - log_debug( + zlog_debug( " local-interface: %s (truncated)", sval); error++; } else { - log_debug(" local-interface: %s", sval); + zlog_debug(" local-interface: %s", sval); } } else if (strcmp(key, "vrf-name") == 0) { bpc->bpc_has_vrfname = true; @@ -228,67 +228,68 @@ static int parse_peer_config(struct json_object *jo, struct bfd_peer_cfg *bpc) if (strlcpy(bpc->bpc_vrfname, sval, sizeof(bpc->bpc_vrfname)) > sizeof(bpc->bpc_vrfname)) { - log_debug(" vrf-name: %s (truncated)", - sval); + zlog_debug(" vrf-name: %s (truncated)", + sval); error++; } else { - log_debug(" vrf-name: %s", sval); + zlog_debug(" vrf-name: %s", sval); } } else 
if (strcmp(key, "detect-multiplier") == 0) { bpc->bpc_detectmultiplier = json_object_get_int64(jo_val); bpc->bpc_has_detectmultiplier = true; - log_debug(" detect-multiplier: %u", - bpc->bpc_detectmultiplier); + zlog_debug(" detect-multiplier: %u", + bpc->bpc_detectmultiplier); } else if (strcmp(key, "receive-interval") == 0) { bpc->bpc_recvinterval = json_object_get_int64(jo_val); bpc->bpc_has_recvinterval = true; - log_debug(" receive-interval: %" PRIu64, - bpc->bpc_recvinterval); + zlog_debug(" receive-interval: %" PRIu64, + bpc->bpc_recvinterval); } else if (strcmp(key, "transmit-interval") == 0) { bpc->bpc_txinterval = json_object_get_int64(jo_val); bpc->bpc_has_txinterval = true; - log_debug(" transmit-interval: %" PRIu64, - bpc->bpc_txinterval); + zlog_debug(" transmit-interval: %" PRIu64, + bpc->bpc_txinterval); } else if (strcmp(key, "echo-interval") == 0) { bpc->bpc_echointerval = json_object_get_int64(jo_val); bpc->bpc_has_echointerval = true; - log_debug(" echo-interval: %" PRIu64, - bpc->bpc_echointerval); + zlog_debug(" echo-interval: %" PRIu64, + bpc->bpc_echointerval); } else if (strcmp(key, "create-only") == 0) { bpc->bpc_createonly = json_object_get_boolean(jo_val); - log_debug(" create-only: %s", - bpc->bpc_createonly ? "true" : "false"); + zlog_debug(" create-only: %s", + bpc->bpc_createonly ? "true" : "false"); } else if (strcmp(key, "shutdown") == 0) { bpc->bpc_shutdown = json_object_get_boolean(jo_val); - log_debug(" shutdown: %s", - bpc->bpc_shutdown ? "true" : "false"); + zlog_debug(" shutdown: %s", + bpc->bpc_shutdown ? "true" : "false"); } else if (strcmp(key, "echo-mode") == 0) { bpc->bpc_echo = json_object_get_boolean(jo_val); - log_debug(" echo-mode: %s", - bpc->bpc_echo ? "true" : "false"); + zlog_debug(" echo-mode: %s", + bpc->bpc_echo ? 
"true" : "false"); } else if (strcmp(key, "label") == 0) { bpc->bpc_has_label = true; sval = json_object_get_string(jo_val); if (strlcpy(bpc->bpc_label, sval, sizeof(bpc->bpc_label)) > sizeof(bpc->bpc_label)) { - log_debug(" label: %s (truncated)", - sval); + zlog_debug(" label: %s (truncated)", + sval); error++; } else { - log_debug(" label: %s", sval); + zlog_debug(" label: %s", sval); } } else { sval = json_object_get_string(jo_val); - log_warning("%s:%d invalid configuration: '%s: %s'", - __func__, __LINE__, key, sval); + zlog_warn("%s:%d invalid configuration: '%s: %s'", + __func__, __LINE__, key, sval); error++; } } if (bpc->bpc_peer.sa_sin.sin_family == 0) { - log_debug("%s:%d no peer address provided", __func__, __LINE__); + zlog_debug("%s:%d no peer address provided", __func__, + __LINE__); error++; } @@ -312,7 +313,7 @@ static int parse_peer_label_config(struct json_object *jo, if (pl == NULL) return 1; - log_debug(" peer-label: %s", sval); + zlog_debug(" peer-label: %s", sval); /* Translate the label into BFD address keys. */ bs_to_bpc(pl->pl_bs, bpc); @@ -474,12 +475,12 @@ char *config_notify_config(const char *op, struct bfd_session *bs) json_object_int_add(resp, "remote-echo-interval", bs->remote_timers.required_min_echo / 1000); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) json_object_boolean_true_add(resp, "echo-mode"); else json_object_boolean_false_add(resp, "echo-mode"); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) json_object_boolean_true_add(resp, "shutdown"); else json_object_boolean_false_add(resp, "shutdown"); @@ -511,12 +512,12 @@ static int json_object_add_peer(struct json_object *jo, struct bfd_session *bs) char addr_buf[INET6_ADDRSTRLEN]; /* Add peer 'key' information. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) json_object_boolean_true_add(jo, "ipv6"); else json_object_boolean_false_add(jo, "ipv6"); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { json_object_boolean_true_add(jo, "multihop"); json_object_string_add(jo, "peer-address", inet_ntop(bs->key.family, &bs->key.peer, @@ -573,7 +574,7 @@ struct peer_label *pl_new(const char *label, struct bfd_session *bs) if (strlcpy(pl->pl_label, label, sizeof(pl->pl_label)) > sizeof(pl->pl_label)) - log_warning("%s:%d: label was truncated", __func__, __LINE__); + zlog_warn("%s:%d: label was truncated", __func__, __LINE__); pl->pl_bs = bs; bs->pl = pl; diff --git a/bfdd/control.c b/bfdd/control.c index ae6f5a3e79..3b954c64f8 100644 --- a/bfdd/control.c +++ b/bfdd/control.c @@ -86,13 +86,13 @@ static int sock_set_nonblock(int fd) flags = fcntl(fd, F_GETFL, 0); if (flags == -1) { - log_warning("%s: fcntl F_GETFL: %s", __func__, strerror(errno)); + zlog_warn("%s: fcntl F_GETFL: %s", __func__, strerror(errno)); return -1; } flags |= O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) == -1) { - log_warning("%s: fcntl F_SETFL: %s", __func__, strerror(errno)); + zlog_warn("%s: fcntl F_SETFL: %s", __func__, strerror(errno)); return -1; } @@ -116,20 +116,20 @@ int control_init(const char *path) sd = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC); if (sd == -1) { - log_error("%s: socket: %s", __func__, strerror(errno)); + zlog_err("%s: socket: %s", __func__, strerror(errno)); return -1; } umval = umask(0); if (bind(sd, (struct sockaddr *)&sun_, sizeof(sun_)) == -1) { - log_error("%s: bind: %s", __func__, strerror(errno)); + zlog_err("%s: bind: %s", __func__, strerror(errno)); close(sd); return -1; } umask(umval); if (listen(sd, SOMAXCONN) == -1) { - log_error("%s: listen: %s", __func__, strerror(errno)); + zlog_err("%s: listen: %s", __func__, strerror(errno)); close(sd); return -1; } @@ -164,12 
+164,11 @@ int control_accept(struct thread *t) csock = accept(sd, NULL, 0); if (csock == -1) { - log_warning("%s: accept: %s", __func__, strerror(errno)); + zlog_warn("%s: accept: %s", __func__, strerror(errno)); return 0; } - if (control_new(csock) == NULL) - close(csock); + control_new(csock); bglobal.bg_csockev = NULL; thread_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev); @@ -334,8 +333,6 @@ static int control_queue_enqueue(struct bfd_control_socket *bcs, struct bfd_control_buffer *bcb; bcq = control_queue_new(bcs); - if (bcq == NULL) - return -1; bcb = &bcq->bcq_bcb; bcb->bcb_left = sizeof(struct bfd_control_msg) + ntohl(bcm->bcm_length); @@ -440,7 +437,7 @@ static int control_read(struct thread *t) if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) goto schedule_next_read; - log_warning("%s: read: %s", __func__, strerror(errno)); + zlog_warn("%s: read: %s", __func__, strerror(errno)); control_free(bcs); return 0; } @@ -448,15 +445,15 @@ static int control_read(struct thread *t) /* Validate header fields. 
*/ plen = ntohl(bcm.bcm_length); if (plen < 2) { - log_debug("%s: client closed due small message length: %d", - __func__, bcm.bcm_length); + zlog_debug("%s: client closed due small message length: %d", + __func__, bcm.bcm_length); control_free(bcs); return 0; } if (bcm.bcm_ver != BMV_VERSION_1) { - log_debug("%s: client closed due bad version: %d", __func__, - bcm.bcm_ver); + zlog_debug("%s: client closed due bad version: %d", __func__, + bcm.bcm_ver); control_free(bcs); return 0; } @@ -470,8 +467,8 @@ static int control_read(struct thread *t) bcb->bcb_buf = XMALLOC(MTYPE_BFDD_NOTIFICATION, sizeof(bcm) + bcb->bcb_left + 1); if (bcb->bcb_buf == NULL) { - log_warning("%s: not enough memory for message size: %zu", - __func__, bcb->bcb_left); + zlog_warn("%s: not enough memory for message size: %zu", + __func__, bcb->bcb_left); control_free(bcs); return 0; } @@ -492,7 +489,7 @@ skip_header: if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) goto schedule_next_read; - log_warning("%s: read: %s", __func__, strerror(errno)); + zlog_warn("%s: read: %s", __func__, strerror(errno)); control_free(bcs); return 0; } @@ -521,8 +518,8 @@ skip_header: break; default: - log_debug("%s: unhandled message type: %d", __func__, - bcb->bcb_bcm->bcm_type); + zlog_debug("%s: unhandled message type: %d", __func__, + bcb->bcb_bcm->bcm_type); control_response(bcs, bcb->bcb_bcm->bcm_id, BCM_RESPONSE_ERROR, "invalid message type"); break; @@ -559,7 +556,7 @@ static int control_write(struct thread *t) return 0; } - log_warning("%s: write: %s", __func__, strerror(errno)); + zlog_warn("%s: write: %s", __func__, strerror(errno)); control_free(bcs); return 0; } @@ -656,8 +653,7 @@ static int notify_add_cb(struct bfd_peer_cfg *bpc, void *arg) if (bs == NULL) return -1; - if (control_notifypeer_new(bcs, bs) == NULL) - return -1; + control_notifypeer_new(bcs, bs); /* Notify peer status. 
*/ _control_notify(bcs, bs); @@ -723,8 +719,8 @@ static void control_response(struct bfd_control_socket *bcs, uint16_t id, /* Generate JSON response. */ jsonstr = config_response(status, error); if (jsonstr == NULL) { - log_warning("%s: config_response: failed to get JSON str", - __func__); + zlog_warn("%s: config_response: failed to get JSON str", + __func__); return; } @@ -753,8 +749,8 @@ static void _control_notify(struct bfd_control_socket *bcs, /* Generate JSON response. */ jsonstr = config_notify(bs); if (jsonstr == NULL) { - log_warning("%s: config_notify: failed to get JSON str", - __func__); + zlog_warn("%s: config_notify: failed to get JSON str", + __func__); return; } @@ -816,8 +812,8 @@ static void _control_notify_config(struct bfd_control_socket *bcs, /* Generate JSON response. */ jsonstr = config_notify_config(op, bs); if (jsonstr == NULL) { - log_warning("%s: config_notify_config: failed to get JSON str", - __func__); + zlog_warn("%s: config_notify_config: failed to get JSON str", + __func__); return; } diff --git a/bfdd/event.c b/bfdd/event.c index 5ba54c2b0b..654928b9b3 100644 --- a/bfdd/event.c +++ b/bfdd/event.c @@ -43,14 +43,11 @@ void bfd_recvtimer_update(struct bfd_session *bs) bfd_recvtimer_delete(bs); /* Don't add event if peer is deactivated. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); -#ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); -#endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv, &bs->recvtimer_ev); @@ -64,14 +61,11 @@ void bfd_echo_recvtimer_update(struct bfd_session *bs) bfd_echo_recvtimer_delete(bs); /* Don't add event if peer is deactivated. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); -#ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); -#endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv, &bs->echo_recvtimer_ev); @@ -85,14 +79,11 @@ void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter) bfd_xmttimer_delete(bs); /* Don't add event if peer is deactivated. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); -#ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); -#endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev); } @@ -105,14 +96,11 @@ void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter) bfd_echo_xmttimer_delete(bs); /* Don't add event if peer is deactivated. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); -#ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); -#endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv, &bs->echo_xmttimer_ev); diff --git a/bfdd/ptm_adapter.c b/bfdd/ptm_adapter.c index dcca70b796..4c88922b8a 100644 --- a/bfdd/ptm_adapter.c +++ b/bfdd/ptm_adapter.c @@ -80,19 +80,17 @@ static void bfdd_client_deregister(struct stream *msg); /* * Functions */ -#ifdef BFD_DEBUG -static void debug_printbpc(const char *func, unsigned int line, - struct bfd_peer_cfg *bpc); - -static void debug_printbpc(const char *func, unsigned int line, - struct bfd_peer_cfg *bpc) +static void debug_printbpc(const struct bfd_peer_cfg *bpc, const char *fmt, ...) 
{ - char addr[3][128]; - char timers[3][128]; - char cbit_str[10]; - - addr[0][0] = addr[1][0] = addr[2][0] = timers[0][0] = timers[1][0] = - timers[2][0] = 0; + char timers[3][128] = {}; + char addr[3][128] = {}; + char cbit_str[32]; + char msgbuf[256]; + va_list vl; + + /* Avoid debug calculations if it's disabled. */ + if (bglobal.debug_zebra == false) + return; snprintf(addr[0], sizeof(addr[0]), "peer:%s", satostr(&bpc->bpc_peer)); if (bpc->bpc_local.sa_sin.sin_family) @@ -107,28 +105,27 @@ static void debug_printbpc(const char *func, unsigned int line, snprintf(addr[2], sizeof(addr[2]), " vrf:%s", bpc->bpc_vrfname); if (bpc->bpc_has_recvinterval) - snprintf(timers[0], sizeof(timers[0]), " rx:%lu", + snprintf(timers[0], sizeof(timers[0]), " rx:%" PRIu64, bpc->bpc_recvinterval); if (bpc->bpc_has_txinterval) - snprintf(timers[1], sizeof(timers[1]), " tx:%lu", + snprintf(timers[1], sizeof(timers[1]), " tx:%" PRIu64, bpc->bpc_recvinterval); if (bpc->bpc_has_detectmultiplier) snprintf(timers[2], sizeof(timers[2]), " detect-multiplier:%d", bpc->bpc_detectmultiplier); - sprintf(cbit_str, "CB %x", bpc->bpc_cbit); + snprintf(cbit_str, sizeof(cbit_str), " cbit:0x%02x", bpc->bpc_cbit); - log_debug("%s:%d: %s %s%s%s%s%s%s %s", func, line, - bpc->bpc_mhop ? "multi-hop" : "single-hop", addr[0], addr[1], - addr[2], timers[0], timers[1], timers[2], cbit_str); -} + va_start(vl, fmt); + vsnprintf(msgbuf, sizeof(msgbuf), fmt, vl); + va_end(vl); -#define DEBUG_PRINTBPC(bpc) debug_printbpc(__FILE__, __LINE__, (bpc)) -#else -#define DEBUG_PRINTBPC(bpc) -#endif /* BFD_DEBUG */ + zlog_debug("%s [mhop:%s %s%s%s%s%s%s%s]", msgbuf, + bpc->bpc_mhop ? 
"yes" : "no", addr[0], addr[1], addr[2], + timers[0], timers[1], timers[2], cbit_str); +} static int _ptm_msg_address(struct stream *msg, int family, const void *addr) { @@ -260,7 +257,7 @@ static void _ptm_msg_read_address(struct stream *msg, struct sockaddr_any *sa) return; default: - log_warning("ptm-read-address: invalid family: %d", family); + zlog_warn("ptm-read-address: invalid family: %d", family); break; } @@ -315,10 +312,6 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, STREAM_GETL(msg, pid); *pc = pc_new(pid); - if (*pc == NULL) { - log_debug("ptm-read: failed to allocate memory"); - return -1; - } /* Register/update peer information. */ _ptm_msg_read_address(msg, &bpc->bpc_peer); @@ -358,7 +351,7 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, */ STREAM_GETC(msg, ifnamelen); if (ifnamelen >= sizeof(bpc->bpc_localif)) { - log_error("ptm-read: interface name is too big"); + zlog_err("ptm-read: interface name is too big"); return -1; } @@ -376,7 +369,8 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, bpc->bpc_has_vrfname = true; strlcpy(bpc->bpc_vrfname, vrf->name, sizeof(bpc->bpc_vrfname)); } else { - log_error("ptm-read: vrf id %u could not be identified", vrf_id); + zlog_err("ptm-read: vrf id %u could not be identified", + vrf_id); return -1; } } else { @@ -390,7 +384,7 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, if (bpc->bpc_local.sa_sin.sin_family != 0 && (bpc->bpc_local.sa_sin.sin_family != bpc->bpc_peer.sa_sin.sin_family)) { - log_warning("ptm-read: peer family doesn't match local type"); + zlog_warn("ptm-read: peer family doesn't match local type"); return -1; } @@ -403,7 +397,6 @@ stream_failure: static void bfdd_dest_register(struct stream *msg, vrf_id_t vrf_id) { struct ptm_client *pc; - struct ptm_client_notification *pcn; struct bfd_session *bs; struct bfd_peer_cfg bpc; @@ -411,29 +404,27 @@ static void 
bfdd_dest_register(struct stream *msg, vrf_id_t vrf_id) if (_ptm_msg_read(msg, ZEBRA_BFD_DEST_REGISTER, vrf_id, &bpc, &pc) == -1) return; - DEBUG_PRINTBPC(&bpc); + debug_printbpc(&bpc, "ptm-add-dest: register peer"); /* Find or start new BFD session. */ bs = bs_peer_find(&bpc); if (bs == NULL) { bs = ptm_bfd_sess_new(&bpc); if (bs == NULL) { - log_debug("ptm-add-dest: failed to create BFD session"); + if (bglobal.debug_zebra) + zlog_debug( + "ptm-add-dest: failed to create BFD session"); return; } } else { /* Don't try to change echo/shutdown state. */ - bpc.bpc_echo = BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + bpc.bpc_echo = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); bpc.bpc_shutdown = - BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); } /* Create client peer notification register. */ - pcn = pcn_new(pc, bs); - if (pcn == NULL) { - log_error("ptm-add-dest: failed to registrate notifications"); - return; - } + pcn_new(pc, bs); ptm_bfd_notify(bs, bs->ses_state); } @@ -449,12 +440,13 @@ static void bfdd_dest_deregister(struct stream *msg, vrf_id_t vrf_id) if (_ptm_msg_read(msg, ZEBRA_BFD_DEST_DEREGISTER, vrf_id, &bpc, &pc) == -1) return; - DEBUG_PRINTBPC(&bpc); + debug_printbpc(&bpc, "ptm-del-dest: deregister peer"); /* Find or start new BFD session. 
*/ bs = bs_peer_find(&bpc); if (bs == NULL) { - log_debug("ptm-del-dest: failed to find BFD session"); + if (bglobal.debug_zebra) + zlog_debug("ptm-del-dest: failed to find BFD session"); return; } @@ -462,7 +454,7 @@ static void bfdd_dest_deregister(struct stream *msg, vrf_id_t vrf_id) pcn = pcn_lookup(pc, bs); pcn_free(pcn); if (bs->refcount || - BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG)) + CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG)) return; bs->ses_state = PTM_BFD_ADM_DOWN; @@ -477,22 +469,17 @@ static void bfdd_dest_deregister(struct stream *msg, vrf_id_t vrf_id) */ static void bfdd_client_register(struct stream *msg) { - struct ptm_client *pc; uint32_t pid; /* Find or allocate process context data. */ STREAM_GETL(msg, pid); - pc = pc_new(pid); - if (pc == NULL) { - log_error("ptm-add-client: failed to register client: %u", pid); - return; - } + pc_new(pid); return; stream_failure: - log_error("ptm-add-client: failed to register client"); + zlog_err("ptm-add-client: failed to register client"); } /* @@ -509,7 +496,9 @@ static void bfdd_client_deregister(struct stream *msg) pc = pc_lookup(pid); if (pc == NULL) { - log_debug("ptm-del-client: failed to find client: %u", pid); + if (bglobal.debug_zebra) + zlog_debug("ptm-del-client: failed to find client: %u", + pid); return; } @@ -518,7 +507,7 @@ static void bfdd_client_deregister(struct stream *msg) return; stream_failure: - log_error("ptm-del-client: failed to deregister client"); + zlog_err("ptm-del-client: failed to deregister client"); } static int bfdd_replay(ZAPI_CALLBACK_ARGS) @@ -544,14 +533,15 @@ static int bfdd_replay(ZAPI_CALLBACK_ARGS) break; default: - log_debug("ptm-replay: invalid message type %u", rcmd); + if (bglobal.debug_zebra) + zlog_debug("ptm-replay: invalid message type %u", rcmd); return -1; } return 0; stream_failure: - log_error("ptm-replay: failed to find command"); + zlog_err("ptm-replay: failed to find command"); return -1; } @@ -672,6 +662,9 @@ void 
bfdd_sessions_disable_vrf(struct vrf *vrf) static int bfd_ifp_destroy(struct interface *ifp) { + if (bglobal.debug_zebra) + zlog_debug("zclient: delete interface %s", ifp->name); + bfdd_sessions_disable_interface(ifp); return 0; @@ -717,11 +710,18 @@ static void bfdd_sessions_enable_address(struct connected *ifc) static int bfdd_interface_address_update(ZAPI_CALLBACK_ARGS) { struct connected *ifc; + char buf[64]; ifc = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id); if (ifc == NULL) return 0; + if (bglobal.debug_zebra) + zlog_debug("zclient: %s local address %s", + cmd == ZEBRA_INTERFACE_ADDRESS_ADD ? "add" + : "delete", + prefix2str(ifc->address, buf, sizeof(buf))); + bfdd_sessions_enable_address(ifc); return 0; @@ -729,6 +729,9 @@ static int bfdd_interface_address_update(ZAPI_CALLBACK_ARGS) static int bfd_ifp_create(struct interface *ifp) { + if (bglobal.debug_zebra) + zlog_debug("zclient: add interface %s", ifp->name); + bfdd_sessions_enable_interface(ifp); return 0; diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c index 221386e38d..b7e2f45195 100644 --- a/bgpd/bgp_attr.c +++ b/bgpd/bgp_attr.c @@ -119,11 +119,11 @@ static void *cluster_hash_alloc(void *p) /* Cluster list related functions. */ static struct cluster_list *cluster_parse(struct in_addr *pnt, int length) { - struct cluster_list tmp; + struct cluster_list tmp = {}; struct cluster_list *cluster; tmp.length = length; - tmp.list = pnt; + tmp.list = length == 0 ? 
NULL : pnt; cluster = hash_get(cluster_hash, &tmp, cluster_hash_alloc); cluster->refcnt++; @@ -152,10 +152,16 @@ static bool cluster_hash_cmp(const void *p1, const void *p2) const struct cluster_list *cluster1 = p1; const struct cluster_list *cluster2 = p2; - return (cluster1->length == cluster2->length - && (cluster1->list == cluster2->list - || memcmp(cluster1->list, cluster2->list, cluster1->length) - == 0)); + if (cluster1->list == cluster2->list) + return true; + + if (!cluster1->list || !cluster2->list) + return false; + + if (cluster1->length != cluster2->length) + return false; + + return (memcmp(cluster1->list, cluster2->list, cluster1->length) == 0); } static void cluster_free(struct cluster_list *cluster) @@ -174,14 +180,16 @@ static struct cluster_list *cluster_intern(struct cluster_list *cluster) return find; } -void cluster_unintern(struct cluster_list *cluster) +static void cluster_unintern(struct cluster_list **cluster) { - if (cluster->refcnt) - cluster->refcnt--; + if ((*cluster)->refcnt) + (*cluster)->refcnt--; - if (cluster->refcnt == 0) { - hash_release(cluster_hash, cluster); - cluster_free(cluster); + if ((*cluster)->refcnt == 0) { + void *p = hash_release(cluster_hash, *cluster); + assert(p == *cluster); + cluster_free(*cluster); + *cluster = NULL; } } @@ -1029,7 +1037,7 @@ void bgp_attr_unintern_sub(struct attr *attr) UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)); if (attr->cluster) - cluster_unintern(attr->cluster); + cluster_unintern(&attr->cluster); UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST)); if (attr->transit) @@ -1824,7 +1832,8 @@ bgp_attr_community(struct bgp_attr_parser_args *args) if (length == 0) { attr->community = NULL; - return BGP_ATTR_PARSE_PROCEED; + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, + args->total); } attr->community = @@ -1886,7 +1895,7 @@ bgp_attr_cluster_list(struct bgp_attr_parser_args *args) * malformed, the UPDATE message SHALL be handled using the 
approach * of "treat-as-withdraw". */ - if (length % 4) { + if (length == 0 || length % 4) { flog_err(EC_BGP_ATTR_LEN, "Bad cluster list length %d", length); return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR, @@ -2165,11 +2174,11 @@ bgp_attr_large_community(struct bgp_attr_parser_args *args) if (length == 0) { attr->lcommunity = NULL; /* Empty extcomm doesn't seem to be invalid per se */ - return BGP_ATTR_PARSE_PROCEED; + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, + args->total); } - attr->lcommunity = - lcommunity_parse((uint8_t *)stream_pnt(peer->curr), length); + attr->lcommunity = lcommunity_parse(stream_pnt(peer->curr), length); /* XXX: fix ecommunity_parse to use stream API */ stream_forward_getp(peer->curr, length); @@ -2194,11 +2203,12 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args) if (length == 0) { attr->ecommunity = NULL; /* Empty extcomm doesn't seem to be invalid per se */ - return BGP_ATTR_PARSE_PROCEED; + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, + args->total); } attr->ecommunity = - ecommunity_parse((uint8_t *)stream_pnt(peer->curr), length); + ecommunity_parse(stream_pnt(peer->curr), length); /* XXX: fix ecommunity_parse to use stream API */ stream_forward_getp(peer->curr, length); @@ -2246,6 +2256,9 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args) bgp_attr_extcom_tunnel_type(attr, (bgp_encap_types *)&attr->encap_tunneltype); + /* Extract link bandwidth, if any. 
*/ + (void)ecommunity_linkbw_present(attr->ecommunity, &attr->link_bw); + return BGP_ATTR_PARSE_PROCEED; } diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index 98a9a620f7..94531313ae 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -87,7 +87,7 @@ struct bgp_attr_encap_subtlv { uint8_t value[0]; /* will be extended */ }; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * old rfp<->rfapi representation */ @@ -233,7 +233,7 @@ struct attr { uint16_t encap_tunneltype; /* grr */ struct bgp_attr_encap_subtlv *encap_subtlvs; /* rfc5512 */ -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC struct bgp_attr_encap_subtlv *vnc_subtlvs; /* VNC-specific */ #endif /* EVPN */ @@ -250,6 +250,9 @@ struct attr { /* rmap set table */ uint32_t rmap_table_id; + + /* Link bandwidth value, if any. */ + uint32_t link_bw; }; /* rmap_change_flags definition */ @@ -260,6 +263,7 @@ struct attr { #define BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED (1 << 4) #define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5) #define BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED (1 << 6) +#define BATTR_RMAP_LINK_BW_SET (1 << 7) /* Router Reflector related structure. */ struct cluster_list { @@ -331,7 +335,6 @@ extern unsigned long int attr_unknown_count(void); /* Cluster list prototypes. */ extern bool cluster_loop_check(struct cluster_list *, struct in_addr); -extern void cluster_unintern(struct cluster_list *); /* Below exported for unit-test purposes only */ struct bgp_attr_parser_args { @@ -409,5 +412,4 @@ static inline uint32_t mac_mobility_seqnum(struct attr *attr) { return (attr) ? 
attr->mm_seqnum : 0; } - #endif /* _QUAGGA_BGP_ATTR_H */ diff --git a/bgpd/bgp_attr_evpn.c b/bgpd/bgp_attr_evpn.c index 7239ddef93..65072088ae 100644 --- a/bgpd/bgp_attr_evpn.c +++ b/bgpd/bgp_attr_evpn.c @@ -45,7 +45,7 @@ void bgp_add_routermac_ecom(struct attr *attr, struct ethaddr *routermac) memcpy(&routermac_ecom.val[2], routermac->octet, ETH_ALEN); if (!attr->ecommunity) attr->ecommunity = ecommunity_new(); - ecommunity_add_val(attr->ecommunity, &routermac_ecom); + ecommunity_add_val(attr->ecommunity, &routermac_ecom, false, false); ecommunity_str(attr->ecommunity); } diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index a6fc4ebd03..8902a8789a 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -1772,7 +1772,12 @@ static void bmp_active_setup(struct bmp_active *ba) } } -static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "}; +static struct cmd_node bmp_node = { + .name = "bmp", + .node = BMP_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-bgp-bmp)# " +}; #define BMP_STR "BGP Monitoring Protocol\n" @@ -2266,7 +2271,7 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty) static int bgp_bmp_init(struct thread_master *tm) { - install_node(&bmp_node, NULL); + install_node(&bmp_node); install_default(BMP_NODE); install_element(BGP_NODE, &bmp_targets_cmd); install_element(BGP_NODE, &no_bmp_targets_cmd); diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index cf4d44ea22..28b22997ed 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -972,7 +972,7 @@ int community_list_set(struct community_list_handler *ch, const char *name, entry = community_entry_new(); entry->direct = direct; entry->style = style; - entry->any = (str ? 0 : 1); + entry->any = (str ? 
false : true); entry->u.com = com; entry->reg = regex; entry->seq = seqnum; @@ -1090,26 +1090,34 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom, /* Helper to check if every octet do not exceed UINT_MAX */ static bool lcommunity_list_valid(const char *community) { - int octets = 0; - char **splits; - int num; + int octets; + char **splits, **communities; + int num, num_communities; - frrstr_split(community, ":", &splits, &num); + frrstr_split(community, " ", &communities, &num_communities); - for (int i = 0; i < num; i++) { - if (strtoul(splits[i], NULL, 10) > UINT_MAX) - return false; + for (int j = 0; j < num_communities; j++) { + octets = 0; + frrstr_split(communities[j], ":", &splits, &num); + + for (int i = 0; i < num; i++) { + if (strtoul(splits[i], NULL, 10) > UINT_MAX) + return false; - if (strlen(splits[i]) == 0) + if (strlen(splits[i]) == 0) + return false; + + octets++; + XFREE(MTYPE_TMP, splits[i]); + } + XFREE(MTYPE_TMP, splits); + + if (octets < 3) return false; - octets++; - XFREE(MTYPE_TMP, splits[i]); + XFREE(MTYPE_TMP, communities[j]); } - XFREE(MTYPE_TMP, splits); - - if (octets < 3) - return false; + XFREE(MTYPE_TMP, communities); return true; } @@ -1161,7 +1169,7 @@ int lcommunity_list_set(struct community_list_handler *ch, const char *name, entry = community_entry_new(); entry->direct = direct; entry->style = style; - entry->any = (str ? 0 : 1); + entry->any = (str ? 
false : true); entry->u.lcom = lcom; entry->reg = regex; entry->seq = seqnum; @@ -1282,7 +1290,7 @@ int extcommunity_list_set(struct community_list_handler *ch, const char *name, entry = community_entry_new(); entry->direct = direct; entry->style = style; - entry->any = 0; + entry->any = false; if (ecom) entry->config = ecommunity_ecom2str( ecom, ECOMMUNITY_FORMAT_COMMUNITY_LIST, 0); diff --git a/bgpd/bgp_clist.h b/bgpd/bgp_clist.h index 4cb5d7c593..f7d46525a0 100644 --- a/bgpd/bgp_clist.h +++ b/bgpd/bgp_clist.h @@ -81,7 +81,7 @@ struct community_entry { uint8_t style; /* Any match. */ - uint8_t any; + bool any; /* Sequence number. */ int64_t seq; diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c index 195c0f394c..30de84c878 100644 --- a/bgpd/bgp_community.c +++ b/bgpd/bgp_community.c @@ -146,7 +146,7 @@ uint32_t community_val_get(struct community *com, int i) uint32_t val; p = (uint8_t *)com->val; - p += (i * 4); + p += (i * COMMUNITY_SIZE); memcpy(&val, p, sizeof(uint32_t)); @@ -514,11 +514,11 @@ struct community *community_parse(uint32_t *pnt, unsigned short length) struct community *new; /* If length is malformed return NULL. */ - if (length % 4) + if (length % COMMUNITY_SIZE) return NULL; /* Make temporary community for hash look up. 
*/ - tmp.size = length / 4; + tmp.size = length / COMMUNITY_SIZE; tmp.val = pnt; new = community_uniq_sort(&tmp); @@ -533,8 +533,9 @@ struct community *community_dup(struct community *com) new = XCALLOC(MTYPE_COMMUNITY, sizeof(struct community)); new->size = com->size; if (new->size) { - new->val = XMALLOC(MTYPE_COMMUNITY_VAL, com->size * 4); - memcpy(new->val, com->val, com->size * 4); + new->val = XMALLOC(MTYPE_COMMUNITY_VAL, + com->size * COMMUNITY_SIZE); + memcpy(new->val, com->val, com->size * COMMUNITY_SIZE); } else new->val = NULL; return new; @@ -558,7 +559,7 @@ char *community_str(struct community *com, bool make_json) hash package.*/ unsigned int community_hash_make(const struct community *com) { - uint32_t *pnt = (uint32_t *)com->val; + uint32_t *pnt = com->val; return jhash2(pnt, com->size, 0x43ea96c1); } @@ -600,7 +601,8 @@ bool community_cmp(const struct community *com1, const struct community *com2) return false; if (com1->size == com2->size) - if (memcmp(com1->val, com2->val, com1->size * 4) == 0) + if (memcmp(com1->val, com2->val, com1->size * COMMUNITY_SIZE) + == 0) return true; return false; } @@ -610,13 +612,14 @@ struct community *community_merge(struct community *com1, struct community *com2) { if (com1->val) - com1->val = XREALLOC(MTYPE_COMMUNITY_VAL, com1->val, - (com1->size + com2->size) * 4); + com1->val = + XREALLOC(MTYPE_COMMUNITY_VAL, com1->val, + (com1->size + com2->size) * COMMUNITY_SIZE); else com1->val = XMALLOC(MTYPE_COMMUNITY_VAL, - (com1->size + com2->size) * 4); + (com1->size + com2->size) * COMMUNITY_SIZE); - memcpy(com1->val + com1->size, com2->val, com2->size * 4); + memcpy(com1->val + com1->size, com2->val, com2->size * COMMUNITY_SIZE); com1->size += com2->size; return com1; diff --git a/bgpd/bgp_community.h b/bgpd/bgp_community.h index 31a061370d..b99f38ab64 100644 --- a/bgpd/bgp_community.h +++ b/bgpd/bgp_community.h @@ -61,8 +61,10 @@ struct community { #define COMMUNITY_LOCAL_AS 0xFFFFFF03 #define COMMUNITY_NO_PEER 
0xFFFFFF04 +#define COMMUNITY_SIZE 4 + /* Macros of community attribute. */ -#define com_length(X) ((X)->size * 4) +#define com_length(X) ((X)->size * COMMUNITY_SIZE) #define com_lastval(X) ((X)->val + (X)->size - 1) #define com_nthval(X,n) ((X)->val + (n)) diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 5104e23515..f503c1b18e 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -2282,11 +2282,17 @@ static int bgp_config_write_debug(struct vty *vty) return write; } -static struct cmd_node debug_node = {DEBUG_NODE, "", 1}; +static int bgp_config_write_debug(struct vty *vty); +static struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = bgp_config_write_debug, +}; void bgp_debug_init(void) { - install_node(&debug_node, bgp_config_write_debug); + install_node(&debug_node); install_element(ENABLE_NODE, &show_debugging_bgp_cmd); diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c index cd1722ccca..a79c5e0da0 100644 --- a/bgpd/bgp_dump.c +++ b/bgpd/bgp_dump.c @@ -777,8 +777,14 @@ DEFUN (no_dump_bgp_all, return bgp_dump_unset(bgp_dump_struct); } +static int config_write_bgp_dump(struct vty *vty); /* BGP node structure. */ -static struct cmd_node bgp_dump_node = {DUMP_NODE, "", 1}; +static struct cmd_node bgp_dump_node = { + .name = "dump", + .node = DUMP_NODE, + .prompt = "", + .config_write = config_write_bgp_dump, +}; #if 0 char * @@ -857,7 +863,7 @@ void bgp_dump_init(void) stream_new((BGP_MAX_PACKET_SIZE << 1) + BGP_DUMP_MSG_HEADER + BGP_DUMP_HEADER_SIZE); - install_node(&bgp_dump_node, config_write_bgp_dump); + install_node(&bgp_dump_node); install_element(CONFIG_NODE, &dump_bgp_all_cmd); install_element(CONFIG_NODE, &no_dump_bgp_all_cmd); diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index fc66494742..fe09aab956 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -74,10 +74,16 @@ static void ecommunity_hash_free(struct ecommunity *ecom) Attribute structure. 
When the value is already exists in the structure, we don't add the value. Newly added value is sorted by numerical order. When the value is added to the structure return 1 - else return 0. */ -bool ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval) + else return 0. + The additional parameters 'unique' and 'overwrite' ensure a particular + extended community (based on type and sub-type) is present only + once and whether the new value should replace what is existing or + not. +*/ +bool ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval, + bool unique, bool overwrite) { - int c; + int c, ins_idx; /* When this is fist value, just add it. */ if (ecom->val == NULL) { @@ -88,25 +94,45 @@ bool ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval) } /* If the value already exists in the structure return 0. */ + /* check also if the extended community itself exists. */ c = 0; + ins_idx = -1; for (uint8_t *p = ecom->val; c < ecom->size; p += ECOMMUNITY_SIZE, c++) { + if (unique) { + if (p[0] == eval->val[0] && + p[1] == eval->val[1]) { + if (overwrite) { + memcpy(p, eval->val, ECOMMUNITY_SIZE); + return 1; + } + return 0; + } + } int ret = memcmp(p, eval->val, ECOMMUNITY_SIZE); if (ret == 0) - return false; - else if (ret > 0) - break; + return 0; + if (ret > 0) { + if (!unique) + break; + if (ins_idx == -1) + ins_idx = c; + } } + if (ins_idx == -1) + ins_idx = c; + /* Add the value to the structure with numerical sorting. 
*/ ecom->size++; ecom->val = XREALLOC(MTYPE_ECOMMUNITY_VAL, ecom->val, ecom->size * ECOMMUNITY_SIZE); - memmove(ecom->val + ((c + 1) * ECOMMUNITY_SIZE), - ecom->val + (c * ECOMMUNITY_SIZE), - (ecom->size - 1 - c) * ECOMMUNITY_SIZE); - memcpy(ecom->val + (c * ECOMMUNITY_SIZE), eval->val, ECOMMUNITY_SIZE); + memmove(ecom->val + ((ins_idx + 1) * ECOMMUNITY_SIZE), + ecom->val + (ins_idx * ECOMMUNITY_SIZE), + (ecom->size - 1 - ins_idx) * ECOMMUNITY_SIZE); + memcpy(ecom->val + (ins_idx * ECOMMUNITY_SIZE), + eval->val, ECOMMUNITY_SIZE); return true; } @@ -128,7 +154,7 @@ struct ecommunity *ecommunity_uniq_sort(struct ecommunity *ecom) for (i = 0; i < ecom->size; i++) { eval = (struct ecommunity_val *)(ecom->val + (i * ECOMMUNITY_SIZE)); - ecommunity_add_val(new, eval); + ecommunity_add_val(new, eval, false, false); } return new; } @@ -543,7 +569,7 @@ struct ecommunity *ecommunity_str2com(const char *str, int type, if (ecom == NULL) ecom = ecommunity_new(); eval.val[1] = type; - ecommunity_add_val(ecom, &eval); + ecommunity_add_val(ecom, &eval, false, false); break; case ecommunity_token_unknown: default: @@ -611,6 +637,33 @@ static int ecommunity_rt_soo_str(char *buf, size_t bufsz, const uint8_t *pnt, return len; } +static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt) +{ + int len = 0; + as_t as; + uint32_t bw; + char bps_buf[20] = {0}; + +#define ONE_GBPS_BYTES (1000 * 1000 * 1000 / 8) +#define ONE_MBPS_BYTES (1000 * 1000 / 8) +#define ONE_KBPS_BYTES (1000 / 8) + + as = (*pnt++ << 8); + as |= (*pnt++); + (void)ptr_get_be32(pnt, &bw); + if (bw >= ONE_GBPS_BYTES) + sprintf(bps_buf, "%.3f Gbps", (float)(bw/ONE_GBPS_BYTES)); + else if (bw >= ONE_MBPS_BYTES) + sprintf(bps_buf, "%.3f Mbps", (float)(bw/ONE_MBPS_BYTES)); + else if (bw >= ONE_KBPS_BYTES) + sprintf(bps_buf, "%.3f Kbps", (float)(bw/ONE_KBPS_BYTES)); + else + sprintf(bps_buf, "%u bps", bw * 8); + + len = snprintf(buf, bufsz, "LB:%u:%u (%s)", as, bw, bps_buf); + return len; +} + /* Convert 
extended community attribute to string. Due to historical reason of industry standard implementation, there @@ -686,6 +739,11 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) INET_ADDRSTRLEN); snprintf(encbuf, sizeof(encbuf), "NH:%s:%d", ipv4str, pnt[5]); + } else if (sub_type == + ECOMMUNITY_LINK_BANDWIDTH && + type == ECOMMUNITY_ENCODE_AS) { + ecommunity_lb_str(encbuf, + sizeof(encbuf), pnt); } else unk_ecom = 1; } else { @@ -765,7 +823,7 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) if (sub_type == ECOMMUNITY_REDIRECT_VRF) { char buf[16] = {}; ecommunity_rt_soo_str( - buf, sizeof(buf), (uint8_t *)pnt, + buf, sizeof(buf), pnt, type & ~ECOMMUNITY_ENCODE_TRANS_EXP, ECOMMUNITY_ROUTE_TARGET, ECOMMUNITY_FORMAT_DISPLAY); @@ -821,6 +879,12 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) (uint8_t)mac.octet[5]); } else unk_ecom = 1; + } else if (type == ECOMMUNITY_ENCODE_AS_NON_TRANS) { + sub_type = *pnt++; + if (sub_type == ECOMMUNITY_LINK_BANDWIDTH) + ecommunity_lb_str(encbuf, sizeof(encbuf), pnt); + else + unk_ecom = 1; } else { sub_type = *pnt++; unk_ecom = 1; @@ -889,8 +953,8 @@ extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *ecom, /* remove ext. 
community matching type and subtype * return 1 on success ( removed ), 0 otherwise (not present) */ -extern bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, - uint8_t subtype) +bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, + uint8_t subtype) { uint8_t *p, *q, *new; int c, found = 0; @@ -1165,3 +1229,83 @@ void bgp_remove_ecomm_from_aggregate_hash(struct bgp_aggregate *aggregate, } } } + +/* + * return the BGP link bandwidth extended community, if present; + * the actual bandwidth is returned via param + */ +const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, uint32_t *bw) +{ + const uint8_t *eval; + int i; + + if (bw) + *bw = 0; + + if (!ecom || !ecom->size) + return NULL; + + for (i = 0; i < ecom->size; i++) { + const uint8_t *pnt; + uint8_t type, sub_type; + uint32_t bwval; + + eval = pnt = (ecom->val + (i * ECOMMUNITY_SIZE)); + type = *pnt++; + sub_type = *pnt++; + + if ((type == ECOMMUNITY_ENCODE_AS || + type == ECOMMUNITY_ENCODE_AS_NON_TRANS) && + sub_type == ECOMMUNITY_LINK_BANDWIDTH) { + pnt += 2; /* bandwidth is encoded as AS:val */ + pnt = ptr_get_be32(pnt, &bwval); + (void)pnt; /* consume value */ + if (bw) + *bw = bwval; + return eval; + } + } + + return NULL; +} + + +struct ecommunity *ecommunity_replace_linkbw(as_t as, + struct ecommunity *ecom, + uint64_t cum_bw) +{ + struct ecommunity *new; + struct ecommunity_val lb_eval; + const uint8_t *eval; + uint8_t type; + uint32_t cur_bw; + + /* Nothing to replace if link-bandwidth doesn't exist or + * is non-transitive - just return existing extcommunity. + */ + new = ecom; + if (!ecom || !ecom->size) + return new; + + eval = ecommunity_linkbw_present(ecom, &cur_bw); + if (!eval) + return new; + + type = *eval; + if (type & ECOMMUNITY_FLAG_NON_TRANSITIVE) + return new; + + /* Transitive link-bandwidth exists, replace with the passed + * (cumulative) bandwidth value. 
We need to create a new + * extcommunity for this - refer to AS-Path replace function + * for reference. + */ + if (cum_bw > 0xFFFFFFFF) + cum_bw = 0xFFFFFFFF; + encode_lb_extcomm(as > BGP_AS_MAX ? BGP_AS_TRANS : as, cum_bw, + false, &lb_eval); + new = ecommunity_dup(ecom); + ecommunity_add_val(new, &lb_eval, true, true); + + return new; +} diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h index df0da091d0..7deae8e746 100644 --- a/bgpd/bgp_ecommunity.h +++ b/bgpd/bgp_ecommunity.h @@ -24,21 +24,36 @@ #include "bgpd/bgp_route.h" #include "bgpd/bgpd.h" +/* Refer to rfc7153 for the IANA registry definitions. These are + * updated by other standards like rfc7674. + */ /* High-order octet of the Extended Communities type field. */ #define ECOMMUNITY_ENCODE_AS 0x00 #define ECOMMUNITY_ENCODE_IP 0x01 #define ECOMMUNITY_ENCODE_AS4 0x02 #define ECOMMUNITY_ENCODE_OPAQUE 0x03 #define ECOMMUNITY_ENCODE_EVPN 0x06 -#define ECOMMUNITY_ENCODE_TRANS_EXP 0x80 /* Flow Spec */ #define ECOMMUNITY_ENCODE_REDIRECT_IP_NH 0x08 /* Flow Spec */ +/* Generic Transitive Experimental */ +#define ECOMMUNITY_ENCODE_TRANS_EXP 0x80 + /* RFC7674 */ #define ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 0x81 #define ECOMMUNITY_EXTENDED_COMMUNITY_PART_3 0x82 +/* Non-transitive extended community types. */ +#define ECOMMUNITY_ENCODE_AS_NON_TRANS 0x40 +#define ECOMMUNITY_ENCODE_IP_NON_TRANS 0x41 +#define ECOMMUNITY_ENCODE_AS4_NON_TRANS 0x42 +#define ECOMMUNITY_ENCODE_OPAQUE_NON_TRANS 0x43 + /* Low-order octet of the Extended Communities type field. */ +/* Note: This really depends on the high-order octet. This means that + * multiple definitions for the same value are possible. 
+ */ #define ECOMMUNITY_ROUTE_TARGET 0x02 #define ECOMMUNITY_SITE_ORIGIN 0x03 +#define ECOMMUNITY_LINK_BANDWIDTH 0x04 #define ECOMMUNITY_TRAFFIC_RATE 0x06 /* Flow Spec */ #define ECOMMUNITY_TRAFFIC_ACTION 0x07 #define ECOMMUNITY_REDIRECT_VRF 0x08 @@ -150,6 +165,26 @@ static inline void encode_route_target_as4(as_t as, uint16_t val, eval->val[7] = val & 0xff; } +/* + * Encode BGP Link Bandwidth extended community + * bandwidth (bw) is in bytes-per-sec + */ +static inline void encode_lb_extcomm(as_t as, uint32_t bw, bool non_trans, + struct ecommunity_val *eval) +{ + memset(eval, 0, sizeof(*eval)); + eval->val[0] = ECOMMUNITY_ENCODE_AS; + if (non_trans) + eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE; + eval->val[1] = ECOMMUNITY_LINK_BANDWIDTH; + eval->val[2] = (as >> 8) & 0xff; + eval->val[3] = as & 0xff; + eval->val[4] = (bw >> 24) & 0xff; + eval->val[5] = (bw >> 16) & 0xff; + eval->val[6] = (bw >> 8) & 0xff; + eval->val[7] = bw & 0xff; +} + extern void ecommunity_init(void); extern void ecommunity_finish(void); extern void ecommunity_free(struct ecommunity **); @@ -171,11 +206,11 @@ extern char *ecommunity_str(struct ecommunity *); extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *, uint8_t, uint8_t); extern bool ecommunity_add_val(struct ecommunity *ecom, - struct ecommunity_val *eval); + struct ecommunity_val *eval, + bool unique, bool overwrite); /* for vpn */ extern struct ecommunity *ecommunity_new(void); -extern bool ecommunity_add_val(struct ecommunity *, struct ecommunity_val *); extern bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, uint8_t subtype); extern struct ecommunity *ecommunity_new(void); @@ -201,7 +236,10 @@ extern void bgp_remove_ecomm_from_aggregate_hash( struct bgp_aggregate *aggregate, struct ecommunity *ecommunity); extern void bgp_aggr_ecommunity_remove(void *arg); - +extern const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, + uint32_t *bw); +extern struct ecommunity 
*ecommunity_replace_linkbw(as_t as, + struct ecommunity *ecom, uint64_t cum_bw); static inline void ecommunity_strip_rts(struct ecommunity *ecom) { diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c index a77a1e912e..fadccc5026 100644 --- a/bgpd/bgp_evpn.c +++ b/bgpd/bgp_evpn.c @@ -548,7 +548,7 @@ static void form_auto_rt(struct bgp *bgp, vni_t vni, struct list *rtl) encode_route_target_as((bgp->as & 0xFFFF), vni, &eval); ecomadd = ecommunity_new(); - ecommunity_add_val(ecomadd, &eval); + ecommunity_add_val(ecomadd, &eval, false, false); for (ALL_LIST_ELEMENTS_RO(rtl, node, ecom)) if (ecommunity_cmp(ecomadd, ecom)) ecom_found = true; @@ -738,12 +738,12 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf, struct attr *attr) { struct ecommunity ecom_encap; - struct ecommunity ecom_rmac; struct ecommunity_val eval; struct ecommunity_val eval_rmac; bgp_encap_types tnl_type; struct listnode *node, *nnode; struct ecommunity *ecom; + struct ecommunity *old_ecom; struct list *vrf_export_rtl = NULL; /* Encap */ @@ -754,7 +754,14 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf, ecom_encap.val = (uint8_t *)eval.val; /* Add Encap */ - attr->ecommunity = ecommunity_dup(&ecom_encap); + if (attr->ecommunity) { + old_ecom = attr->ecommunity; + ecom = ecommunity_merge(ecommunity_dup(old_ecom), &ecom_encap); + if (!old_ecom->refcnt) + ecommunity_free(&old_ecom); + } else + ecom = ecommunity_dup(&ecom_encap); + attr->ecommunity = ecom; /* Add the export RTs for L3VNI/VRF */ vrf_export_rtl = bgp_vrf->vrf_export_rtl; @@ -764,12 +771,8 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf, /* add the router mac extended community */ if (!is_zero_mac(&attr->rmac)) { - memset(&ecom_rmac, 0, sizeof(ecom_rmac)); encode_rmac_extcomm(&eval_rmac, &attr->rmac); - ecom_rmac.size = 1; - ecom_rmac.val = (uint8_t *)eval_rmac.val; - attr->ecommunity = - ecommunity_merge(attr->ecommunity, &ecom_rmac); + ecommunity_add_val(attr->ecommunity, &eval_rmac, true, 
true); } attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); @@ -791,7 +794,6 @@ static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr, struct ecommunity ecom_encap; struct ecommunity ecom_sticky; struct ecommunity ecom_default_gw; - struct ecommunity ecom_rmac; struct ecommunity ecom_na; struct ecommunity_val eval; struct ecommunity_val eval_sticky; @@ -845,12 +847,8 @@ static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr, /* Add RMAC, if told to. */ if (add_l3_ecomm) { - memset(&ecom_rmac, 0, sizeof(ecom_rmac)); encode_rmac_extcomm(&eval_rmac, &attr->rmac); - ecom_rmac.size = 1; - ecom_rmac.val = (uint8_t *)eval_rmac.val; - attr->ecommunity = - ecommunity_merge(attr->ecommunity, &ecom_rmac); + ecommunity_add_val(attr->ecommunity, &eval_rmac, true, true); } /* Add default gateway, if needed. */ @@ -903,8 +901,8 @@ static void add_mac_mobility_to_attr(uint32_t seq_num, struct attr *attr) if (type == ECOMMUNITY_ENCODE_EVPN && sub_type == ECOMMUNITY_EVPN_SUBTYPE_MACMOBILITY) { - ecom_val_ptr = (uint8_t *)(attr->ecommunity->val - + (i * 8)); + ecom_val_ptr = + (attr->ecommunity->val + (i * 8)); break; } } @@ -1132,6 +1130,7 @@ static int evpn_es_route_select_install(struct bgp *bgp, old_select->attr->nexthop); } UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG); bgp_zebra_clear_route_change_flags(rn); return ret; } @@ -1152,6 +1151,7 @@ static int evpn_es_route_select_install(struct bgp *bgp, bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED); bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); } if (new_select && new_select->type == ZEBRA_ROUTE_BGP @@ -1211,6 +1211,7 @@ static int evpn_route_select_install(struct bgp *bgp, struct bgpevpn *vpn, bgp, vpn, (const struct prefix_evpn *)bgp_node_get_prefix(rn), old_select); 
UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG); bgp_zebra_clear_route_change_flags(rn); return ret; } @@ -1230,6 +1231,7 @@ static int evpn_route_select_install(struct bgp *bgp, struct bgpevpn *vpn, bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED); bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); } if (new_select && new_select->type == ZEBRA_ROUTE_BGP @@ -4633,7 +4635,7 @@ void evpn_rt_delete_auto(struct bgp *bgp, vni_t vni, struct list *rtl) encode_route_target_as((bgp->as & 0xFFFF), vni, &eval); ecom_auto = ecommunity_new(); - ecommunity_add_val(ecom_auto, &eval); + ecommunity_add_val(ecom_auto, &eval, false, false); node_to_del = NULL; for (ALL_LIST_ELEMENTS(rtl, node, nnode, ecom)) { diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index fddb00b6e2..d20012f5fd 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -2515,8 +2515,7 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp, return; } - bgp_evpn_route2str((struct prefix_evpn *)&p, prefix_str, - sizeof(prefix_str)); + bgp_evpn_route2str(&p, prefix_str, sizeof(prefix_str)); /* Prefix and num paths displayed once per prefix. 
*/ route_vty_out_detail_header(vty, bgp, rn, prd, afi, safi, json); @@ -2574,7 +2573,7 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp, safi = SAFI_EVPN; prefix_cnt = path_cnt = 0; - prefix_rd2str((struct prefix_rd *)prd, rd_str, sizeof(rd_str)); + prefix_rd2str(prd, rd_str, sizeof(rd_str)); rd_rn = bgp_node_lookup(bgp->rib[afi][safi], (struct prefix *)prd); if (!rd_rn) @@ -4099,7 +4098,7 @@ DEFUN(show_bgp_l2vpn_evpn_summary, */ DEFUN(show_bgp_l2vpn_evpn_route, show_bgp_l2vpn_evpn_route_cmd, - "show bgp l2vpn evpn route [detail] [type <macip|multicast|es|prefix>] [json]", + "show bgp l2vpn evpn route [detail] [type <macip|2|multicast|3|es|4|prefix|5>] [json]", SHOW_STR BGP_STR L2VPN_HELP_STR @@ -4108,9 +4107,13 @@ DEFUN(show_bgp_l2vpn_evpn_route, "Display Detailed Information\n" "Specify Route type\n" "MAC-IP (Type-2) route\n" + "MAC-IP (Type-2) route\n" + "Multicast (Type-3) route\n" "Multicast (Type-3) route\n" - "Ethernet Segment (type-4) route \n" - "Prefix (type-5 )route\n" + "Ethernet Segment (Type-4) route\n" + "Ethernet Segment (Type-4) route\n" + "Prefix (Type-5) route\n" + "Prefix (Type-5) route\n" JSON_STR) { struct bgp *bgp; @@ -4132,13 +4135,17 @@ DEFUN(show_bgp_l2vpn_evpn_route, /* get the type */ if (argv_find(argv, argc, "type", &type_idx)) { /* Specific type is requested */ - if (strncmp(argv[type_idx + 1]->arg, "ma", 2) == 0) + if ((strncmp(argv[type_idx + 1]->arg, "ma", 2) == 0) + || (strmatch(argv[type_idx + 1]->arg, "2"))) type = BGP_EVPN_MAC_IP_ROUTE; - else if (strncmp(argv[type_idx + 1]->arg, "mu", 2) == 0) + else if ((strncmp(argv[type_idx + 1]->arg, "mu", 2) == 0) + || (strmatch(argv[type_idx + 1]->arg, "3"))) type = BGP_EVPN_IMET_ROUTE; - else if (strncmp(argv[type_idx + 1]->arg, "e", 1) == 0) + else if ((strncmp(argv[type_idx + 1]->arg, "e", 1) == 0) + || (strmatch(argv[type_idx + 1]->arg, "4"))) type = BGP_EVPN_ES_ROUTE; - else if (strncmp(argv[type_idx + 1]->arg, "p", 1) == 0) + else if ((strncmp(argv[type_idx + 
1]->arg, "p", 1) == 0) + || (strmatch(argv[type_idx + 1]->arg, "5"))) type = BGP_EVPN_IP_PREFIX_ROUTE; else return CMD_WARNING; diff --git a/bgpd/bgp_filter.c b/bgpd/bgp_filter.c index 7de8dc2c80..3e26263df1 100644 --- a/bgpd/bgp_filter.c +++ b/bgpd/bgp_filter.c @@ -667,12 +667,18 @@ static int config_write_as_list(struct vty *vty) return write; } -static struct cmd_node as_list_node = {AS_LIST_NODE, "", 1}; +static int config_write_as_list(struct vty *vty); +static struct cmd_node as_list_node = { + .name = "as list", + .node = AS_LIST_NODE, + .prompt = "", + .config_write = config_write_as_list, +}; /* Register functions. */ void bgp_filter_init(void) { - install_node(&as_list_node, config_write_as_list); + install_node(&as_list_node); install_element(CONFIG_NODE, &bgp_as_path_cmd); install_element(CONFIG_NODE, &no_bgp_as_path_cmd); diff --git a/bgpd/bgp_lcommunity.c b/bgpd/bgp_lcommunity.c index c21056f305..f47ae91663 100644 --- a/bgpd/bgp_lcommunity.c +++ b/bgpd/bgp_lcommunity.c @@ -2,7 +2,7 @@ * * Copyright (C) 2016 Keyur Patel <keyur@arrcus.com> * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software @@ -46,6 +46,8 @@ void lcommunity_free(struct lcommunity **lcom) { XFREE(MTYPE_LCOMMUNITY_VAL, (*lcom)->val); XFREE(MTYPE_LCOMMUNITY_STR, (*lcom)->str); + if ((*lcom)->json) + json_object_free((*lcom)->json); XFREE(MTYPE_LCOMMUNITY, *lcom); } diff --git a/bgpd/bgp_lcommunity.h b/bgpd/bgp_lcommunity.h index e10ab0eef1..c96df8482d 100644 --- a/bgpd/bgp_lcommunity.h +++ b/bgpd/bgp_lcommunity.h @@ -2,7 +2,7 @@ * * Copyright (C) 2016 Keyur Patel <keyur@arrcus.com> * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). 
* * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index c4ece2f082..04be8d83eb 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -228,7 +228,7 @@ static __attribute__((__noreturn__)) void bgp_exit(int status) community_list_terminate(bgp_clist); bgp_vrf_terminate(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_zebra_destroy(); #endif bgp_zebra_destroy(); @@ -275,7 +275,7 @@ static int bgp_vrf_enable(struct vrf *vrf) XFREE(MTYPE_BGP, bgp->name_pretty); bgp->name_pretty = XSTRDUP(MTYPE_BGP, "VRF default"); bgp->inst_type = BGP_INSTANCE_TYPE_DEFAULT; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (!bgp->rfapi) { bgp->rfapi = bgp_rfapi_new(bgp); assert(bgp->rfapi); @@ -361,6 +361,7 @@ static void bgp_vrf_terminate(void) static const struct frr_yang_module_info *const bgpd_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(bgpd, BGP, .vty_port = BGP_VTY_PORT, @@ -425,17 +426,21 @@ int main(int argc, char **argv) else bgp_port = tmp_port; break; - case 'e': - multipath_num = atoi(optarg); - if (multipath_num > MULTIPATH_NUM - || multipath_num <= 0) { + case 'e': { + unsigned long int parsed_multipath = + strtoul(optarg, NULL, 10); + if (parsed_multipath == 0 + || parsed_multipath > MULTIPATH_NUM + || parsed_multipath > UINT_MAX) { flog_err( EC_BGP_MULTIPATH, - "Multipath Number specified must be less than %d and greater than 0", + "Multipath Number specified must be less than %u and greater than 0", MULTIPATH_NUM); return 1; } + multipath_num = parsed_multipath; break; + } case 'l': bgp_address = optarg; /* listenon implies -n */ diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index cbef41bafd..f66f56cb49 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -390,7 +390,7 @@ uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path) * Sets the count of multipaths 
into bestpath's mpath element */ static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, - uint32_t count) + uint16_t count) { struct bgp_path_info_mpath *mpath; if (!count && !path->mpath) @@ -402,6 +402,39 @@ static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, } /* + * bgp_path_info_mpath_lb_update + * + * Update cumulative info related to link-bandwidth + */ +static void bgp_path_info_mpath_lb_update(struct bgp_path_info *path, bool set, + bool all_paths_lb, uint64_t cum_bw) +{ + struct bgp_path_info_mpath *mpath; + + if ((mpath = path->mpath) == NULL) { + if (!set) + return; + mpath = bgp_path_info_mpath_get(path); + if (!mpath) + return; + } + if (set) { + if (cum_bw) + SET_FLAG(mpath->mp_flags, BGP_MP_LB_PRESENT); + else + UNSET_FLAG(mpath->mp_flags, BGP_MP_LB_PRESENT); + if (all_paths_lb) + SET_FLAG(mpath->mp_flags, BGP_MP_LB_ALL); + else + UNSET_FLAG(mpath->mp_flags, BGP_MP_LB_ALL); + mpath->cum_bw = cum_bw; + } else { + mpath->mp_flags = 0; + mpath->cum_bw = 0; + } +} + +/* * bgp_path_info_mpath_attr * * Given bestpath bgp_path_info, return aggregated attribute set used @@ -415,6 +448,42 @@ struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path) } /* + * bgp_path_info_chkwtd + * + * Return if we should attempt to do weighted ECMP or not + * The path passed in is the bestpath. + */ +bool bgp_path_info_mpath_chkwtd(struct bgp *bgp, struct bgp_path_info *path) +{ + /* Check if told to ignore weights or not multipath */ + if (bgp->lb_handling == BGP_LINK_BW_IGNORE_BW || !path->mpath) + return false; + + /* All paths in multipath should have associated weight (bandwidth) + * unless told explicitly otherwise. + */ + if (bgp->lb_handling != BGP_LINK_BW_SKIP_MISSING && + bgp->lb_handling != BGP_LINK_BW_DEFWT_4_MISSING) + return (path->mpath->mp_flags & BGP_MP_LB_ALL); + + /* At least one path should have bandwidth. 
*/ + return (path->mpath->mp_flags & BGP_MP_LB_PRESENT); +} + +/* + * bgp_path_info_mpath_attr + * + * Given bestpath bgp_path_info, return cumulative bandwidth + * computed for all multipaths with bandwidth info + */ +uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path) +{ + if (!path->mpath) + return 0; + return path->mpath->cum_bw; +} + +/* * bgp_path_info_mpath_attr_set * * Sets the aggregated attribute into bestpath's mpath element @@ -444,10 +513,13 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, struct bgp_maxpaths_cfg *mpath_cfg) { uint16_t maxpaths, mpath_count, old_mpath_count; + uint32_t bwval; + uint64_t cum_bw, old_cum_bw; struct listnode *mp_node, *mp_next_node; struct bgp_path_info *cur_mpath, *new_mpath, *next_mpath, *prev_mpath; int mpath_changed, debug; char nh_buf[2][INET6_ADDRSTRLEN]; + bool all_paths_lb; char path_buf[PATH_ADDPATH_STR_BUFFER]; mpath_changed = 0; @@ -455,6 +527,7 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, mpath_count = 0; cur_mpath = NULL; old_mpath_count = 0; + old_cum_bw = cum_bw = 0; prev_mpath = new_best; mp_node = listhead(mp_list); debug = bgp_debug_bestpath(rn); @@ -471,15 +544,18 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, if (old_best) { cur_mpath = bgp_path_info_mpath_first(old_best); old_mpath_count = bgp_path_info_mpath_count(old_best); + old_cum_bw = bgp_path_info_mpath_cumbw(old_best); bgp_path_info_mpath_count_set(old_best, 0); + bgp_path_info_mpath_lb_update(old_best, false, false, 0); bgp_path_info_mpath_dequeue(old_best); } if (debug) zlog_debug( - "%pRN: starting mpath update, newbest %s num candidates %d old-mpath-count %d", + "%pRN: starting mpath update, newbest %s num candidates %d old-mpath-count %d old-cum-bw u%" PRIu64, rn, new_best ? new_best->peer->host : "NONE", - mp_list ? listcount(mp_list) : 0, old_mpath_count); + mp_list ? listcount(mp_list) : 0, + old_mpath_count, old_cum_bw); /* * We perform an ordered walk through both lists in parallel. 
@@ -492,6 +568,7 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, * Note that new_best might be somewhere in the mp_list, so we need * to skip over it */ + all_paths_lb = true; /* We'll reset if any path doesn't have LB. */ while (mp_node || cur_mpath) { struct bgp_path_info *tmp_info; @@ -530,6 +607,11 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, cur_mpath); prev_mpath = cur_mpath; mpath_count++; + if (ecommunity_linkbw_present( + cur_mpath->attr->ecommunity, &bwval)) + cum_bw += bwval; + else + all_paths_lb = false; if (debug) { bgp_path_info_path_with_addpath_rx_str( cur_mpath, path_buf); @@ -617,6 +699,11 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, prev_mpath = new_mpath; mpath_changed = 1; mpath_count++; + if (ecommunity_linkbw_present( + new_mpath->attr->ecommunity, &bwval)) + cum_bw += bwval; + else + all_paths_lb = false; if (debug) { bgp_path_info_path_with_addpath_rx_str( new_mpath, path_buf); @@ -636,16 +723,30 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, } if (new_best) { + bgp_path_info_mpath_count_set(new_best, mpath_count - 1); + if (mpath_count <= 1 || + !ecommunity_linkbw_present( + new_best->attr->ecommunity, &bwval)) + all_paths_lb = false; + else + cum_bw += bwval; + bgp_path_info_mpath_lb_update(new_best, true, + all_paths_lb, cum_bw); + if (debug) zlog_debug( - "%pRN: New mpath count (incl newbest) %d mpath-change %s", + "%pRN: New mpath count (incl newbest) %d mpath-change %s" + " all_paths_lb %d cum_bw u%" PRIu64, rn, mpath_count, - mpath_changed ? "YES" : "NO"); + mpath_changed ? 
"YES" : "NO", + all_paths_lb, cum_bw); - bgp_path_info_mpath_count_set(new_best, mpath_count - 1); if (mpath_changed || (bgp_path_info_mpath_count(new_best) != old_mpath_count)) SET_FLAG(new_best->flags, BGP_PATH_MULTIPATH_CHG); + if ((mpath_count - 1) != old_mpath_count || + old_cum_bw != cum_bw) + SET_FLAG(new_best->flags, BGP_PATH_LINK_BW_CHG); } } @@ -670,6 +771,7 @@ void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best) bgp_path_info_mpath_count_set(dmed_best, 0); UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(dmed_best->flags, BGP_PATH_LINK_BW_CHG); assert(bgp_path_info_mpath_first(dmed_best) == NULL); } diff --git a/bgpd/bgp_mpath.h b/bgpd/bgp_mpath.h index d15f3c9035..34f94b256b 100644 --- a/bgpd/bgp_mpath.h +++ b/bgpd/bgp_mpath.h @@ -36,10 +36,18 @@ struct bgp_path_info_mpath { struct bgp_path_info *mp_info; /* When attached to best path, the number of selected multipaths */ - uint32_t mp_count; + uint16_t mp_count; + + /* Flags - relevant as noted. */ + uint16_t mp_flags; +#define BGP_MP_LB_PRESENT 0x1 /* Link-bandwidth present for >= 1 path */ +#define BGP_MP_LB_ALL 0x2 /* Link-bandwidth present for all multipaths */ /* Aggregated attribute for advertising multipath route */ struct attr *mp_attr; + + /* Cumulative bandiwdth of all multipaths - attached to best path. 
*/ + uint64_t cum_bw; }; /* Functions to support maximum-paths configuration */ @@ -78,5 +86,8 @@ bgp_path_info_mpath_next(struct bgp_path_info *path); /* Accessors for multipath information */ extern uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path); extern struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path); +extern bool bgp_path_info_mpath_chkwtd(struct bgp *bgp, + struct bgp_path_info *path); +extern uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path); #endif /* _QUAGGA_BGP_MPATH_H */ diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index fbcfe39c3a..46dcd2864e 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -48,7 +48,7 @@ #include "bgpd/bgp_nht.h" #include "bgpd/bgp_evpn.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #endif @@ -214,7 +214,7 @@ int bgp_nlri_parse_vpn(struct peer *peer, struct attr *attr, decode_rd_ip(pnt + 5, &rd_ip); break; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC case RD_TYPE_VNC_ETH: break; #endif @@ -1282,7 +1282,7 @@ void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn, /* from */ zlog_debug("%s: start (path_vpn=%p)", __func__, path_vpn); if (!path_vpn->net) { -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* BGP_ROUTE_RFP routes do not have path_vpn->net set (yet) */ if (path_vpn->type == ZEBRA_ROUTE_BGP && path_vpn->sub_type == BGP_ROUTE_RFP) { diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index c77238aa33..bfce61c2af 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -436,8 +436,7 @@ void bgp_connected_delete(struct bgp *bgp, struct connected *ifc) bgp_address_del(bgp, ifc, addr); - rn = bgp_node_lookup(bgp->connected_table[AFI_IP6], - (struct prefix *)&p); + rn = bgp_node_lookup(bgp->connected_table[AFI_IP6], &p); } if (!rn) diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index 10e96497fb..7137c1a784 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -974,14 +974,21 @@ static int bgp_collision_detect(struct peer *new, struct 
in_addr remote_id) return -1; } else if ((peer->status == OpenConfirm) || (peer->status == OpenSent)) { - /* 1. The BGP Identifier of the local system is compared - to - the BGP Identifier of the remote system (as specified - in - the OPEN message). */ - + /* 1. The BGP Identifier of the local system is + * compared to the BGP Identifier of the remote + * system (as specified in the OPEN message). + * + * If the BGP Identifiers of the peers + * involved in the connection collision + * are identical, then the connection + * initiated by the BGP speaker with the + * larger AS number is preserved. + */ if (ntohl(peer->local_id.s_addr) - < ntohl(remote_id.s_addr)) + < ntohl(remote_id.s_addr) + || (ntohl(peer->local_id.s_addr) + == ntohl(remote_id.s_addr) + && peer->local_as < peer->as)) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) { /* 2. If the value of the local BGP @@ -1005,10 +1012,13 @@ static int bgp_collision_detect(struct peer *new, struct in_addr remote_id) return -1; } else { - if (ntohl(peer->local_id.s_addr) == - ntohl(remote_id.s_addr)) - flog_err(EC_BGP_ROUTER_ID_SAME, "Peer's router-id %s is the same as ours", - inet_ntoa(remote_id)); + if (ntohl(peer->local_id.s_addr) + == ntohl(remote_id.s_addr) + && peer->local_as == peer->as) + flog_err( + EC_BGP_ROUTER_ID_SAME, + "Peer's router-id %s is the same as ours", + inet_ntoa(remote_id)); /* 3. Otherwise, the local system closes newly created @@ -1197,10 +1207,17 @@ static int bgp_open_receive(struct peer *peer, bgp_size_t size) } } - /* remote router-id check. */ + /* rfc6286: + * If the BGP Identifier field of the OPEN message + * is zero, or if it is the same as the BGP Identifier + * of the local BGP speaker and the message is from an + * internal peer, then the Error Subcode is set to + * "Bad BGP Identifier". 
+ */ if (remote_id.s_addr == INADDR_ANY || IPV4_CLASS_DE(ntohl(remote_id.s_addr)) - || ntohl(peer->local_id.s_addr) == ntohl(remote_id.s_addr)) { + || (peer->sort == BGP_PEER_IBGP + && ntohl(peer->local_id.s_addr) == ntohl(remote_id.s_addr))) { if (bgp_debug_neighbor_events(peer)) zlog_debug("%s bad OPEN, wrong router identifier %s", peer->host, inet_ntoa(remote_id)); @@ -1345,8 +1362,9 @@ static int bgp_open_receive(struct peer *peer, bgp_size_t size) peer->afc[AFI_IP6][SAFI_FLOWSPEC]; } - /* When collision is detected and this peer is closed. Retrun - immidiately. */ + /* When collision is detected and this peer is closed. + * Return immediately. + */ ret = bgp_collision_detect(peer, remote_id); if (ret < 0) return BGP_Stop; diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c index fd3fad63f5..ab134b15c4 100644 --- a/bgpd/bgp_pbr.c +++ b/bgpd/bgp_pbr.c @@ -738,7 +738,8 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, ecom_copy.val[0] &= ~ECOMMUNITY_ENCODE_TRANS_EXP; ecom_copy.val[1] = ECOMMUNITY_ROUTE_TARGET; - ecommunity_add_val(eckey, &ecom_copy); + ecommunity_add_val(eckey, &ecom_copy, + false, false); api_action->action = ACTION_REDIRECT; api_action->u.redirect_vrf = diff --git a/bgpd/bgp_rd.c b/bgpd/bgp_rd.c index 6a229602b2..66d64066c4 100644 --- a/bgpd/bgp_rd.c +++ b/bgpd/bgp_rd.c @@ -33,7 +33,7 @@ #include "bgpd/bgp_rd.h" #include "bgpd/bgp_attr.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #endif @@ -42,7 +42,7 @@ uint16_t decode_rd_type(const uint8_t *pnt) uint16_t v; v = ((uint16_t)*pnt++ << 8); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * VNC L2 stores LHI in lower byte, so omit it */ @@ -85,7 +85,7 @@ void decode_rd_ip(const uint8_t *pnt, struct rd_ip *rd_ip) rd_ip->val |= (uint16_t)*pnt; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* type == RD_TYPE_VNC_ETH */ void decode_rd_vnc_eth(const uint8_t *pnt, struct rd_vnc_eth *rd_vnc_eth) { @@ -186,7 +186,7 @@ char *prefix_rd2str(const struct prefix_rd 
*prd, char *buf, size_t size) rd_ip.val); return buf; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC else if (type == RD_TYPE_VNC_ETH) { snprintf(buf, size, "LHI:%d, %02x:%02x:%02x:%02x:%02x:%02x", *(pnt + 1), /* LHI */ diff --git a/bgpd/bgp_rd.h b/bgpd/bgp_rd.h index 56b9023a2e..b5ad9d624d 100644 --- a/bgpd/bgp_rd.h +++ b/bgpd/bgp_rd.h @@ -28,7 +28,7 @@ #define RD_TYPE_IP 1 #define RD_TYPE_AS4 2 -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #define RD_TYPE_VNC_ETH 0xff00 /* VNC L2VPN */ #endif @@ -46,7 +46,7 @@ struct rd_ip { uint16_t val; }; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC struct rd_vnc_eth { uint16_t type; uint8_t local_nve_id; @@ -60,7 +60,7 @@ extern void encode_rd_type(uint16_t, uint8_t *); extern void decode_rd_as(const uint8_t *pnt, struct rd_as *rd_as); extern void decode_rd_as4(const uint8_t *pnt, struct rd_as *rd_as); extern void decode_rd_ip(const uint8_t *pnt, struct rd_ip *rd_ip); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC extern void decode_rd_vnc_eth(const uint8_t *pnt, struct rd_vnc_eth *rd_vnc_eth); #endif diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 9a3ab0d8ee..09eb6fcf82 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -71,7 +71,7 @@ #include "bgpd/bgp_mac.h" #include "bgpd/bgp_network.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #include "bgpd/rfapi/vnc_import_bgp.h" #include "bgpd/rfapi/vnc_export_bgp.h" @@ -1560,6 +1560,8 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, afi_t afi; safi_t safi; int samepeer_safe = 0; /* for synthetic mplsvpns routes */ + bool nh_reset = false; + uint64_t cum_bw; if (DISABLE_BGP_ANNOUNCE) return false; @@ -1577,7 +1579,7 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, piattr = bgp_path_info_mpath_count(pi) ? 
bgp_path_info_mpath_attr(pi) : pi->attr; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (((afi == AFI_IP) || (afi == AFI_IP6)) && (safi == SAFI_MPLS_VPN) && ((pi->type == ZEBRA_ROUTE_BGP_DIRECT) || (pi->type == ZEBRA_ROUTE_BGP_DIRECT_EXT))) { @@ -1923,8 +1925,7 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, * benefit from consistent behavior across different BGP * implementations. */ - if (peer->bgp->ebgp_requires_policy - == DEFAULT_EBGP_POLICY_ENABLED) + if (CHECK_FLAG(bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY)) if (!bgp_outbound_policy_exists(peer, filter)) return false; @@ -1934,7 +1935,7 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, * and RFC 5065 by eliminating AS_SET and AS_CONFED_SET types, * and obsoletes RFC 6472. */ - if (peer->bgp->reject_as_sets == BGP_REJECT_AS_SETS_ENABLED) + if (peer->bgp->reject_as_sets) if (aspath_check_as_sets(attr->aspath)) return false; @@ -1983,12 +1984,14 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, PEER_FLAG_FORCE_NEXTHOP_SELF)) { if (!reflect || CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_FORCE_NEXTHOP_SELF)) + PEER_FLAG_FORCE_NEXTHOP_SELF)) { subgroup_announce_reset_nhop( (peer_cap_enhe(peer, afi, safi) ? AF_INET6 : p->family), attr); + nh_reset = true; + } } else if (peer->sort == BGP_PEER_EBGP) { /* Can also reset the nexthop if announcing to EBGP, but * only if @@ -1999,22 +2002,26 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if ((p->family == AF_INET) && (!bgp_subgrp_multiaccess_check_v4( piattr->nexthop, - subgrp, from))) + subgrp, from))) { subgroup_announce_reset_nhop( (peer_cap_enhe(peer, afi, safi) ? AF_INET6 : p->family), attr); + nh_reset = true; + } if ((p->family == AF_INET6) && (!bgp_subgrp_multiaccess_check_v6( piattr->mp_nexthop_global, - subgrp, from))) + subgrp, from))) { subgroup_announce_reset_nhop( (peer_cap_enhe(peer, afi, safi) ? 
AF_INET6 : p->family), attr); + nh_reset = true; + } @@ -2032,6 +2039,7 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, "%s: BGP_PATH_ANNC_NH_SELF, family=%s", __func__, family2str(family)); subgroup_announce_reset_nhop(family, attr); + nh_reset = true; } } @@ -2044,10 +2052,25 @@ bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, * the same interface. */ if (p->family == AF_INET6 || peer_cap_enhe(peer, afi, safi)) { - if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) + if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) { subgroup_announce_reset_nhop(AF_INET6, attr); + nh_reset = true; + } } + /* + * When the next hop is set to ourselves, if all multipaths have + * link-bandwidth announce the cumulative bandwidth as that makes + * the most sense. However, don't modify if the link-bandwidth has + * been explicitly set by user policy. + */ + if (nh_reset && + bgp_path_info_mpath_chkwtd(bgp, pi) && + (cum_bw = bgp_path_info_mpath_cumbw(pi)) != 0 && + !CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET)) + attr->ecommunity = ecommunity_replace_linkbw( + bgp->as, attr->ecommunity, cum_bw); + return true; } @@ -2394,7 +2417,8 @@ bool bgp_zebra_has_route_changed(struct bgp_node *rn, * when the best path has an attribute change anyway. 
*/ if (CHECK_FLAG(selected->flags, BGP_PATH_IGP_CHANGED) - || CHECK_FLAG(selected->flags, BGP_PATH_MULTIPATH_CHG)) + || CHECK_FLAG(selected->flags, BGP_PATH_MULTIPATH_CHG) + || CHECK_FLAG(selected->flags, BGP_PATH_LINK_BW_CHG)) return true; /* @@ -2419,6 +2443,54 @@ struct bgp_process_queue { unsigned int queued; }; +static void bgp_process_evpn_route_injection(struct bgp *bgp, afi_t afi, + safi_t safi, struct bgp_node *rn, + struct bgp_path_info *new_select, + struct bgp_path_info *old_select) +{ + const struct prefix *p = bgp_node_get_prefix(rn); + + if ((afi != AFI_IP && afi != AFI_IP6) || (safi != SAFI_UNICAST)) + return; + + if (advertise_type5_routes(bgp, afi) && new_select + && is_route_injectable_into_evpn(new_select)) { + + /* apply the route-map */ + if (bgp->adv_cmd_rmap[afi][safi].map) { + route_map_result_t ret; + struct bgp_path_info rmap_path; + struct bgp_path_info_extra rmap_path_extra; + struct attr dummy_attr; + + dummy_attr = *new_select->attr; + + /* Fill temp path_info */ + prep_for_rmap_apply(&rmap_path, &rmap_path_extra, rn, + new_select, new_select->peer, + &dummy_attr); + + RESET_FLAG(dummy_attr.rmap_change_flags); + + ret = route_map_apply(bgp->adv_cmd_rmap[afi][safi].map, + p, RMAP_BGP, &rmap_path); + + if (ret == RMAP_DENYMATCH) { + bgp_attr_flush(&dummy_attr); + bgp_evpn_withdraw_type5_route(bgp, p, afi, + safi); + } else + bgp_evpn_advertise_type5_route( + bgp, p, &dummy_attr, afi, safi); + } else { + bgp_evpn_advertise_type5_route(bgp, p, new_select->attr, + afi, safi); + } + } else if (advertise_type5_routes(bgp, afi) && old_select + && is_route_injectable_into_evpn(old_select)) + bgp_evpn_withdraw_type5_route(bgp, p, afi, safi); +} + /* * old_select = The old best path * new_select = the new best path @@ -2547,7 +2619,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, && !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) { if 
(bgp_zebra_has_route_changed(rn, old_select)) { -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_import_bgp_add_route(bgp, p, old_select); vnc_import_bgp_exterior_add_route(bgp, p, old_select); #endif @@ -2563,12 +2635,11 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, bgp, afi, safi); } } - UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); - bgp_zebra_clear_route_change_flags(rn); /* If there is a change of interest to peers, reannounce the * route. */ if (CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) + || CHECK_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG) || CHECK_FLAG(rn->flags, BGP_NODE_LABEL_CHANGED)) { group_announce_route(bgp, afi, safi, rn, new_select); @@ -2583,6 +2654,15 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, UNSET_FLAG(rn->flags, BGP_NODE_LABEL_CHANGED); } + /* advertise/withdraw type-5 routes */ + if (CHECK_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG) + || CHECK_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG)) + bgp_process_evpn_route_injection( + bgp, afi, safi, rn, old_select, old_select); + + UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG); + bgp_zebra_clear_route_change_flags(rn); UNSET_FLAG(rn->flags, BGP_NODE_PROCESS_SCHEDULED); return; } @@ -2613,9 +2693,10 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED); bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (old_select != new_select) { if (old_select) { @@ -2668,53 +2749,8 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, } } - /* advertise/withdraw type-5 routes */ - if ((afi == AFI_IP || afi == AFI_IP6) && (safi == 
SAFI_UNICAST)) { - const struct prefix *p = bgp_node_get_prefix(rn); - - if (advertise_type5_routes(bgp, afi) && - new_select && - is_route_injectable_into_evpn(new_select)) { - - /* apply the route-map */ - if (bgp->adv_cmd_rmap[afi][safi].map) { - route_map_result_t ret; - struct bgp_path_info rmap_path; - struct bgp_path_info_extra rmap_path_extra; - struct attr dummy_attr; - - dummy_attr = *new_select->attr; - - /* Fill temp path_info */ - prep_for_rmap_apply( - &rmap_path, &rmap_path_extra, - rn, new_select, new_select->peer, - &dummy_attr); - - RESET_FLAG(dummy_attr.rmap_change_flags); - - ret = route_map_apply( - bgp->adv_cmd_rmap[afi][safi].map, - p, RMAP_BGP, &rmap_path); - if (ret == RMAP_DENYMATCH) { - bgp_attr_flush(&dummy_attr); - bgp_evpn_withdraw_type5_route( - bgp, p, afi, safi); - } else - bgp_evpn_advertise_type5_route( - bgp, p, &dummy_attr, - afi, safi); - } else { - bgp_evpn_advertise_type5_route(bgp, p, - new_select->attr, - afi, safi); - - } - } else if (advertise_type5_routes(bgp, afi) && - old_select && - is_route_injectable_into_evpn(old_select)) - bgp_evpn_withdraw_type5_route(bgp, p, afi, safi); - } + bgp_process_evpn_route_injection(bgp, afi, safi, rn, new_select, + old_select); /* Clear any route change flags. */ bgp_zebra_clear_route_change_flags(rn); @@ -3090,7 +3126,7 @@ static void bgp_rib_withdraw(struct bgp_node *rn, struct bgp_path_info *pi, return; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3202,7 +3238,7 @@ static bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, uint8_t type, uint8_t stype, struct attr *attr, struct bgp_node *rn) { - bool ret = 0; + bool ret = false; /* Only validated for unicast and multicast currently. */ /* Also valid for EVPN where the nexthop is an IP address. 
*/ @@ -3221,6 +3257,10 @@ static bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, /* Note: For IPv6 nexthops, we only validate the global (1st) nexthop; * there is code in bgp_attr.c to ignore the link-local (2nd) nexthop if * it is not an IPv6 link-local address. + * + * If we receive an UPDATE with nexthop length set to 32 bytes + * we shouldn't discard an UPDATE if it's set to (::). + * The link-local (2st) is validated along the code path later. */ if (attr->mp_nexthop_len) { switch (attr->mp_nexthop_len) { @@ -3234,7 +3274,6 @@ static bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, break; case BGP_ATTR_NHLEN_IPV6_GLOBAL: - case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL: case BGP_ATTR_NHLEN_VPNV6_GLOBAL: ret = (IN6_IS_ADDR_UNSPECIFIED(&attr->mp_nexthop_global) || IN6_IS_ADDR_LOOPBACK(&attr->mp_nexthop_global) @@ -3243,6 +3282,13 @@ static bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, || bgp_nexthop_self(bgp, afi, type, stype, attr, rn)); break; + case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL: + ret = (IN6_IS_ADDR_LOOPBACK(&attr->mp_nexthop_global) + || IN6_IS_ADDR_MULTICAST( + &attr->mp_nexthop_global) + || bgp_nexthop_self(bgp, afi, type, stype, attr, + rn)); + break; default: ret = true; @@ -3277,7 +3323,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, uint8_t pi_type = 0; uint8_t pi_sub_type = 0; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC int vnc_implicit_withdraw = 0; #endif int same_attr = 0; @@ -3376,7 +3422,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, * benefit from consistent behavior across different BGP * implementations. 
*/ - if (peer->bgp->ebgp_requires_policy == DEFAULT_EBGP_POLICY_ENABLED) + if (CHECK_FLAG(bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY)) if (!bgp_inbound_policy_exists(peer, &peer->filter[afi][safi])) { reason = "inbound policy missing"; @@ -3389,7 +3435,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, * and RFC 5065 by eliminating AS_SET and AS_CONFED_SET types, * and obsoletes RFC 6472. */ - if (peer->bgp->reject_as_sets == BGP_REJECT_AS_SETS_ENABLED) + if (peer->bgp->reject_as_sets) if (aspath_check_as_sets(attr->aspath)) { reason = "as-path contains AS_SET or AS_CONFED_SET type;"; @@ -3577,7 +3623,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, if (!CHECK_FLAG(pi->flags, BGP_PATH_HISTORY)) bgp_damp_withdraw(pi, rn, afi, safi, 1); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3667,7 +3713,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, } } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (vnc_implicit_withdraw) { @@ -3750,7 +3796,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, } else bgp_path_info_set_flag(rn, pi, BGP_PATH_VALID); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3801,7 +3847,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, vpn_leak_to_vrf_update(bgp, pi); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (SAFI_MPLS_VPN == safi) { mpls_label_t label_decoded = decode_label(label); @@ -3910,7 +3956,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, /* route_node_get lock */ bgp_unlock_node(rn); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct 
bgp_table *table = NULL; @@ -3950,7 +3996,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, vpn_leak_to_vrf_update(bgp, new); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (SAFI_MPLS_VPN == safi) { mpls_label_t label_decoded = decode_label(label); @@ -4007,7 +4053,7 @@ filtered: bgp_unlock_node(rn); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * Filtered update is treated as an implicit withdrawal (see * bgp_rib_remove() @@ -4032,7 +4078,7 @@ int bgp_withdraw(struct peer *peer, const struct prefix *p, uint32_t addpath_id, struct bgp_node *rn; struct bgp_path_info *pi; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((SAFI_MPLS_VPN == safi) || (SAFI_ENCAP == safi)) { rfapiProcessWithdraw(peer, NULL, p, prd, NULL, afi, safi, type, 0); @@ -4530,7 +4576,7 @@ void bgp_clear_route_all(struct peer *peer) FOREACH_AFI_SAFI (afi, safi) bgp_clear_route(peer, afi, safi); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessPeerDown(peer); #endif } @@ -4918,7 +4964,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, struct attr attr; struct attr *attr_new; route_map_result_t ret; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC int vnc_implicit_withdraw = 0; #endif @@ -5000,7 +5046,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, bgp_path_info_restore(rn, pi); else bgp_aggregate_decrement(bgp, p, pi, afi, safi); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) { @@ -5019,7 +5065,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, bgp_attr_unintern(&pi->attr); pi->attr = attr_new; pi->uptime = bgp_clock(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (vnc_implicit_withdraw) { @@ -5188,7 +5234,7 @@ static void bgp_static_withdraw_safi(struct bgp *bgp, const struct prefix *p, /* Withdraw static BGP route from routing table. 
*/ if (pi) { -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessWithdraw( pi->peer, NULL, p, prd, pi->attr, afi, safi, pi->type, 1); /* Kill, since it is an administrative change */ @@ -5215,7 +5261,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p, struct attr *attr_new; struct attr attr = {0}; struct bgp_path_info *pi; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC mpls_label_t label = 0; #endif uint32_t num_labels = 0; @@ -5317,7 +5363,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p, bgp_attr_unintern(&pi->attr); pi->attr = attr_new; pi->uptime = bgp_clock(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (pi->extra) label = decode_label(&pi->extra->label[0]); #endif @@ -5330,7 +5376,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p, && bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { vpn_leak_to_vrf_update(bgp, pi); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessUpdate(pi->peer, NULL, p, &bgp_static->prd, pi->attr, afi, safi, pi->type, pi->sub_type, &label); @@ -5351,7 +5397,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p, new->extra->label[0] = bgp_static->label; new->extra->num_labels = num_labels; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC label = decode_label(&bgp_static->label); #endif @@ -5370,7 +5416,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p, && bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { vpn_leak_to_vrf_update(bgp, new); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessUpdate(new->peer, NULL, p, &bgp_static->prd, new->attr, afi, safi, new->type, new->sub_type, &label); #endif @@ -6854,7 +6900,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi, * subsumed by the previously aggregated route) without AS_SET * or AS_CONFED_SET in the updates. 
*/ - if (bgp->reject_as_sets == BGP_REJECT_AS_SETS_ENABLED) { + if (bgp->reject_as_sets) { if (as_set == AGGREGATE_AS_SET) { as_set_new = AGGREGATE_AS_UNSET; zlog_warn( @@ -7503,11 +7549,12 @@ static void route_vty_short_status_out(struct vty *vty, vty_out(vty, " "); } -static char *bgp_nexthop_hostname(struct peer *peer, struct attr *attr) +static char *bgp_nexthop_hostname(struct peer *peer, + struct bgp_nexthop_cache *bnc) { if (peer->hostname - && CHECK_FLAG(peer->bgp->flags, BGP_FLAG_SHOW_HOSTNAME) - && !(attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))) + && CHECK_FLAG(peer->bgp->flags, BGP_FLAG_SHOW_HOSTNAME) && bnc + && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) return peer->hostname; return NULL; } @@ -7529,7 +7576,8 @@ void route_vty_out(struct vty *vty, const struct prefix *p, bool nexthop_othervrf = false; vrf_id_t nexthop_vrfid = VRF_DEFAULT; const char *nexthop_vrfname = VRF_DEFAULT_NAME; - char *nexthop_hostname = bgp_nexthop_hostname(path->peer, attr); + char *nexthop_hostname = + bgp_nexthop_hostname(path->peer, path->nexthop); if (json_paths) json_path = json_object_new_object(); @@ -7819,32 +7867,19 @@ void route_vty_out(struct vty *vty, const struct prefix *p, /* MED/Metric */ if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) - if (json_paths) { - - /* - * Adding "metric" field to match with corresponding - * CLI. "med" will be deprecated in future. - */ - json_object_int_add(json_path, "med", attr->med); + if (json_paths) json_object_int_add(json_path, "metric", attr->med); - } else + else vty_out(vty, "%10u", attr->med); else if (!json_paths) vty_out(vty, " "); /* Local Pref */ if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) - if (json_paths) { - - /* - * Adding "locPrf" field to match with corresponding - * CLI. "localPref" will be deprecated in future. 
- */ - json_object_int_add(json_path, "localpref", - attr->local_pref); + if (json_paths) json_object_int_add(json_path, "locPrf", - attr->local_pref); - } else + attr->local_pref); + else vty_out(vty, "%7u", attr->local_pref); else if (!json_paths) vty_out(vty, " "); @@ -7863,17 +7898,10 @@ void route_vty_out(struct vty *vty, const struct prefix *p, /* Print aspath */ if (attr->aspath) { - if (json_paths) { - - /* - * Adding "path" field to match with corresponding - * CLI. "aspath" will be deprecated in future. - */ - json_object_string_add(json_path, "aspath", - attr->aspath->str); + if (json_paths) json_object_string_add(json_path, "path", - attr->aspath->str); - } else + attr->aspath->str); + else aspath_print_vty(vty, "%s", attr->aspath, " "); } @@ -7936,7 +7964,7 @@ void route_vty_out(struct vty *vty, const struct prefix *p, vty_out(vty, "%s\n", attr->ecommunity->str); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* prints an additional line, indented, with VNC info, if * present */ if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)) @@ -8016,34 +8044,16 @@ void route_vty_out_tmp(struct vty *vty, const struct prefix *p, json_object_int_add(json_net, "metric", attr->med); - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) { - - /* - * Adding "locPrf" field to match with - * corresponding CLI. "localPref" will be - * deprecated in future. - */ - json_object_int_add(json_net, "localPref", - attr->local_pref); + if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) json_object_int_add(json_net, "locPrf", - attr->local_pref); - } + attr->local_pref); json_object_int_add(json_net, "weight", attr->weight); /* Print aspath */ - if (attr->aspath) { - - /* - * Adding "path" field to match with - * corresponding CLI. "localPref" will be - * deprecated in future. 
- */ - json_object_string_add(json_net, "asPath", - attr->aspath->str); + if (attr->aspath) json_object_string_add(json_net, "path", - attr->aspath->str); - } + attr->aspath->str); /* Print origin */ json_object_string_add(json_net, "bgpOriginCode", @@ -8308,7 +8318,7 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p, mac = ecom_mac2str((char *)routermac->val); if (mac) { if (!json_path) { - vty_out(vty, "/%s", (char *)mac); + vty_out(vty, "/%s", mac); } else { json_object_string_add(json_overlay, "rmac", mac); @@ -8638,7 +8648,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, bool nexthop_self = CHECK_FLAG(path->flags, BGP_PATH_ANNC_NH_SELF) ? true : false; int i; - char *nexthop_hostname = bgp_nexthop_hostname(path->peer, attr); + char *nexthop_hostname = + bgp_nexthop_hostname(path->peer, path->nexthop); if (json_paths) { json_path = json_object_new_object(); @@ -9077,21 +9088,15 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, bgp_origin_long_str[attr->origin]); if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) { - if (json_paths) { - /* - * Adding "metric" field to match with - * corresponding CLI. "med" will be - * deprecated in future. 
- */ - json_object_int_add(json_path, "med", attr->med); + if (json_paths) json_object_int_add(json_path, "metric", attr->med); - } else + else vty_out(vty, ", metric %u", attr->med); } if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) { if (json_paths) - json_object_int_add(json_path, "localpref", + json_object_int_add(json_path, "locPrf", attr->local_pref); else vty_out(vty, ", localpref %u", attr->local_pref); @@ -10291,7 +10296,7 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, } } else if (safi == SAFI_EVPN) { struct bgp_node *longest_pfx; - bool is_exact_pfxlen_match = FALSE; + bool is_exact_pfxlen_match = false; for (rn = bgp_table_top(rib); rn; rn = bgp_route_next(rn)) { const struct prefix *rn_p = bgp_node_get_prefix(rn); @@ -10303,7 +10308,7 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, continue; longest_pfx = NULL; - is_exact_pfxlen_match = FALSE; + is_exact_pfxlen_match = false; /* * Search through all the prefixes for a match. The * pfx's are enumerated in ascending order of pfxlens. 
@@ -10325,7 +10330,7 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, bgp_evpn_get_type5_prefixlen( rm_p); if (type5_pfxlen == match.prefixlen) { - is_exact_pfxlen_match = TRUE; + is_exact_pfxlen_match = true; bgp_unlock_node(rm); break; } @@ -10552,45 +10557,177 @@ DEFUN (show_ip_bgp_large_community, } static int bgp_table_stats(struct vty *vty, struct bgp *bgp, afi_t afi, - safi_t safi); + safi_t safi, struct json_object *json); + + +DEFUN(show_ip_bgp_statistics_all, show_ip_bgp_statistics_all_cmd, + "show [ip] bgp [<view|vrf> VIEWVRFNAME] statistics-all [json]", + SHOW_STR IP_STR BGP_STR BGP_INSTANCE_HELP_STR + "Display number of prefixes for all afi/safi\n" JSON_STR) +{ + bool uj = use_json(argc, argv); + struct bgp *bgp = NULL; + safi_t safi; + afi_t afi; + int idx = 0; + struct json_object *json_all = NULL; + struct json_object *json_afi_safi = NULL; + + bgp_vty_find_and_parse_afi_safi_bgp(vty, argv, argc, &idx, &afi, &safi, + &bgp, false); + if (!bgp) + return CMD_WARNING; + + if (uj) + json_all = json_object_new_object(); + + FOREACH_AFI_SAFI (afi, safi) { + /* + * So limit output to those afi/safi pairs that + * actually have something interesting in them + */ + if (strmatch(get_afi_safi_str(afi, safi, true), + "Unknown")) { + continue; + } + if (uj) { + json_afi_safi = json_object_new_array(); + json_object_object_add( + json_all, + get_afi_safi_str(afi, safi, true), + json_afi_safi); + } else { + json_afi_safi = NULL; + } + bgp_table_stats(vty, bgp, afi, safi, json_afi_safi); + } + + if (uj) { + vty_out(vty, "%s", + json_object_to_json_string_ext( + json_all, JSON_C_TO_STRING_PRETTY)); + json_object_free(json_all); + } + + return CMD_SUCCESS; +} /* BGP route print out function without JSON */ -DEFUN (show_ip_bgp, - show_ip_bgp_cmd, - "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]]\ +DEFUN (show_ip_bgp_l2vpn_evpn_statistics, + show_ip_bgp_l2vpn_evpn_statistics_cmd, + "show [ip] bgp 
[<view|vrf> VIEWVRFNAME] l2vpn evpn statistics [json]", + SHOW_STR + IP_STR + BGP_STR + BGP_INSTANCE_HELP_STR + L2VPN_HELP_STR + EVPN_HELP_STR + "BGP RIB advertisement statistics\n" + JSON_STR) +{ + afi_t afi; + safi_t safi; + struct bgp *bgp = NULL; + int idx = 0, ret; + bool uj = use_json(argc, argv); + struct json_object *json_afi_safi = NULL, *json = NULL; + + bgp_vty_find_and_parse_afi_safi_bgp(vty, argv, argc, &idx, &afi, &safi, + &bgp, false); + if (!idx) + return CMD_WARNING; + + if (uj) + json_afi_safi = json_object_new_array(); + else + json_afi_safi = NULL; + + ret = bgp_table_stats(vty, bgp, afi, safi, json_afi_safi); + + if (uj) { + json = json_object_new_object(); + json_object_object_add(json, get_afi_safi_str(afi, safi, true), + json_afi_safi); + vty_out(vty, "%s", json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } + return ret; +} + +/* BGP route print out function without JSON */ +DEFUN(show_ip_bgp_afi_safi_statistics, show_ip_bgp_afi_safi_statistics_cmd, + "show [ip] bgp [<view|vrf> VIEWVRFNAME] [" BGP_AFI_CMD_STR + " [" BGP_SAFI_WITH_LABEL_CMD_STR + "]]\ + statistics [json]", + SHOW_STR IP_STR BGP_STR BGP_INSTANCE_HELP_STR BGP_AFI_HELP_STR + BGP_SAFI_WITH_LABEL_HELP_STR + "BGP RIB advertisement statistics\n" JSON_STR) +{ + afi_t afi; + safi_t safi; + struct bgp *bgp = NULL; + int idx = 0, ret; + bool uj = use_json(argc, argv); + struct json_object *json_afi_safi = NULL, *json = NULL; + + bgp_vty_find_and_parse_afi_safi_bgp(vty, argv, argc, &idx, &afi, &safi, + &bgp, false); + if (!idx) + return CMD_WARNING; + + if (uj) + json_afi_safi = json_object_new_array(); + else + json_afi_safi = NULL; + + ret = bgp_table_stats(vty, bgp, afi, safi, json_afi_safi); + + if (uj) { + json = json_object_new_object(); + json_object_object_add(json, get_afi_safi_str(afi, safi, true), + json_afi_safi); + vty_out(vty, "%s", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + 
} + return ret; +} + +/* BGP route print out function without JSON */ +DEFUN(show_ip_bgp, show_ip_bgp_cmd, + "show [ip] bgp [<view|vrf> VIEWVRFNAME] [" BGP_AFI_CMD_STR + " [" BGP_SAFI_WITH_LABEL_CMD_STR + "]]\ <dampening <parameters>\ |route-map WORD\ |prefix-list WORD\ |filter-list WORD\ - |statistics\ |community-list <(1-500)|WORD> [exact-match]\ |A.B.C.D/M longer-prefixes\ |X:X::X:X/M longer-prefixes\ - >", - SHOW_STR - IP_STR - BGP_STR - BGP_INSTANCE_HELP_STR - BGP_AFI_HELP_STR - BGP_SAFI_WITH_LABEL_HELP_STR - "Display detailed information about dampening\n" - "Display detail of configured dampening parameters\n" - "Display routes matching the route-map\n" - "A route-map to match on\n" - "Display routes conforming to the prefix-list\n" - "Prefix-list name\n" - "Display routes conforming to the filter-list\n" - "Regular expression access list name\n" - "BGP RIB advertisement statistics\n" - "Display routes matching the community-list\n" - "community-list number\n" - "community-list name\n" - "Exact match of the communities\n" - "IPv4 prefix\n" - "Display route and more specific routes\n" - "IPv6 prefix\n" - "Display route and more specific routes\n") + >", + SHOW_STR IP_STR BGP_STR BGP_INSTANCE_HELP_STR BGP_AFI_HELP_STR + BGP_SAFI_WITH_LABEL_HELP_STR + "Display detailed information about dampening\n" + "Display detail of configured dampening parameters\n" + "Display routes matching the route-map\n" + "A route-map to match on\n" + "Display routes conforming to the prefix-list\n" + "Prefix-list name\n" + "Display routes conforming to the filter-list\n" + "Regular expression access list name\n" + "Display routes matching the community-list\n" + "community-list number\n" + "community-list name\n" + "Exact match of the communities\n" + "IPv4 prefix\n" + "Display route and more specific routes\n" + "IPv6 prefix\n" + "Display route and more specific routes\n") { afi_t afi = AFI_IP6; safi_t safi = SAFI_UNICAST; @@ -10616,9 +10753,6 @@ DEFUN (show_ip_bgp, return 
bgp_show_filter_list(vty, bgp, argv[idx + 1]->arg, afi, safi, bgp_show_type_filter_list); - if (argv_find(argv, argc, "statistics", &idx)) - return bgp_table_stats(vty, bgp, afi, safi); - if (argv_find(argv, argc, "route-map", &idx)) return bgp_show_route_map(vty, bgp, argv[idx + 1]->arg, afi, safi, bgp_show_type_route_map); @@ -11028,22 +11162,33 @@ enum bgp_stats { BGP_STATS_MAX, }; -static const char *const table_stats_strs[] = { - [BGP_STATS_PREFIXES] = "Total Prefixes", - [BGP_STATS_TOTPLEN] = "Average prefix length", - [BGP_STATS_RIB] = "Total Advertisements", - [BGP_STATS_UNAGGREGATEABLE] = "Unaggregateable prefixes", - [BGP_STATS_MAX_AGGREGATEABLE] = - "Maximum aggregateable prefixes", - [BGP_STATS_AGGREGATES] = "BGP Aggregate advertisements", - [BGP_STATS_SPACE] = "Address space advertised", - [BGP_STATS_ASPATH_COUNT] = "Advertisements with paths", - [BGP_STATS_ASPATH_MAXHOPS] = "Longest AS-Path (hops)", - [BGP_STATS_ASPATH_MAXSIZE] = "Largest AS-Path (bytes)", - [BGP_STATS_ASPATH_TOTHOPS] = "Average AS-Path length (hops)", - [BGP_STATS_ASPATH_TOTSIZE] = "Average AS-Path size (bytes)", - [BGP_STATS_ASN_HIGHEST] = "Highest public ASN", - [BGP_STATS_MAX] = NULL, +#define TABLE_STATS_IDX_VTY 0 +#define TABLE_STATS_IDX_JSON 1 + +static const char *table_stats_strs[][2] = { + [BGP_STATS_PREFIXES] = {"Total Prefixes", "totalPrefixes"}, + [BGP_STATS_TOTPLEN] = {"Average prefix length", "averagePrefixLength"}, + [BGP_STATS_RIB] = {"Total Advertisements", "totalAdvertisements"}, + [BGP_STATS_UNAGGREGATEABLE] = {"Unaggregateable prefixes", + "unaggregateablePrefixes"}, + [BGP_STATS_MAX_AGGREGATEABLE] = {"Maximum aggregateable prefixes", + "maximumAggregateablePrefixes"}, + [BGP_STATS_AGGREGATES] = {"BGP Aggregate advertisements", + "bgpAggregateAdvertisements"}, + [BGP_STATS_SPACE] = {"Address space advertised", + "addressSpaceAdvertised"}, + [BGP_STATS_ASPATH_COUNT] = {"Advertisements with paths", + "advertisementsWithPaths"}, + [BGP_STATS_ASPATH_MAXHOPS] = 
{"Longest AS-Path (hops)", + "longestAsPath"}, + [BGP_STATS_ASPATH_MAXSIZE] = {"Largest AS-Path (bytes)", + "largestAsPath"}, + [BGP_STATS_ASPATH_TOTHOPS] = {"Average AS-Path length (hops)", + "averageAsPathLengthHops"}, + [BGP_STATS_ASPATH_TOTSIZE] = {"Average AS-Path size (bytes)", + "averageAsPathSizeBytes"}, + [BGP_STATS_ASN_HIGHEST] = {"Highest public ASN", "highestPublicAsn"}, + [BGP_STATS_MAX] = {NULL, NULL} }; struct bgp_table_stats { @@ -11168,7 +11313,9 @@ static int bgp_table_stats_walker(struct thread *t) ts->counts[BGP_STATS_MAXBITLEN] = space; for (rn = top; rn; rn = bgp_route_next(rn)) { - if (ts->table->safi == SAFI_MPLS_VPN) { + if (ts->table->safi == SAFI_MPLS_VPN + || ts->table->safi == SAFI_ENCAP + || ts->table->safi == SAFI_EVPN) { struct bgp_table *table; table = bgp_node_get_bgp_table_info(rn); @@ -11188,18 +11335,36 @@ static int bgp_table_stats_walker(struct thread *t) } static int bgp_table_stats(struct vty *vty, struct bgp *bgp, afi_t afi, - safi_t safi) + safi_t safi, struct json_object *json_array) { struct bgp_table_stats ts; unsigned int i; + int ret = CMD_SUCCESS; + char temp_buf[20]; + struct json_object *json = NULL; + + if (json_array) + json = json_object_new_object(); if (!bgp->rib[afi][safi]) { - vty_out(vty, "%% No RIB exist's for the AFI(%d)/SAFI(%d)\n", - afi, safi); - return CMD_WARNING; + char warning_msg[50]; + + snprintf(warning_msg, sizeof(warning_msg), + "%% No RIB exist's for the AFI(%d)/SAFI(%d)", afi, + safi); + + if (!json) + vty_out(vty, "%s\n", warning_msg); + else + json_object_string_add(json, "warning", warning_msg); + + ret = CMD_WARNING; + goto end_table_stats; } - vty_out(vty, "BGP %s RIB statistics\n", get_afi_safi_str(afi, safi, false)); + if (!json) + vty_out(vty, "BGP %s RIB statistics\n", + get_afi_safi_str(afi, safi, false)); /* labeled-unicast routes live in the unicast table */ if (safi == SAFI_LABELED_UNICAST) @@ -11210,7 +11375,8 @@ static int bgp_table_stats(struct vty *vty, struct bgp *bgp, 
afi_t afi, thread_execute(bm->master, bgp_table_stats_walker, &ts, 0); for (i = 0; i < BGP_STATS_MAX; i++) { - if (!table_stats_strs[i]) + if ((!json && !table_stats_strs[i][TABLE_STATS_IDX_VTY]) + || (json && !table_stats_strs[i][TABLE_STATS_IDX_JSON])) continue; switch (i) { @@ -11225,54 +11391,166 @@ static int bgp_table_stats(struct vty *vty, struct bgp *bgp, afi_t afi, #endif case BGP_STATS_ASPATH_TOTHOPS: case BGP_STATS_ASPATH_TOTSIZE: - vty_out(vty, "%-30s: ", table_stats_strs[i]); - vty_out(vty, "%12.2f", - ts.counts[i] - ? (float)ts.counts[i] - / (float)ts.counts + if (!json) { + snprintf( + temp_buf, sizeof(temp_buf), "%12.2f", + ts.counts[i] + ? (float)ts.counts[i] + / (float)ts.counts + [BGP_STATS_ASPATH_COUNT] + : 0); + vty_out(vty, "%-30s: %s", + table_stats_strs[i] + [TABLE_STATS_IDX_VTY], + temp_buf); + } else { + json_object_double_add( + json, + table_stats_strs[i] + [TABLE_STATS_IDX_JSON], + ts.counts[i] + ? (double)ts.counts[i] + / (double)ts.counts [BGP_STATS_ASPATH_COUNT] - : 0); + : 0); + } break; case BGP_STATS_TOTPLEN: - vty_out(vty, "%-30s: ", table_stats_strs[i]); - vty_out(vty, "%12.2f", - ts.counts[i] - ? (float)ts.counts[i] - / (float)ts.counts + if (!json) { + snprintf( + temp_buf, sizeof(temp_buf), "%12.2f", + ts.counts[i] + ? (float)ts.counts[i] + / (float)ts.counts + [BGP_STATS_PREFIXES] + : 0); + vty_out(vty, "%-30s: %s", + table_stats_strs[i] + [TABLE_STATS_IDX_VTY], + temp_buf); + } else { + json_object_double_add( + json, + table_stats_strs[i] + [TABLE_STATS_IDX_JSON], + ts.counts[i] + ? 
(double)ts.counts[i] + / (double)ts.counts [BGP_STATS_PREFIXES] - : 0); + : 0); + } break; case BGP_STATS_SPACE: - vty_out(vty, "%-30s: ", table_stats_strs[i]); - vty_out(vty, "%12g\n", ts.total_space); - + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), "%12g", + ts.total_space); + vty_out(vty, "%-30s: %s\n", + table_stats_strs[i] + [TABLE_STATS_IDX_VTY], + temp_buf); + } else { + json_object_double_add( + json, + table_stats_strs[i] + [TABLE_STATS_IDX_JSON], + (double)ts.total_space); + } if (afi == AFI_IP6) { - vty_out(vty, "%30s: ", "/32 equivalent "); - vty_out(vty, "%12g\n", - ts.total_space * pow(2.0, -128 + 32)); - vty_out(vty, "%30s: ", "/48 equivalent "); - vty_out(vty, "%12g\n", - ts.total_space * pow(2.0, -128 + 48)); + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), + "%12g", + ts.total_space + * pow(2.0, -128 + 32)); + vty_out(vty, "%30s: %s\n", + "/32 equivalent %s\n", + temp_buf); + } else { + json_object_double_add( + json, "/32equivalent", + (double)(ts.total_space + * pow(2.0, + -128 + 32))); + } + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), + "%12g", + ts.total_space + * pow(2.0, -128 + 48)); + vty_out(vty, "%30s: %s\n", + "/48 equivalent %s\n", + temp_buf); + } else { + json_object_double_add( + json, "/48equivalent", + (double)(ts.total_space + * pow(2.0, + -128 + 48))); + } } else { - vty_out(vty, "%30s: ", "% announced "); - vty_out(vty, "%12.2f\n", - ts.total_space * 100. * pow(2.0, -32)); - vty_out(vty, "%30s: ", "/8 equivalent "); - vty_out(vty, "%12.2f\n", - ts.total_space * pow(2.0, -32 + 8)); - vty_out(vty, "%30s: ", "/24 equivalent "); - vty_out(vty, "%12.2f\n", - ts.total_space * pow(2.0, -32 + 24)); + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), + "%12.2f", + ts.total_space * 100. + * pow(2.0, -32)); + vty_out(vty, "%30s: %s\n", + "% announced ", temp_buf); + } else { + json_object_double_add( + json, "%announced", + (double)(ts.total_space * 100. 
+ * pow(2.0, -32))); + } + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), + "%12.2f", + ts.total_space + * pow(2.0, -32 + 8)); + vty_out(vty, "%30s: %s\n", + "/8 equivalent ", temp_buf); + } else { + json_object_double_add( + json, "/8equivalent", + (double)(ts.total_space + * pow(2.0, -32 + 8))); + } + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), + "%12.2f", + ts.total_space + * pow(2.0, -32 + 24)); + vty_out(vty, "%30s: %s\n", + "/24 equivalent ", temp_buf); + } else { + json_object_double_add( + json, "/24equivalent", + (double)(ts.total_space + * pow(2.0, -32 + 24))); + } } break; default: - vty_out(vty, "%-30s: ", table_stats_strs[i]); - vty_out(vty, "%12llu", ts.counts[i]); + if (!json) { + snprintf(temp_buf, sizeof(temp_buf), "%12llu", + ts.counts[i]); + vty_out(vty, "%-30s: %s", + table_stats_strs[i] + [TABLE_STATS_IDX_VTY], + temp_buf); + } else { + json_object_int_add( + json, + table_stats_strs[i] + [TABLE_STATS_IDX_JSON], + ts.counts[i]); + } } - - vty_out(vty, "\n"); + if (!json) + vty_out(vty, "\n"); } - return CMD_SUCCESS; +end_table_stats: + if (json) + json_object_array_add(json_array, json); + return ret; } enum bgp_pcounts { @@ -11610,8 +11888,8 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, struct bgp_table *table; struct bgp_adj_in *ain; struct bgp_adj_out *adj; - unsigned long output_count; - unsigned long filtered_count; + unsigned long output_count = 0; + unsigned long filtered_count = 0; struct bgp_node *rn; int header1 = 1; struct bgp *bgp; @@ -11901,6 +12179,12 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, vty_out(vty, "%s\n", json_object_to_json_string_ext( json, JSON_C_TO_STRING_PRETTY)); + + if (!output_count && !filtered_count) { + json_object_free(json_scode); + json_object_free(json_ocode); + } + json_object_free(json); } else if (output_count > 0) { if (filtered_count > 0) @@ -12284,7 +12568,7 @@ static int bgp_distance_set(struct vty *vty, const char 
*distance_str, distance = atoi(distance_str); /* Get BGP distance node. */ - rn = bgp_node_get(bgp_distance_table[afi][safi], (struct prefix *)&p); + rn = bgp_node_get(bgp_distance_table[afi][safi], &p); bdistance = bgp_node_get_bgp_distance_info(rn); if (bdistance) bgp_unlock_node(rn); @@ -12325,8 +12609,7 @@ static int bgp_distance_unset(struct vty *vty, const char *distance_str, return CMD_WARNING_CONFIG_FAILED; } - rn = bgp_node_lookup(bgp_distance_table[afi][safi], - (struct prefix *)&p); + rn = bgp_node_lookup(bgp_distance_table[afi][safi], &p); if (!rn) { vty_out(vty, "Can't find specified prefix\n"); return CMD_WARNING_CONFIG_FAILED; @@ -13149,9 +13432,12 @@ void bgp_route_init(void) /* IPv4 labeled-unicast configuration. */ install_element(VIEW_NODE, &show_ip_bgp_instance_all_cmd); install_element(VIEW_NODE, &show_ip_bgp_cmd); + install_element(VIEW_NODE, &show_ip_bgp_afi_safi_statistics_cmd); + install_element(VIEW_NODE, &show_ip_bgp_l2vpn_evpn_statistics_cmd); install_element(VIEW_NODE, &show_ip_bgp_json_cmd); install_element(VIEW_NODE, &show_ip_bgp_route_cmd); install_element(VIEW_NODE, &show_ip_bgp_regexp_cmd); + install_element(VIEW_NODE, &show_ip_bgp_statistics_all_cmd); install_element(VIEW_NODE, &show_ip_bgp_instance_neighbor_advertised_route_cmd); diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 9438b328ad..ad08bbf440 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -125,7 +125,7 @@ struct bgp_path_info_extra { struct in6_addr sid[BGP_MAX_SIDS]; uint32_t num_sids; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC union { struct { @@ -237,6 +237,7 @@ struct bgp_path_info { #define BGP_PATH_MULTIPATH_CHG (1 << 12) #define BGP_PATH_RIB_ATTR_CHG (1 << 13) #define BGP_PATH_ANNC_NH_SELF (1 << 14) +#define BGP_PATH_LINK_BW_CHG (1 << 15) /* BGP route type. This can be static, RIP, OSPF, BGP etc. 
*/ uint8_t type; @@ -477,6 +478,10 @@ static inline void prep_for_rmap_apply(struct bgp_path_info *dst_pi, dst_pi->peer = peer; dst_pi->attr = attr; dst_pi->net = rn; + dst_pi->flags = src_pi->flags; + dst_pi->type = src_pi->type; + dst_pi->sub_type = src_pi->sub_type; + dst_pi->mpath = src_pi->mpath; if (src_pi->extra) { memcpy(dst_pie, src_pi->extra, sizeof(struct bgp_path_info_extra)); diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 2d92136450..6b57afc5c1 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -39,6 +39,7 @@ #include "hash.h" #include "queue.h" #include "frrstr.h" +#include "network.h" #include "bgpd/bgpd.h" #include "bgpd/bgp_table.h" @@ -63,8 +64,9 @@ #include "bgpd/bgp_pbr.h" #include "bgpd/bgp_flowspec_util.h" #include "bgpd/bgp_encap_types.h" +#include "bgpd/bgp_mpath.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" #endif @@ -1534,7 +1536,7 @@ static enum route_map_cmd_result_t route_match_probability(void *rule, const struct prefix *prefix, route_map_object_t type, void *object) { - long r = random(); + long r = frr_weak_random(); switch (*(long *)rule) { case 0: @@ -2531,6 +2533,146 @@ static const struct route_map_rule_cmd route_set_ecommunity_soo_cmd = { route_set_ecommunity_free, }; +/* `set extcommunity bandwidth' */ + +struct rmap_ecomm_lb_set { + uint8_t lb_type; +#define RMAP_ECOMM_LB_SET_VALUE 1 +#define RMAP_ECOMM_LB_SET_CUMUL 2 +#define RMAP_ECOMM_LB_SET_NUM_MPATH 3 + bool non_trans; + uint32_t bw; +}; + +static enum route_map_cmd_result_t +route_set_ecommunity_lb(void *rule, const struct prefix *prefix, + route_map_object_t type, void *object) +{ + struct rmap_ecomm_lb_set *rels = rule; + struct bgp_path_info *path; + struct peer *peer; + struct ecommunity ecom_lb = {0}; + struct ecommunity_val lb_eval; + uint32_t bw_bytes = 0; + uint16_t mpath_count = 0; + struct ecommunity *new_ecom; + struct ecommunity *old_ecom; + as_t as; + + if (type != RMAP_BGP) + return RMAP_OKAY; + 
+ path = object; + peer = path->peer; + if (!peer || !peer->bgp) + return RMAP_ERROR; + + /* Build link bandwidth extended community */ + as = (peer->bgp->as > BGP_AS_MAX) ? BGP_AS_TRANS : peer->bgp->as; + if (rels->lb_type == RMAP_ECOMM_LB_SET_VALUE) { + bw_bytes = ((uint64_t)rels->bw * 1000 * 1000) / 8; + } else if (rels->lb_type == RMAP_ECOMM_LB_SET_CUMUL) { + /* process this only for the best path. */ + if (!CHECK_FLAG(path->flags, BGP_PATH_SELECTED)) + return RMAP_OKAY; + + bw_bytes = (uint32_t)bgp_path_info_mpath_cumbw(path); + if (!bw_bytes) + return RMAP_OKAY; + + } else if (rels->lb_type == RMAP_ECOMM_LB_SET_NUM_MPATH) { + + /* process this only for the best path. */ + if (!CHECK_FLAG(path->flags, BGP_PATH_SELECTED)) + return RMAP_OKAY; + + bw_bytes = ((uint64_t)peer->bgp->lb_ref_bw * 1000 * 1000) / 8; + mpath_count = bgp_path_info_mpath_count(path) + 1; + bw_bytes *= mpath_count; + } + + encode_lb_extcomm(as, bw_bytes, rels->non_trans, &lb_eval); + + /* add to route or merge with existing */ + old_ecom = path->attr->ecommunity; + if (old_ecom) { + new_ecom = ecommunity_dup(old_ecom); + ecommunity_add_val(new_ecom, &lb_eval, true, true); + if (!old_ecom->refcnt) + ecommunity_free(&old_ecom); + } else { + ecom_lb.size = 1; + ecom_lb.val = (uint8_t *)lb_eval.val; + new_ecom = ecommunity_dup(&ecom_lb); + } + + /* new_ecom will be intern()'d or attr_flush()'d in call stack */ + path->attr->ecommunity = new_ecom; + path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); + + /* Mark that route-map has set link bandwidth; used in attribute + * setting decisions. 
+ */ + SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET); + + return RMAP_OKAY; +} + +static void *route_set_ecommunity_lb_compile(const char *arg) +{ + struct rmap_ecomm_lb_set *rels; + uint8_t lb_type; + uint32_t bw = 0; + char bw_str[40] = {0}; + char *p, *str; + bool non_trans = false; + + str = (char *)arg; + p = strchr(arg, ' '); + if (p) { + int len; + + len = p - arg; + memcpy(bw_str, arg, len); + non_trans = true; + str = bw_str; + } + + if (strcmp(str, "cumulative") == 0) + lb_type = RMAP_ECOMM_LB_SET_CUMUL; + else if (strcmp(str, "num-multipaths") == 0) + lb_type = RMAP_ECOMM_LB_SET_NUM_MPATH; + else { + char *end = NULL; + + bw = strtoul(str, &end, 10); + if (*end != '\0') + return NULL; + lb_type = RMAP_ECOMM_LB_SET_VALUE; + } + + rels = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, + sizeof(struct rmap_ecomm_lb_set)); + rels->lb_type = lb_type; + rels->bw = bw; + rels->non_trans = non_trans; + + return rels; +} + +static void route_set_ecommunity_lb_free(void *rule) +{ + XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); +} + +/* Set community rule structure. */ +struct route_map_rule_cmd route_set_ecommunity_lb_cmd = { + "extcommunity bandwidth", + route_set_ecommunity_lb, + route_set_ecommunity_lb_compile, + route_set_ecommunity_lb_free, +}; + /* `set origin ORIGIN' */ /* For origin set. 
*/ @@ -3749,7 +3891,7 @@ static void bgp_route_map_process_update_cb(char *rmap_name) for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { bgp_route_map_process_update(bgp, rmap_name, 1); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* zlog_debug("%s: calling vnc_routemap_update", __func__); */ vnc_routemap_update(bgp, __func__); #endif @@ -3793,7 +3935,7 @@ static void bgp_route_map_mark_update(const char *rmap_name) } else { for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) bgp_route_map_process_update(bgp, rmap_name, 0); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC zlog_debug("%s: calling vnc_routemap_update", __func__); vnc_routemap_update(bgp, __func__); #endif @@ -5004,6 +5146,53 @@ ALIAS (no_set_ecommunity_soo, "GP extended community attribute\n" "Site-of-Origin extended community\n") +DEFUN (set_ecommunity_lb, + set_ecommunity_lb_cmd, + "set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]", + SET_STR + "BGP extended community attribute\n" + "Link bandwidth extended community\n" + "Bandwidth value in Mbps\n" + "Cumulative bandwidth of all multipaths (outbound-only)\n" + "Internally computed bandwidth based on number of multipaths (outbound-only)\n" + "Attribute is set as non-transitive\n") +{ + int idx_lb = 3; + int ret; + char *str; + + str = argv_concat(argv, argc, idx_lb); + ret = generic_set_add(vty, VTY_GET_CONTEXT(route_map_index), + "extcommunity bandwidth", str); + XFREE(MTYPE_TMP, str); + return ret; +} + + +DEFUN (no_set_ecommunity_lb, + no_set_ecommunity_lb_cmd, + "no set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]", + NO_STR + SET_STR + "BGP extended community attribute\n" + "Link bandwidth extended community\n" + "Bandwidth value in Mbps\n" + "Cumulative bandwidth of all multipaths (outbound-only)\n" + "Internally computed bandwidth based on number of multipaths (outbound-only)\n" + "Attribute is set as non-transitive\n") +{ + return generic_set_delete(vty, 
VTY_GET_CONTEXT(route_map_index), + "extcommunity bandwidth", NULL); +} + +ALIAS (no_set_ecommunity_lb, + no_set_ecommunity_lb_short_cmd, + "no set extcommunity bandwidth", + NO_STR + SET_STR + "BGP extended community attribute\n" + "Link bandwidth extended community\n") + DEFUN (set_origin, set_origin_cmd, "set origin <egp|igp|incomplete>", @@ -5549,6 +5738,7 @@ void bgp_route_map_init(void) route_map_install_set(&route_set_originator_id_cmd); route_map_install_set(&route_set_ecommunity_rt_cmd); route_map_install_set(&route_set_ecommunity_soo_cmd); + route_map_install_set(&route_set_ecommunity_lb_cmd); route_map_install_set(&route_set_tag_cmd); route_map_install_set(&route_set_label_index_cmd); @@ -5632,6 +5822,9 @@ void bgp_route_map_init(void) install_element(RMAP_NODE, &set_ecommunity_soo_cmd); install_element(RMAP_NODE, &no_set_ecommunity_soo_cmd); install_element(RMAP_NODE, &no_set_ecommunity_soo_short_cmd); + install_element(RMAP_NODE, &set_ecommunity_lb_cmd); + install_element(RMAP_NODE, &no_set_ecommunity_lb_cmd); + install_element(RMAP_NODE, &no_set_ecommunity_lb_short_cmd); #ifdef KEEP_OLD_VPN_COMMANDS install_element(RMAP_NODE, &set_vpn_nexthop_cmd); install_element(RMAP_NODE, &no_set_vpn_nexthop_cmd); diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c index e40c7231a7..1ba07e95e6 100644 --- a/bgpd/bgp_rpki.c +++ b/bgpd/bgp_rpki.c @@ -104,7 +104,7 @@ static struct rtr_mgr_group *get_connected_group(void); static void print_prefix_table(struct vty *vty); static void install_cli_commands(void); static int config_write(struct vty *vty); -static void overwrite_exit_commands(void); +static int config_on_exit(struct vty *vty); static void free_cache(struct cache *cache); static struct rtr_mgr_group *get_groups(void); #if defined(FOUND_SSH) @@ -143,7 +143,14 @@ static unsigned int retry_interval; static int rpki_sync_socket_rtr; static int rpki_sync_socket_bgpd; -static struct cmd_node rpki_node = {RPKI_NODE, "%s(config-rpki)# ", 1}; +static struct cmd_node 
rpki_node = { + .name = "rpki", + .node = RPKI_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-rpki)# ", + .config_write = config_write, + .node_exit = config_on_exit, +}; static const struct route_map_rule_cmd route_match_rpki_cmd = { "rpki", route_match, route_match_compile, route_match_free}; @@ -1394,35 +1401,10 @@ DEFUN (show_rpki_cache_connection, return CMD_SUCCESS; } -DEFUN_NOSH (rpki_exit, - rpki_exit_cmd, - "exit", - "Exit rpki configuration and restart rpki session\n") +static int config_on_exit(struct vty *vty) { reset(false); - - vty->node = CONFIG_NODE; - return CMD_SUCCESS; -} - -DEFUN_NOSH (rpki_quit, - rpki_quit_cmd, - "quit", - "Exit rpki configuration mode\n") -{ - return rpki_exit(self, vty, argc, argv); -} - -DEFUN_NOSH (rpki_end, - rpki_end_cmd, - "end", - "End rpki configuration, restart rpki session and change to enable mode.\n") -{ - int ret = reset(false); - - vty_config_exit(vty); - vty->node = ENABLE_NODE; - return ret == SUCCESS ? CMD_SUCCESS : CMD_WARNING; + return 1; } DEFUN (rpki_reset, @@ -1516,32 +1498,11 @@ DEFUN (no_match_rpki, return CMD_SUCCESS; } -static void overwrite_exit_commands(void) -{ - unsigned int i; - vector cmd_vector = rpki_node.cmd_vector; - - for (i = 0; i < cmd_vector->active; ++i) { - struct cmd_element *cmd = vector_lookup(cmd_vector, i); - - if (strcmp(cmd->string, "exit") == 0 - || strcmp(cmd->string, "quit") == 0 - || strcmp(cmd->string, "end") == 0) { - uninstall_element(RPKI_NODE, cmd); - } - } - - install_element(RPKI_NODE, &rpki_exit_cmd); - install_element(RPKI_NODE, &rpki_quit_cmd); - install_element(RPKI_NODE, &rpki_end_cmd); -} - static void install_cli_commands(void) { // TODO: make config write work - install_node(&rpki_node, &config_write); + install_node(&rpki_node); install_default(RPKI_NODE); - overwrite_exit_commands(); install_element(CONFIG_NODE, &rpki_cmd); install_element(ENABLE_NODE, &rpki_cmd); diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c index 28eea46a5a..e7aa02863c 100644 
--- a/bgpd/bgp_snmp.c +++ b/bgpd/bgp_snmp.c @@ -330,7 +330,7 @@ static uint8_t *bgpVersion(struct variable *v, oid name[], size_t *length, /* Return octet string length 1. */ *var_len = 1; - return (uint8_t *)&version; + return &version; } static uint8_t *bgpLocalAs(struct variable *v, oid name[], size_t *length, diff --git a/bgpd/bgp_vnc_types.h b/bgpd/bgp_vnc_types.h index f4202ff75e..04847ce6c9 100644 --- a/bgpd/bgp_vnc_types.h +++ b/bgpd/bgp_vnc_types.h @@ -19,7 +19,7 @@ #ifndef _QUAGGA_BGP_VNC_TYPES_H #define _QUAGGA_BGP_VNC_TYPES_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC typedef enum { BGP_VNC_SUBTLV_TYPE_LIFETIME = 1, BGP_VNC_SUBTLV_TYPE_RFPOPTION = 2, /* deprecated */ diff --git a/bgpd/bgp_vpn.c b/bgpd/bgp_vpn.c index 2fd2443512..af632a1340 100644 --- a/bgpd/bgp_vpn.c +++ b/bgpd/bgp_vpn.c @@ -155,7 +155,7 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, uint16_t type; struct rd_as rd_as = {0}; struct rd_ip rd_ip = {0}; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC struct rd_vnc_eth rd_vnc_eth = {0}; #endif const uint8_t *pnt; @@ -171,7 +171,7 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, decode_rd_as4(pnt + 2, &rd_as); else if (type == RD_TYPE_IP) decode_rd_ip(pnt + 2, &rd_ip); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC else if (type == RD_TYPE_VNC_ETH) decode_rd_vnc_eth(pnt, &rd_vnc_eth); #endif @@ -200,7 +200,7 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, vty_out(vty, "%s:%d", inet_ntoa(rd_ip.ip), rd_ip.val); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC else if (type == RD_TYPE_VNC_ETH) vty_out(vty, "%u:%02x:%02x:%02x:%02x:%02x:%02x", diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index aea6dbbf4e..a356564813 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -68,25 +68,25 @@ #include "bgpd/bgp_addpath.h" #include "bgpd/bgp_mac.h" #include "bgpd/bgp_flowspec.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" #endif FRR_CFG_DEFAULT_BOOL(BGP_IMPORT_CHECK, - { .val_long = true, 
.match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_BOOL(BGP_SHOW_HOSTNAME, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_BOOL(BGP_LOG_NEIGHBOR_CHANGES, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_BOOL(BGP_DETERMINISTIC_MED, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY, { .val_ulong = 10, .match_profile = "datacenter", }, @@ -100,6 +100,11 @@ FRR_CFG_DEFAULT_ULONG(BGP_KEEPALIVE, { .val_ulong = 3, .match_profile = "datacenter", }, { .val_ulong = 60 }, ) +FRR_CFG_DEFAULT_BOOL(BGP_EBGP_REQUIRES_POLICY, + { .val_bool = false, .match_profile = "datacenter", }, + { .val_bool = false, .match_version = "< 7.4", }, + { .val_bool = true }, +) DEFINE_HOOK(bgp_inst_config_write, (struct bgp *bgp, struct vty *vty), @@ -417,6 +422,8 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name, SET_FLAG((*bgp)->flags, BGP_FLAG_LOG_NEIGHBOR_CHANGES); if (DFLT_BGP_DETERMINISTIC_MED) SET_FLAG((*bgp)->flags, BGP_FLAG_DETERMINISTIC_MED); + if (DFLT_BGP_EBGP_REQUIRES_POLICY) + SET_FLAG((*bgp)->flags, BGP_FLAG_EBGP_REQUIRES_POLICY); ret = BGP_SUCCESS; } @@ -2036,7 +2043,7 @@ DEFUN(bgp_ebgp_requires_policy, bgp_ebgp_requires_policy_cmd, "Require in and out policy for eBGP peers (RFC8212)\n") { VTY_DECLVAR_CONTEXT(bgp, bgp); - bgp->ebgp_requires_policy = DEFAULT_EBGP_POLICY_ENABLED; + SET_FLAG(bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY); return CMD_SUCCESS; } @@ -2047,7 +2054,7 @@ DEFUN(no_bgp_ebgp_requires_policy, no_bgp_ebgp_requires_policy_cmd, "Require in 
and out policy for eBGP peers (RFC8212)\n") { VTY_DECLVAR_CONTEXT(bgp, bgp); - bgp->ebgp_requires_policy = DEFAULT_EBGP_POLICY_DISABLED; + UNSET_FLAG(bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY); return CMD_SUCCESS; } @@ -2060,7 +2067,7 @@ DEFUN(bgp_reject_as_sets, bgp_reject_as_sets_cmd, struct listnode *node, *nnode; struct peer *peer; - bgp->reject_as_sets = BGP_REJECT_AS_SETS_ENABLED; + bgp->reject_as_sets = true; /* Reset existing BGP sessions to reject routes * with aspath containing AS_SET or AS_CONFED_SET. @@ -2086,7 +2093,7 @@ DEFUN(no_bgp_reject_as_sets, no_bgp_reject_as_sets_cmd, struct listnode *node, *nnode; struct peer *peer; - bgp->reject_as_sets = BGP_REJECT_AS_SETS_DISABLED; + bgp->reject_as_sets = false; /* Reset existing BGP sessions to reject routes * with aspath containing AS_SET or AS_CONFED_SET. @@ -2973,6 +2980,49 @@ DEFUN (no_bgp_bestpath_med, return CMD_SUCCESS; } +/* "bgp bestpath bandwidth" configuration. */ +DEFPY (bgp_bestpath_bw, + bgp_bestpath_bw_cmd, + "[no$no] bgp bestpath bandwidth [<ignore|skip-missing|default-weight-for-missing>$bw_cfg]", + NO_STR + "BGP specific commands\n" + "Change the default bestpath selection\n" + "Link Bandwidth attribute\n" + "Ignore link bandwidth (i.e., do regular ECMP, not weighted)\n" + "Ignore paths without link bandwidth for ECMP (if other paths have it)\n" + "Assign a low default weight (value 1) to paths not having link bandwidth\n") +{ + VTY_DECLVAR_CONTEXT(bgp, bgp); + afi_t afi; + safi_t safi; + + if (no) { + bgp->lb_handling = BGP_LINK_BW_ECMP; + } else { + if (!bw_cfg) { + vty_out(vty, "%% Bandwidth configuration must be specified\n"); + return CMD_ERR_INCOMPLETE; + } + if (!strcmp(bw_cfg, "ignore")) + bgp->lb_handling = BGP_LINK_BW_IGNORE_BW; + else if (!strcmp(bw_cfg, "skip-missing")) + bgp->lb_handling = BGP_LINK_BW_SKIP_MISSING; + else if (!strcmp(bw_cfg, "default-weight-for-missing")) + bgp->lb_handling = BGP_LINK_BW_DEFWT_4_MISSING; + else + return CMD_ERR_NO_MATCH; + } + + /* This 
config is used in route install, so redo that. */ + FOREACH_AFI_SAFI (afi, safi) { + if (!bgp_fibupd_safi(safi)) + continue; + bgp_zebra_announce_table(bgp, afi, safi); + } + + return CMD_SUCCESS; +} + /* "no bgp default ipv4-unicast". */ DEFUN (no_bgp_default_ipv4_unicast, no_bgp_default_ipv4_unicast_cmd, @@ -9036,21 +9086,23 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, json_object_int_add(json_peer, "msgSent", PEER_TOTAL_TX(peer)); + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit( + &peer->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit( + &peer->ibuf->count, + memory_order_relaxed); + json_object_int_add(json_peer, "tableVersion", peer->version[afi][safi]); json_object_int_add(json_peer, "outq", - peer->obuf->count); - json_object_int_add(json_peer, "inq", 0); + outq_count); + json_object_int_add(json_peer, "inq", + inq_count); peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN, use_json, json_peer); - /* - * Adding "pfxRcd" field to match with the corresponding - * CLI. "prefixReceivedCount" will be deprecated in - * future. 
- */ - json_object_int_add(json_peer, "prefixReceivedCount", - peer->pcount[afi][pfx_rcd_safi]); json_object_int_add(json_peer, "pfxRcd", peer->pcount[afi][pfx_rcd_safi]); @@ -9122,12 +9174,21 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, vty_out(vty, "%*s", max_neighbor_width - len, " "); + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit( + &peer->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit( + &peer->ibuf->count, + memory_order_relaxed); + vty_out(vty, - "4 %10u %9u %9u %8" PRIu64 " %4d %4zu %8s", + "4 %10u %9u %9u %8" PRIu64 + " %4zu %4zu %8s", peer->as, PEER_TOTAL_RX(peer), PEER_TOTAL_TX(peer), - peer->version[afi][safi], 0, - peer->obuf->count, + peer->version[afi][safi], inq_count, + outq_count, peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN, 0, NULL)); @@ -10095,14 +10156,14 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi, filter->map[RMAP_OUT].name); /* ebgp-requires-policy (inbound) */ - if (p->bgp->ebgp_requires_policy == DEFAULT_EBGP_POLICY_ENABLED + if (CHECK_FLAG(p->bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY) && !bgp_inbound_policy_exists(p, filter)) json_object_string_add( json_addr, "inboundEbgpRequiresPolicy", "Inbound updates discarded due to missing policy"); /* ebgp-requires-policy (outbound) */ - if (p->bgp->ebgp_requires_policy == DEFAULT_EBGP_POLICY_ENABLED + if (CHECK_FLAG(p->bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY) && (!bgp_outbound_policy_exists(p, filter))) json_object_string_add( json_addr, "outboundEbgpRequiresPolicy", @@ -10391,13 +10452,13 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi, filter->map[RMAP_OUT].name); /* ebgp-requires-policy (inbound) */ - if (p->bgp->ebgp_requires_policy == DEFAULT_EBGP_POLICY_ENABLED + if (CHECK_FLAG(p->bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY) && !bgp_inbound_policy_exists(p, filter)) vty_out(vty, " Inbound updates discarded due to missing policy\n"); 
/* ebgp-requires-policy (outbound) */ - if (p->bgp->ebgp_requires_policy == DEFAULT_EBGP_POLICY_ENABLED + if (CHECK_FLAG(p->bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY) && !bgp_outbound_policy_exists(p, filter)) vty_out(vty, " Outbound updates discarded due to missing policy\n"); @@ -11711,9 +11772,17 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, json_object *json_stat = NULL; json_stat = json_object_new_object(); /* Packet counts. */ - json_object_int_add(json_stat, "depthInq", 0); + + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit(&p->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit(&p->ibuf->count, + memory_order_relaxed); + + json_object_int_add(json_stat, "depthInq", + (unsigned long)inq_count); json_object_int_add(json_stat, "depthOutq", - (unsigned long)p->obuf->count); + (unsigned long)outq_count); json_object_int_add(json_stat, "opensSent", atomic_load_explicit(&p->open_out, memory_order_relaxed)); @@ -11754,11 +11823,16 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, json_object_int_add(json_stat, "totalRecv", PEER_TOTAL_RX(p)); json_object_object_add(json_neigh, "messageStats", json_stat); } else { + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit(&p->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit(&p->ibuf->count, + memory_order_relaxed); + /* Packet counts. 
*/ vty_out(vty, " Message statistics:\n"); - vty_out(vty, " Inq depth is 0\n"); - vty_out(vty, " Outq depth is %lu\n", - (unsigned long)p->obuf->count); + vty_out(vty, " Inq depth is %zu\n", inq_count); + vty_out(vty, " Outq depth is %zu\n", outq_count); vty_out(vty, " Sent Rcvd\n"); vty_out(vty, " Opens: %10d %10d\n", atomic_load_explicit(&p->open_out, @@ -12125,14 +12199,20 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, enum show_type type, union sockunion *su, const char *conf_if, afi_t afi, - bool use_json, json_object *json) + bool use_json) { struct listnode *node, *nnode; struct peer *peer; int find = 0; safi_t safi = SAFI_UNICAST; + json_object *json = NULL; json_object *json_neighbor = NULL; + if (use_json) { + json = json_object_new_object(); + json_neighbor = json_object_new_object(); + } + for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) @@ -12141,16 +12221,15 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, if ((peer->afc[afi][safi]) == 0) continue; - if (use_json) - json_neighbor = json_object_new_object(); - if (type == show_all) { bgp_show_peer_gr_status(vty, peer, use_json, json_neighbor); - if (use_json) + if (use_json) { json_object_object_add(json, peer->host, json_neighbor); + json_neighbor = NULL; + } } else if (type == show_peer) { if (conf_if) { @@ -12176,8 +12255,10 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, json_neighbor); } - if (find) + if (find) { + json_neighbor = NULL; break; + } } if (type == show_peer && !find) { @@ -12190,6 +12271,10 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, vty_out(vty, "%s\n", json_object_to_json_string_ext( json, JSON_C_TO_STRING_PRETTY)); + + if (json_neighbor) + json_object_free(json_neighbor); + json_object_free(json); } else { vty_out(vty, "\n"); } @@ -12311,7 +12396,6 @@ static void 
bgp_show_neighbor_graceful_restart_vty(struct vty *vty, int ret; struct bgp *bgp; union sockunion su; - json_object *json = NULL; bgp = bgp_get_default(); @@ -12322,20 +12406,17 @@ static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty, bgp_show_global_graceful_restart_mode_vty(vty, bgp, use_json, NULL); - json = json_object_new_object(); if (ip_str) { ret = str2sockunion(ip_str, &su); if (ret < 0) - bgp_show_neighbor_graceful_restart(vty, bgp, type, NULL, - ip_str, afi, - use_json, json); - else bgp_show_neighbor_graceful_restart( - vty, bgp, type, &su, NULL, afi, use_json, json); + vty, bgp, type, NULL, ip_str, afi, use_json); + else + bgp_show_neighbor_graceful_restart(vty, bgp, type, &su, + NULL, afi, use_json); } else bgp_show_neighbor_graceful_restart(vty, bgp, type, NULL, NULL, - afi, use_json, json); - json_object_free(json); + afi, use_json); } static void bgp_show_all_instances_neighbors_vty(struct vty *vty, @@ -14992,12 +15073,16 @@ int bgp_config_write(struct vty *vty) vty_out(vty, " bgp always-compare-med\n"); /* RFC8212 default eBGP policy. */ - if (bgp->ebgp_requires_policy - == DEFAULT_EBGP_POLICY_ENABLED) - vty_out(vty, " bgp ebgp-requires-policy\n"); + if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_EBGP_REQUIRES_POLICY) + != SAVE_BGP_EBGP_REQUIRES_POLICY) + vty_out(vty, " %sbgp ebgp-requires-policy\n", + CHECK_FLAG(bgp->flags, + BGP_FLAG_EBGP_REQUIRES_POLICY) + ? "" + : "no "); /* draft-ietf-idr-deprecate-as-set-confed-set */ - if (bgp->reject_as_sets == BGP_REJECT_AS_SETS_ENABLED) + if (bgp->reject_as_sets) vty_out(vty, " bgp reject-as-sets\n"); /* BGP default ipv4-unicast. */ @@ -15161,6 +15246,14 @@ int bgp_config_write(struct vty *vty) vty_out(vty, "\n"); } + /* Link bandwidth handling. 
*/ + if (bgp->lb_handling == BGP_LINK_BW_IGNORE_BW) + vty_out(vty, " bgp bestpath bandwidth ignore\n"); + else if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING) + vty_out(vty, " bgp bestpath bandwidth skip-missing\n"); + else if (bgp->lb_handling == BGP_LINK_BW_DEFWT_4_MISSING) + vty_out(vty, " bgp bestpath bandwidth default-weight-for-missing\n"); + /* BGP network import check. */ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK) != SAVE_BGP_IMPORT_CHECK) @@ -15241,7 +15334,7 @@ int bgp_config_write(struct vty *vty) hook_call(bgp_inst_config_write, bgp, vty); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC bgp_rfapi_cfg_write(vty, bgp); #endif @@ -15253,50 +15346,96 @@ int bgp_config_write(struct vty *vty) /* BGP node structure. */ static struct cmd_node bgp_node = { - BGP_NODE, "%s(config-router)# ", 1, + .name = "bgp", + .node = BGP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = bgp_config_write, }; static struct cmd_node bgp_ipv4_unicast_node = { - BGP_IPV4_NODE, "%s(config-router-af)# ", 1, + .name = "bgp ipv4 unicast", + .node = BGP_IPV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", }; static struct cmd_node bgp_ipv4_multicast_node = { - BGP_IPV4M_NODE, "%s(config-router-af)# ", 1, + .name = "bgp ipv4 multicast", + .node = BGP_IPV4M_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", }; static struct cmd_node bgp_ipv4_labeled_unicast_node = { - BGP_IPV4L_NODE, "%s(config-router-af)# ", 1, + .name = "bgp ipv4 labeled unicast", + .node = BGP_IPV4L_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", }; static struct cmd_node bgp_ipv6_unicast_node = { - BGP_IPV6_NODE, "%s(config-router-af)# ", 1, + .name = "bgp ipv6", + .node = BGP_IPV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", }; static struct cmd_node bgp_ipv6_multicast_node = { - BGP_IPV6M_NODE, "%s(config-router-af)# ", 1, + .name = "bgp ipv6 multicast", + .node = BGP_IPV6M_NODE, 
+ .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", }; static struct cmd_node bgp_ipv6_labeled_unicast_node = { - BGP_IPV6L_NODE, "%s(config-router-af)# ", 1, + .name = "bgp ipv6 labeled unicast", + .node = BGP_IPV6L_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", }; -static struct cmd_node bgp_vpnv4_node = {BGP_VPNV4_NODE, - "%s(config-router-af)# ", 1}; +static struct cmd_node bgp_vpnv4_node = { + .name = "bgp vpnv4", + .node = BGP_VPNV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_vpnv6_node = {BGP_VPNV6_NODE, - "%s(config-router-af-vpnv6)# ", 1}; +static struct cmd_node bgp_vpnv6_node = { + .name = "bgp vpnv6", + .node = BGP_VPNV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af-vpnv6)# ", +}; -static struct cmd_node bgp_evpn_node = {BGP_EVPN_NODE, - "%s(config-router-evpn)# ", 1}; +static struct cmd_node bgp_evpn_node = { + .name = "bgp evpn", + .node = BGP_EVPN_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-evpn)# ", +}; -static struct cmd_node bgp_evpn_vni_node = {BGP_EVPN_VNI_NODE, - "%s(config-router-af-vni)# ", 1}; +static struct cmd_node bgp_evpn_vni_node = { + .name = "bgp evpn vni", + .node = BGP_EVPN_VNI_NODE, + .parent_node = BGP_EVPN_NODE, + .prompt = "%s(config-router-af-vni)# ", +}; -static struct cmd_node bgp_flowspecv4_node = {BGP_FLOWSPECV4_NODE, - "%s(config-router-af)# ", 1}; +static struct cmd_node bgp_flowspecv4_node = { + .name = "bgp ipv4 flowspec", + .node = BGP_FLOWSPECV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_flowspecv6_node = {BGP_FLOWSPECV6_NODE, - "%s(config-router-af-vpnv6)# ", 1}; +static struct cmd_node bgp_flowspecv6_node = { + .name = "bgp ipv6 flowspec", + .node = BGP_FLOWSPECV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af-vpnv6)# ", +}; static void community_list_vty(void); @@ -15359,19 +15498,19 @@ void 
bgp_vty_init(void) cmd_variable_handler_register(bgp_var_peergroup); /* Install bgp top node. */ - install_node(&bgp_node, bgp_config_write); - install_node(&bgp_ipv4_unicast_node, NULL); - install_node(&bgp_ipv4_multicast_node, NULL); - install_node(&bgp_ipv4_labeled_unicast_node, NULL); - install_node(&bgp_ipv6_unicast_node, NULL); - install_node(&bgp_ipv6_multicast_node, NULL); - install_node(&bgp_ipv6_labeled_unicast_node, NULL); - install_node(&bgp_vpnv4_node, NULL); - install_node(&bgp_vpnv6_node, NULL); - install_node(&bgp_evpn_node, NULL); - install_node(&bgp_evpn_vni_node, NULL); - install_node(&bgp_flowspecv4_node, NULL); - install_node(&bgp_flowspecv6_node, NULL); + install_node(&bgp_node); + install_node(&bgp_ipv4_unicast_node); + install_node(&bgp_ipv4_multicast_node); + install_node(&bgp_ipv4_labeled_unicast_node); + install_node(&bgp_ipv6_unicast_node); + install_node(&bgp_ipv6_multicast_node); + install_node(&bgp_ipv6_labeled_unicast_node); + install_node(&bgp_vpnv4_node); + install_node(&bgp_vpnv6_node); + install_node(&bgp_evpn_node); + install_node(&bgp_evpn_vni_node); + install_node(&bgp_flowspecv4_node); + install_node(&bgp_flowspecv6_node); /* Install default VTY commands to new nodes. */ install_default(BGP_NODE); @@ -15567,6 +15706,9 @@ void bgp_vty_init(void) install_element(BGP_NODE, &bgp_bestpath_med_cmd); install_element(BGP_NODE, &no_bgp_bestpath_med_cmd); + /* "bgp bestpath bandwidth" commands */ + install_element(BGP_NODE, &bgp_bestpath_bw_cmd); + /* "no bgp default ipv4-unicast" commands. */ install_element(BGP_NODE, &no_bgp_default_ipv4_unicast_cmd); install_element(BGP_NODE, &bgp_default_ipv4_unicast_cmd); @@ -17773,13 +17915,17 @@ static int community_list_config_write(struct vty *vty) return write; } +static int community_list_config_write(struct vty *vty); static struct cmd_node community_list_node = { - COMMUNITY_LIST_NODE, "", 1 /* Export to vtysh. 
*/ + .name = "community list", + .node = COMMUNITY_LIST_NODE, + .prompt = "", + .config_write = community_list_config_write, }; static void community_list_vty(void) { - install_node(&community_list_node, community_list_config_write); + install_node(&community_list_node); /* Community-list. */ install_element(CONFIG_NODE, &bgp_community_list_standard_cmd); diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h index fa7b96d87b..d6ca198d09 100644 --- a/bgpd/bgp_vty.h +++ b/bgpd/bgp_vty.h @@ -175,6 +175,8 @@ extern int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty, int argc, int *idx, afi_t *afi, safi_t *safi, struct bgp **bgp, bool use_json); +int bgp_vty_find_and_parse_bgp(struct vty *vty, struct cmd_token **argv, + int argc, struct bgp **bgp, bool use_json); extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, safi_t safi, bool show_failed, bool use_json); diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 404f17f69e..4f54bc81fb 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -51,7 +51,7 @@ #include "bgpd/bgp_nht.h" #include "bgpd/bgp_bfd.h" #include "bgpd/bgp_label.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #include "bgpd/rfapi/vnc_export_bgp.h" #endif @@ -1148,6 +1148,31 @@ static bool update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, return true; } +static bool bgp_zebra_use_nhop_weighted(struct bgp *bgp, struct attr *attr, + uint64_t tot_bw, uint32_t *nh_weight) +{ + uint32_t bw; + uint64_t tmp; + + bw = attr->link_bw; + /* zero link-bandwidth and link-bandwidth not present are treated + * as the same situation. + */ + if (!bw) { + /* the only situations should be if we're either told + * to skip or use default weight. 
+ */ + if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING) + return false; + *nh_weight = BGP_ZEBRA_DEFAULT_NHOP_WEIGHT; + } else { + tmp = (uint64_t)bw * 100; + *nh_weight = ((uint32_t)(tmp / tot_bw)); + } + + return true; +} + void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, struct bgp_path_info *info, struct bgp *bgp, afi_t afi, safi_t safi) @@ -1170,6 +1195,8 @@ void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, char buf_prefix[PREFIX_STRLEN]; /* filled in if we are debugging */ bool is_evpn; int nh_updated; + bool do_wt_ecmp; + uint64_t cum_bw = 0; /* Don't try to install if we're not connected to Zebra or Zebra doesn't * know of this instance. @@ -1240,11 +1267,20 @@ void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, /* Metric is currently based on the best-path only */ metric = info->attr->med; + + /* Determine if we're doing weighted ECMP or not */ + do_wt_ecmp = bgp_path_info_mpath_chkwtd(bgp, info); + if (do_wt_ecmp) + cum_bw = bgp_path_info_mpath_cumbw(info); + for (mpinfo = info; mpinfo; mpinfo = bgp_path_info_mpath_next(mpinfo)) { + uint32_t nh_weight; + if (valid_nh_count >= multipath_num) break; *mpinfo_cp = *mpinfo; + nh_weight = 0; /* Get nexthop address-family */ if (p->family == AF_INET @@ -1257,6 +1293,15 @@ void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, else continue; + /* If processing for weighted ECMP, determine the next hop's + * weight. Based on user setting, we may skip the next hop + * in some situations. 
+ */ + if (do_wt_ecmp) { + if (!bgp_zebra_use_nhop_weighted(bgp, mpinfo->attr, + cum_bw, &nh_weight)) + continue; + } api_nh = &api.nexthops[valid_nh_count]; if (nh_family == AF_INET) { if (bgp_debug_zebra(&api.prefix)) { @@ -1356,6 +1401,8 @@ void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, } memcpy(&api_nh->rmac, &(mpinfo->attr->rmac), sizeof(struct ethaddr)); + api_nh->weight = nh_weight; + valid_nh_count++; } @@ -1435,9 +1482,10 @@ void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, snprintf(eth_buf, sizeof(eth_buf), " RMAC %s", prefix_mac2str(&api_nh->rmac, buf1, sizeof(buf1))); - zlog_debug(" nhop [%d]: %s if %u VRF %u %s %s", + zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s", i + 1, nh_buf, api_nh->ifindex, - api_nh->vrf_id, label_buf, eth_buf); + api_nh->vrf_id, api_nh->weight, + label_buf, eth_buf); } } @@ -1605,7 +1653,7 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type, if (vrf_bitmap_check(zclient->redist[afi][type], bgp->vrf_id)) return CMD_WARNING; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (EVPN_ENABLED(bgp) && type == ZEBRA_ROUTE_VNC_DIRECT) { vnc_export_bgp_enable( bgp, afi); /* only enables if mode bits cfg'd */ @@ -1766,7 +1814,7 @@ int bgp_redistribute_unset(struct bgp *bgp, afi_t afi, int type, * they operate within bgpd irrespective of zebra connection * status. red lookup fails if there is no zebra connection. */ -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (EVPN_ENABLED(bgp) && type == ZEBRA_ROUTE_VNC_DIRECT) { vnc_export_bgp_disable(bgp, afi); } diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index e546cd5da7..a069d01503 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -23,6 +23,9 @@ #include "vxlan.h" +/* Default weight for next hop, if doing weighted ECMP. 
*/ +#define BGP_ZEBRA_DEFAULT_NHOP_WEIGHT 1 + extern void bgp_zebra_init(struct thread_master *master, unsigned short instance); extern void bgp_zebra_init_tm_connect(struct bgp *bgp); diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 6534ac1900..5d28b138d6 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -65,7 +65,7 @@ #include "bgpd/bgp_nexthop.h" #include "bgpd/bgp_damp.h" #include "bgpd/bgp_mplsvpn.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" #include "bgpd/rfapi/rfapi_backend.h" #endif @@ -2970,13 +2970,14 @@ static struct bgp *bgp_create(as_t *as, const char *name, bgp->rib_stale_time = BGP_DEFAULT_RIB_STALE_TIME; bgp->dynamic_neighbors_limit = BGP_DYNAMIC_NEIGHBORS_LIMIT_DEFAULT; bgp->dynamic_neighbors_count = 0; - bgp->ebgp_requires_policy = DEFAULT_EBGP_POLICY_DISABLED; - bgp->reject_as_sets = BGP_REJECT_AS_SETS_DISABLED; + bgp->lb_ref_bw = BGP_LINK_BW_REF_BW; + bgp->lb_handling = BGP_LINK_BW_ECMP; + bgp->reject_as_sets = false; bgp_addpath_init_bgp_data(&bgp->tx_addpath); bgp->as = *as; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (inst_type != BGP_INSTANCE_TYPE_VRF) { bgp->rfapi = bgp_rfapi_new(bgp); assert(bgp->rfapi); @@ -3375,7 +3376,7 @@ int bgp_delete(struct bgp *bgp) /* TODO - Other memory may need to be freed - e.g., NHT */ -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapi_delete(bgp); #endif bgp_cleanup_routes(bgp); @@ -5800,7 +5801,7 @@ static void peer_distribute_update(struct access_list *access) } } } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_prefix_list_update(bgp); #endif } @@ -6171,8 +6172,7 @@ static void peer_aslist_update(const char *aslist_name) static void peer_aslist_add(char *aslist_name) { peer_aslist_update(aslist_name); - route_map_notify_dependencies((char *)aslist_name, - RMAP_EVENT_ASLIST_ADDED); + route_map_notify_dependencies(aslist_name, RMAP_EVENT_ASLIST_ADDED); } static void peer_aslist_del(const char *aslist_name) @@ -7036,7 +7036,7 @@ void bgp_init(unsigned short instance) /* Init 
zebra. */ bgp_zebra_init(bm->master, instance); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_zebra_init(bm->master); #endif @@ -7051,7 +7051,7 @@ void bgp_init(unsigned short instance) bgp_route_map_init(); bgp_scan_vty_init(); bgp_mplsvpn_init(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapi_init(); #endif bgp_ethernetvpn_init(); diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 2b67a39efd..4a5772a53b 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -284,6 +284,20 @@ enum global_gr_command { #define BGP_GR_SUCCESS 0 #define BGP_GR_FAILURE 1 +/* Handling of BGP link bandwidth (LB) on receiver - whether and how to + * do weighted ECMP. Note: This applies after multipath computation. + */ +enum bgp_link_bw_handling { + /* Do ECMP if some paths don't have LB - default */ + BGP_LINK_BW_ECMP, + /* Completely ignore LB, just do regular ECMP */ + BGP_LINK_BW_IGNORE_BW, + /* Skip paths without LB, do wECMP on others */ + BGP_LINK_BW_SKIP_MISSING, + /* Do wECMP with default weight for paths not having LB */ + BGP_LINK_BW_DEFWT_4_MISSING +}; + /* BGP instance structure. */ struct bgp { /* AS number of this BGP instance. */ @@ -395,6 +409,14 @@ struct bgp { #define BGP_UPDATE_DELAY_MIN 0 #define BGP_UPDATE_DELAY_MAX 3600 + /* Reference bandwidth for BGP link-bandwidth. Used when + * the LB value has to be computed based on some other + * factor (e.g., number of multipaths for the prefix) + * Value is in Mbps + */ + uint32_t lb_ref_bw; +#define BGP_LINK_BW_REF_BW 1 + /* BGP flags. */ uint32_t flags; #define BGP_FLAG_ALWAYS_COMPARE_MED (1 << 0) @@ -424,6 +446,7 @@ struct bgp { #define BGP_FLAG_DELETE_IN_PROGRESS (1 << 22) #define BGP_FLAG_SELECT_DEFER_DISABLE (1 << 23) #define BGP_FLAG_GR_DISABLE_EOR (1 << 24) +#define BGP_FLAG_EBGP_REQUIRES_POLICY (1 << 25) enum global_mode GLOBAL_GR_FSM[BGP_GLOBAL_GR_MODE] [BGP_GLOBAL_GR_EVENT_CMD]; @@ -571,17 +594,10 @@ struct bgp { /* EVPN enable - advertise local VNIs and their MACs etc. 
*/ int advertise_all_vni; - /* RFC 8212 - prevent route leaks. */ - int ebgp_requires_policy; -#define DEFAULT_EBGP_POLICY_DISABLED 0 -#define DEFAULT_EBGP_POLICY_ENABLED 1 - /* draft-ietf-idr-deprecate-as-set-confed-set * Reject aspaths with AS_SET and/or AS_CONFED_SET. */ bool reject_as_sets; -#define BGP_REJECT_AS_SETS_DISABLED 0 -#define BGP_REJECT_AS_SETS_ENABLED 1 struct bgp_evpn_info *evpn_info; @@ -650,6 +666,9 @@ struct bgp { /* Count of peers in established state */ uint32_t established_peers; + /* Weighted ECMP related config. */ + enum bgp_link_bw_handling lb_handling; + QOBJ_FIELDS }; DECLARE_QOBJ_TYPE(bgp) diff --git a/bgpd/rfapi/bgp_rfapi_cfg.c b/bgpd/rfapi/bgp_rfapi_cfg.c index acfab53d2b..dd21a83913 100644 --- a/bgpd/rfapi/bgp_rfapi_cfg.c +++ b/bgpd/rfapi/bgp_rfapi_cfg.c @@ -2965,10 +2965,18 @@ DEFUN_NOSH (exit_vnc, } static struct cmd_node bgp_vnc_defaults_node = { - BGP_VNC_DEFAULTS_NODE, "%s(config-router-vnc-defaults)# ", 1}; + .name = "bgp vnc defaults", + .node = BGP_VNC_DEFAULTS_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vnc-defaults)# ", +}; static struct cmd_node bgp_vnc_nve_group_node = { - BGP_VNC_NVE_GROUP_NODE, "%s(config-router-vnc-nve-group)# ", 1}; + .name = "bgp vnc nve", + .node = BGP_VNC_NVE_GROUP_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vnc-nve-group)# ", +}; /*------------------------------------------------------------------------- * VNC nve-group @@ -3388,7 +3396,11 @@ DEFUN_NOSH (exit_vrf_policy, } static struct cmd_node bgp_vrf_policy_node = { - BGP_VRF_POLICY_NODE, "%s(config-router-vrf-policy)# ", 1}; + .name = "bgp vrf policy", + .node = BGP_VRF_POLICY_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vrf-policy)# ", +}; /*------------------------------------------------------------------------- * vnc-l2-group @@ -3624,7 +3636,11 @@ DEFUN (vnc_l2_group_rt, static struct cmd_node bgp_vnc_l2_group_node = { - BGP_VNC_L2_GROUP_NODE, "%s(config-router-vnc-l2-group)# ", 
1}; + .name = "bgp vnc l2", + .node = BGP_VNC_L2_GROUP_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vnc-l2-group)# ", +}; struct rfapi_l2_group_cfg * bgp_rfapi_get_group_by_lni_label(struct bgp *bgp, uint32_t logical_net_id, @@ -3681,10 +3697,10 @@ bgp_rfapi_get_ecommunity_by_lni_label(struct bgp *bgp, uint32_t is_import, void bgp_rfapi_cfg_init(void) { - install_node(&bgp_vnc_defaults_node, NULL); - install_node(&bgp_vnc_nve_group_node, NULL); - install_node(&bgp_vrf_policy_node, NULL); - install_node(&bgp_vnc_l2_group_node, NULL); + install_node(&bgp_vnc_defaults_node); + install_node(&bgp_vnc_nve_group_node); + install_node(&bgp_vrf_policy_node); + install_node(&bgp_vnc_l2_group_node); install_default(BGP_VRF_POLICY_NODE); install_default(BGP_VNC_DEFAULTS_NODE); install_default(BGP_VNC_NVE_GROUP_NODE); diff --git a/bgpd/rfapi/bgp_rfapi_cfg.h b/bgpd/rfapi/bgp_rfapi_cfg.h index b72d38220b..f1548a6173 100644 --- a/bgpd/rfapi/bgp_rfapi_cfg.h +++ b/bgpd/rfapi/bgp_rfapi_cfg.h @@ -24,7 +24,7 @@ #include "lib/table.h" #include "lib/routemap.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "rfapi.h" struct rfapi_l2_group_cfg { diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c index 435b61edf0..a0d8995a0f 100644 --- a/bgpd/rfapi/rfapi.c +++ b/bgpd/rfapi/rfapi.c @@ -838,7 +838,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */ beec.val[1] = ECOMMUNITY_OPAQUE_SUBTYPE_ENCAP; beec.val[6] = ((TunnelType) >> 8) & 0xff; beec.val[7] = (TunnelType)&0xff; - ecommunity_add_val(attr.ecommunity, &beec); + ecommunity_add_val(attr.ecommunity, &beec, false, false); } /* @@ -2650,7 +2650,8 @@ int rfapi_register(void *handle, struct rfapi_ip_prefix *prefix, ecom_value.val[7] = (l2o->logical_net_id >> 0) & 0xff; rtlist = ecommunity_new(); - ecommunity_add_val(rtlist, &ecom_value); + ecommunity_add_val(rtlist, &ecom_value, + false, false); } if (l2o->tag_id) { as_t as = bgp->as; @@ -2675,7 +2676,8 @@ int rfapi_register(void 
*handle, struct rfapi_ip_prefix *prefix, ecom_value.val[7] = val & 0xff; if (rtlist == NULL) rtlist = ecommunity_new(); - ecommunity_add_val(rtlist, &ecom_value); + ecommunity_add_val(rtlist, &ecom_value, + false, false); } } diff --git a/bgpd/rfapi/rfapi.h b/bgpd/rfapi/rfapi.h index 6af2ebeeb8..beb44aa780 100644 --- a/bgpd/rfapi/rfapi.h +++ b/bgpd/rfapi/rfapi.h @@ -21,7 +21,7 @@ #ifndef _QUAGGA_BGP_RFAPI_H #define _QUAGGA_BGP_RFAPI_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include <stdint.h> #include <netinet/in.h> diff --git a/bgpd/rfapi/rfapi_backend.h b/bgpd/rfapi/rfapi_backend.h index 4facc31c60..4d2ae0b02f 100644 --- a/bgpd/rfapi/rfapi_backend.h +++ b/bgpd/rfapi/rfapi_backend.h @@ -21,7 +21,7 @@ #ifndef _QUAGGA_BGP_RFAPI_BACKEND_H #define _QUAGGA_BGP_RFAPI_BACKEND_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/bgp_route.h" #include "bgpd/bgp_nexthop.h" diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c index d058fe3b28..41e6a64a64 100644 --- a/bgpd/rfapi/rfapi_import.c +++ b/bgpd/rfapi/rfapi_import.c @@ -573,7 +573,7 @@ struct rfapi_import_table *rfapiMacImportTableGet(struct bgp *bgp, uint32_t lni) eval.val[7] = (lni >> 0) & 0xff; enew = ecommunity_new(); - ecommunity_add_val(enew, &eval); + ecommunity_add_val(enew, &eval, false, false); it->rt_import_list = enew; for (afi = AFI_IP; afi < AFI_MAX; ++afi) { @@ -2239,7 +2239,7 @@ static struct bgp_path_info *rfapiItBiIndexSearch( bpi_fake.peer = peer; bpi_fake.extra = &bpi_extra; - bpi_fake.extra->vnc.import.rd = *(struct prefix_rd *)prd; + bpi_fake.extra->vnc.import.rd = *prd; if (aux_prefix) { bpi_fake.extra->vnc.import.aux_prefix = *aux_prefix; } else { diff --git a/bgpd/rfapi/rfapi_monitor.c b/bgpd/rfapi/rfapi_monitor.c index 0b8dfc3554..cd26892b84 100644 --- a/bgpd/rfapi/rfapi_monitor.c +++ b/bgpd/rfapi/rfapi_monitor.c @@ -870,10 +870,9 @@ void rfapiMonitorItNodeChanged( if ((sl = RFAPI_MONITOR_ETH(rn))) { for (cursor = NULL, - rc = skiplist_next(sl, NULL, (void **)&m, - 
(void **)&cursor); + rc = skiplist_next(sl, NULL, (void **)&m, &cursor); !rc; rc = skiplist_next(sl, NULL, (void **)&m, - (void **)&cursor)) { + &cursor)) { if (skiplist_search(nves_seen, m->rfd, NULL)) { /* diff --git a/bgpd/rfapi/rfapi_rib.c b/bgpd/rfapi/rfapi_rib.c index 04a538dc63..95b8582b95 100644 --- a/bgpd/rfapi/rfapi_rib.c +++ b/bgpd/rfapi/rfapi_rib.c @@ -268,8 +268,8 @@ static void rfapi_info_free(struct rfapi_info *goner) if (goner->timer) { struct rfapi_rib_tcb *tcb; - tcb = ((struct thread *)goner->timer)->arg; - thread_cancel((struct thread *)goner->timer); + tcb = goner->timer->arg; + thread_cancel(goner->timer); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); goner->timer = NULL; } diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c index 5a84d14bd9..7a42e5aed9 100644 --- a/bgpd/rfapi/rfapi_vty.c +++ b/bgpd/rfapi/rfapi_vty.c @@ -25,7 +25,6 @@ #include "lib/memory.h" #include "lib/routemap.h" #include "lib/log.h" -#include "lib/log_int.h" #include "lib/linklist.h" #include "lib/command.h" @@ -371,7 +370,7 @@ int rfapiStream2Vty(void *stream, /* input */ *fp = (int (*)(void *, const char *, ...))rfapiDebugPrintf; *outstream = NULL; *vty_newline = str_vty_newline(*vty); - return (vzlog_test(LOG_DEBUG)); + return 1; } if (((uintptr_t)stream == (uintptr_t)1) @@ -3428,7 +3427,7 @@ static void clear_vnc_nve_closer(struct rfapi_local_reg_delete_arg *cda) &cursor)) { if (pValue->rfd) { - ((struct rfapi_descriptor *)pValue->rfd)->flags |= + pValue->rfd->flags |= RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY; rfapi_close(pValue->rfd); } diff --git a/bgpd/rfapi/vnc_debug.c b/bgpd/rfapi/vnc_debug.c index 2c5e188328..5c627efbee 100644 --- a/bgpd/rfapi/vnc_debug.c +++ b/bgpd/rfapi/vnc_debug.c @@ -173,11 +173,17 @@ static int bgp_vnc_config_write_debug(struct vty *vty) return write; } -static struct cmd_node debug_node = {DEBUG_VNC_NODE, "", 1}; +static int bgp_vnc_config_write_debug(struct vty *vty); +static struct cmd_node debug_node = { + .name = "vnc debug", + .node = 
DEBUG_VNC_NODE, + .prompt = "", + .config_write = bgp_vnc_config_write_debug, +}; void vnc_debug_init(void) { - install_node(&debug_node, bgp_vnc_config_write_debug); + install_node(&debug_node); install_element(ENABLE_NODE, &show_debugging_bgp_vnc_cmd); install_element(ENABLE_NODE, &debug_bgp_vnc_cmd); diff --git a/bgpd/rfapi/vnc_debug.h b/bgpd/rfapi/vnc_debug.h index dd49383072..c472b6366e 100644 --- a/bgpd/rfapi/vnc_debug.h +++ b/bgpd/rfapi/vnc_debug.h @@ -20,7 +20,7 @@ #ifndef _QUAGGA_BGP_VNC_DEBUG_H #define _QUAGGA_BGP_VNC_DEBUG_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * debug state storage diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c index a7aa4c66fa..bd3395b49f 100644 --- a/bgpd/rfapi/vnc_export_bgp.c +++ b/bgpd/rfapi/vnc_export_bgp.c @@ -532,7 +532,7 @@ static struct ecommunity *vnc_route_origin_ecom(struct agg_node *rn) &bpi->attr->mp_nexthop_global_in.s_addr, 4); roec.val[6] = 0; roec.val[7] = 0; - ecommunity_add_val(new, &roec); + ecommunity_add_val(new, &roec, false, false); break; case AF_INET6: /* No support for IPv6 addresses in extended communities @@ -563,7 +563,7 @@ static struct ecommunity *vnc_route_origin_ecom_single(struct in_addr *origin) new = ecommunity_new(); assert(new); - ecommunity_add_val(new, &roec); + ecommunity_add_val(new, &roec, false, false); if (!new->size) { ecommunity_free(&new); diff --git a/bgpd/rfapi/vnc_import_bgp.c b/bgpd/rfapi/vnc_import_bgp.c index 915dfaabf2..ac5beed0e3 100644 --- a/bgpd/rfapi/vnc_import_bgp.c +++ b/bgpd/rfapi/vnc_import_bgp.c @@ -415,7 +415,7 @@ static int process_unicast_route(struct bgp *bgp, /* in */ localadmin = htons(hc->resolve_nve_roo_local_admin); memcpy(vnc_gateway_magic.val + 6, (char *)&localadmin, 2); - ecommunity_add_val(*ecom, &vnc_gateway_magic); + ecommunity_add_val(*ecom, &vnc_gateway_magic, false, false); } return 0; diff --git a/bgpd/valgrind.supp b/bgpd/valgrind.supp index 7a25c88363..ed236a6dc5 100644 --- a/bgpd/valgrind.supp +++ 
b/bgpd/valgrind.supp @@ -7,3 +7,11 @@ fun:ly_load_plugins_dir fun:ly_load_plugins } +{ + <zlog_keep_working_at_exit> + Memcheck:Leak + match-leak-kinds: reachable + fun:calloc + fun:qcalloc + fun:zlog_target_clone +} diff --git a/configure.ac b/configure.ac index fe389ebb35..6f7ca9defd 100755 --- a/configure.ac +++ b/configure.ac @@ -295,6 +295,7 @@ AC_C_FLAG([-Wmissing-declarations]) AC_C_FLAG([-Wpointer-arith]) AC_C_FLAG([-Wbad-function-cast]) AC_C_FLAG([-Wwrite-strings]) +AC_C_FLAG([-Wundef]) if test "$enable_gcc_ultra_verbose" = "yes" ; then AC_C_FLAG([-Wcast-qual]) AC_C_FLAG([-Wstrict-prototypes]) @@ -859,7 +860,7 @@ fi AC_SUBST([EXTRAVERSION]) if test "$with_pkg_git_version" = "yes"; then - if test -d "${srcdir}/.git"; then + if test -e "${srcdir}/.git"; then AC_DEFINE([GIT_VERSION], [1], [include git version info]) else with_pkg_git_version="no" AC_MSG_WARN([--with-pkg-git-version given, but this is not a git checkout]) @@ -958,7 +959,7 @@ int main(int argc, char **argv) { AC_CHECK_HEADERS([pthread_np.h],,, [ #include <pthread.h> ]) -AC_CHECK_FUNCS([pthread_setname_np pthread_set_name_np]) +AC_CHECK_FUNCS([pthread_setname_np pthread_set_name_np pthread_getthreadid_np]) needsync=true @@ -1201,7 +1202,11 @@ dnl other functions dnl --------------- AC_CHECK_FUNCS([ \ strlcat strlcpy \ - getgrouplist]) + getgrouplist \ + openat \ + unlinkat \ + posix_fallocate \ + ]) dnl ########################################################################## dnl LARGE if block spans a lot of "configure"! 
@@ -2196,22 +2201,12 @@ if test "$enable_backtrace" != "no" ; then fi if test "$backtrace_ok" = "no"; then - case "$host_os" in - sunos* | solaris2*) - AC_CHECK_FUNCS([printstack], [ - AC_DEFINE([HAVE_PRINTSTACK], [1], [Solaris printstack]) + AC_CHECK_HEADER([execinfo.h], [ + AC_SEARCH_LIBS([backtrace], [execinfo], [ + AC_DEFINE([HAVE_GLIBC_BACKTRACE], [1], [Glibc backtrace]) backtrace_ok=yes - ]) - ;; - esac - if test "$backtrace_ok" = "no"; then - AC_CHECK_HEADER([execinfo.h], [ - AC_SEARCH_LIBS([backtrace], [execinfo], [ - AC_DEFINE([HAVE_GLIBC_BACKTRACE], [1], [Glibc backtrace]) - backtrace_ok=yes - ],, [-lm]) - ]) - fi + ],, [-lm]) + ]) fi if test "$enable_backtrace" = "yes" -a "$backtrace_ok" = "no"; then diff --git a/debian/README.Debian b/debian/README.Debian index cbd70f82f6..01b9213ae4 100644 --- a/debian/README.Debian +++ b/debian/README.Debian @@ -52,31 +52,6 @@ used. This option should only be used for systems that do not have systemd, e.g. Ubuntu 14.04. -* Why has SNMP support been disabled? -===================================== -FRR used to link against the NetSNMP libraries to provide SNMP -support. Those libraries sadly link against the OpenSSL libraries -to provide crypto support for SNMPv3 among others. -OpenSSL now is not compatible with the GNU GENERAL PUBLIC LICENSE (GPL) -licence that FRR is distributed under. For more explanation read: - http://www.gnome.org/~markmc/openssl-and-the-gpl.html - http://www.gnu.org/licenses/gpl-faq.html#GPLIncompatibleLibs -Updating the licence to explicitly allow linking against OpenSSL -would requite the affirmation of all people that ever contributed -a significant part to Zebra / Quagga or FRR and thus are the collective -"copyright holder". That's too much work. 
Using a shrinked down -version of NetSNMP without OpenSSL or convincing the NetSNMP people -to change to GnuTLS are maybe good solutions but not reachable -during the last days before the Sarge release :-( - - *BUT* - -It is allowed by the used licence mix that you fetch the sources and -build FRR yourself with SNMP with - # apt-get -b source -Ppkg.frr.snmp frr -Just distributing it in binary form, linked against OpenSSL, is forbidden. - - * Debian Policy compliance notes ================================ diff --git a/debian/control b/debian/control index ab2df20432..f4275471d5 100644 --- a/debian/control +++ b/debian/control @@ -31,10 +31,10 @@ Build-Depends: python3-sphinx, python3-pytest <!nocheck>, texinfo (>= 4.7) -Standards-Version: 4.2.1 +Standards-Version: 4.4.1 Homepage: https://www.frrouting.org/ -Vcs-Browser: https://github.com/FRRouting/frr/ -Vcs-Git: https://github.com/FRRouting/frr.git +Vcs-Browser: https://github.com/FRRouting/frr/tree/debian/master +Vcs-Git: https://github.com/FRRouting/frr.git -b debian/master Package: frr Architecture: linux-any @@ -104,6 +104,7 @@ Build-Profiles: <!pkg.frr.nortrlib> Package: frr-doc Section: doc Architecture: all +Multi-Arch: foreign Depends: ${misc:Depends}, libjs-jquery, diff --git a/debian/frr.install b/debian/frr.install index 5917c0da84..e2485fe8b8 100644 --- a/debian/frr.install +++ b/debian/frr.install @@ -9,6 +9,7 @@ usr/lib/frr/*d usr/lib/frr/watchfrr usr/lib/frr/zebra usr/lib/*/frr/modules/zebra_cumulus_mlag.so +usr/lib/*/frr/modules/dplane_fpm_nl.so usr/lib/*/frr/modules/zebra_irdp.so usr/lib/*/frr/modules/zebra_fpm.so usr/lib/*/frr/modules/bgpd_bmp.so diff --git a/debian/frr.lintian-overrides b/debian/frr.lintian-overrides index a3e6fcdc25..616f265e01 100644 --- a/debian/frr.lintian-overrides +++ b/debian/frr.lintian-overrides @@ -6,5 +6,8 @@ frr binary: spelling-error-in-binary usr/lib/frr/zebra writen written frr binary: spelling-error-in-binary usr/lib/frr/pimd writen written frr binary: 
spelling-error-in-binary usr/lib/frr/pimd iif if +# prefixed man pages for off-PATH daemons +manpage-without-executable + # personal name spelling-error-in-copyright Ang And diff --git a/doc/developer/fpm.rst b/doc/developer/fpm.rst new file mode 100644 index 0000000000..9849869133 --- /dev/null +++ b/doc/developer/fpm.rst @@ -0,0 +1,103 @@ +FPM +=== + +FPM stands for Forwarding Plane Manager and it's a module for use with Zebra. + +The encapsulation header for the messages exchanged with the FPM is +defined by the file :file:`fpm/fpm.h` in the frr tree. The routes +themselves are encoded in Netlink or protobuf format, with Netlink +being the default. + +Netlink is standard format for encoding messages to talk with kernel space +in Linux and it is also the name of the socket type used by it. +The FPM netlink usage differs from Linux's in: + +- Linux netlink sockets use datagrams in a multicast fashion, FPM uses + as a stream and it is unicast. +- FPM netlink messages might have more or less information than a normal + Linux netlink socket message (example: RTM_NEWROUTE might add an extra + route attribute to signalize VxLAN encapsulation). + +Protobuf is one of a number of new serialization formats wherein the +message schema is expressed in a purpose-built language. Code for +encoding/decoding to/from the wire format is generated from the +schema. Protobuf messages can be extended easily while maintaining +backward-compatibility with older code. Protobuf has the following +advantages over Netlink: + +- Code for serialization/deserialization is generated automatically. This + reduces the likelihood of bugs, allows third-party programs to be integrated + quickly, and makes it easy to add fields. +- The message format is not tied to an OS (Linux), and can be evolved + independently. + +.. 
note:: + + Currently there are two FPM modules in ``zebra``: + + * ``fpm`` + * ``dplane_fpm_nl`` + +fpm +^^^ + +The first FPM implementation that was built using hooks in ``zebra`` route +handling functions. It uses its own netlink/protobuf encoding functions to +translate ``zebra`` route data structures into formatted binary data. + + +dplane_fpm_nl +^^^^^^^^^^^^^ + +The newer FPM implementation that was built using ``zebra``'s data plane +framework as a plugin. It only supports netlink and it shares ``zebra``'s +netlink functions to translate route event snapshots into formatted binary +data. + + +Protocol Specification +---------------------- + +FPM (in any mode) uses a TCP connection to talk with external applications. +It operates as TCP client and uses the CLI configured address/port to connect +to the FPM server (defaults to port ``2620``). + +FPM frames all data with a header to help the external reader figure how +many bytes it has to read in order to read the full message (this helps +simulates datagrams like in the original netlink Linux kernel usage). + +Frame header: + +:: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +---------------+---------------+-------------------------------+ + | Version | Message type | Message length | + +---------------+---------------+-------------------------------+ + | Data... | + +---------------------------------------------------------------+ + + +Version +^^^^^^^ + +Currently there is only one version, so it should be always ``1``. + + +Message Type +^^^^^^^^^^^^ + +Defines what underlining protocol we are using: netlink (``1``) or protobuf (``2``). + + +Message Length +^^^^^^^^^^^^^^ + +Amount of data in this frame in network byte order. + + +Data +^^^^ + +The netlink or protobuf message payload. 
diff --git a/doc/developer/index.rst b/doc/developer/index.rst index 3a33d9a5ec..26b590c876 100644 --- a/doc/developer/index.rst +++ b/doc/developer/index.rst @@ -11,6 +11,7 @@ FRRouting Developer's Guide library testing bgpd + fpm ospf zebra vtysh diff --git a/doc/developer/logging.rst b/doc/developer/logging.rst index db577c9216..0430ad72a3 100644 --- a/doc/developer/logging.rst +++ b/doc/developer/logging.rst @@ -1,7 +1,7 @@ .. _logging: -Developer's Guide to Logging -============================ +Logging +======= One of the most frequent decisions to make while writing code for FRR is what to log, what level to log it at, and when to log it. Here is a list of @@ -116,8 +116,11 @@ AS-Safety while AS-Safe) * extensions are only AS-Safe if their printer is AS-Safe +Log levels +---------- + Errors and warnings -------------------- +^^^^^^^^^^^^^^^^^^^ If it is something that the user will want to look at and maybe do something, it is either an **error** or a **warning**. @@ -163,7 +166,7 @@ Examples for errors: Informational messages ----------------------- +^^^^^^^^^^^^^^^^^^^^^^ Anything that provides introspection to the user during normal operation is an **info** message. @@ -202,7 +205,7 @@ Examples: Debug messages and asserts --------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^ Everything that is only interesting on-demand, or only while developing, is a **debug** message. It might be interesting to the user for a @@ -239,3 +242,180 @@ Examples: * some field that is absolutely needed is :code:`NULL` * any other kind of data structure corruption that will cause the daemon to crash sooner or later, one way or another + +Thread-local buffering +---------------------- + +The core logging code in :file:`lib/zlog.c` allows setting up per-thread log +message buffers in order to improve logging performance. The following rules +apply for this buffering: + +* Only messages of priority *DEBUG* or *INFO* are buffered. 
+* Any higher-priority message causes the thread's entire buffer to be flushed, + thus message ordering is preserved on a per-thread level. +* There is no guarantee on ordering between different threads; in most cases + this is arbitrary to begin with since the threads essentially race each + other in printing log messages. If an order is established with some + synchronization primitive, add calls to :c:func:`zlog_tls_buffer_flush()`. +* The buffers are only ever accessed by the thread they are created by. This + means no locking is necessary. + +Both the main/default thread and additional threads created by +:c:func:`frr_pthread_new()` with the default :c:func:`frr_run()` handler will +initialize thread-local buffering and call :c:func:`zlog_tls_buffer_flush()` +when idle. + +If some piece of code runs for an extended period, it may be useful to insert +calls to :c:func:`zlog_tls_buffer_flush()` in appropriate places: + +.. c:function:: void zlog_tls_buffer_flush(void) + + Write out any pending log messages that the calling thread may have in its + buffer. This function is safe to call regardless of the per-thread log + buffer being set up / in use or not. + +When working with threads that do not use the :c:type:`struct thread_master` +event loop, per-thread buffers can be managed with: + +.. c:function:: void zlog_tls_buffer_init(void) + + Set up thread-local buffering for log messages. This function may be + called repeatedly without adverse effects, but remember to call + :c:func:`zlog_tls_buffer_fini()` at thread exit. + + .. warning:: + + If this function is called, but :c:func:`zlog_tls_buffer_flush()` is + not used, log message output will lag behind since messages will only be + written out when the buffer is full. + + Exiting the thread without calling :c:func:`zlog_tls_buffer_fini()` + will cause buffered log messages to be lost. + +.. 
c:function:: void zlog_tls_buffer_fini(void) + + Flush pending messages and tear down thread-local log message buffering. + This function may be called repeatedly regardless of whether + :c:func:`zlog_tls_buffer_init()` was ever called. + +Log targets +----------- + +The actual logging subsystem (in :file:`lib/zlog.c`) is heavily separated +from the actual log writers. It uses an atomic linked-list (`zlog_targets`) +with RCU to maintain the log targets to be called. This list is intended to +function as "backend" only, it **is not used for configuration**. + +Logging targets provide their configuration layer on top of this and maintain +their own capability to enumerate and store their configuration. Some targets +(e.g. syslog) are inherently single instance and just stuff their config in +global variables. Others (e.g. file/fd output) are multi-instance capable. +There is another layer boundary here between these and the VTY configuration +that they use. + +Basic internals +^^^^^^^^^^^^^^^ + +.. c:type:: struct zlog_target + + This struct needs to be filled in by any log target and then passed to + :c:func:`zlog_target_replace()`. After it has been registered, + **RCU semantics apply**. Most changes to associated data should make a + copy, change that, and then replace the entire struct. + + Additional per-target data should be "appended" by embedding this struct + into a larger one, for use with `containerof()`, and + :c:func:`zlog_target_clone()` and :c:func:`zlog_target_free()` should be + used to allocate/free the entire container struct. + + Do not use this structure to maintain configuration. It should only + contain (a copy of) the data needed to perform the actual logging. For + example, the syslog target uses this: + + .. 
code-block:: c + + struct zlt_syslog { + struct zlog_target zt; + int syslog_facility; + }; + + static void zlog_syslog(struct zlog_target *zt, struct zlog_msg *msgs[], size_t nmsgs) + { + struct zlt_syslog *zte = container_of(zt, struct zlt_syslog, zt); + size_t i; + + for (i = 0; i < nmsgs; i++) + if (zlog_msg_prio(msgs[i]) <= zt->prio_min) + syslog(zlog_msg_prio(msgs[i]) | zte->syslog_facility, "%s", + zlog_msg_text(msgs[i], NULL)); + } + + +.. c:function:: struct zlog_target *zlog_target_clone(struct memtype *mt, struct zlog_target *oldzt, size_t size) + + Allocates a logging target struct. Note that the ``oldzt`` argument may be + ``NULL`` to allocate a "from scratch". If ``oldzt`` is not ``NULL``, the + generic bits in :c:type:`struct zlog_target` are copied. **Target specific + bits are not copied.** + +.. c:function:: struct zlog_target *zlog_target_replace(struct zlog_target *oldzt, struct zlog_target *newzt) + + Adds, replaces or deletes a logging target (either ``oldzt`` or ``newzt`` may be ``NULL``.) + + Returns ``oldzt`` for freeing. The target remains possibly in use by + other threads until the RCU cycle ends. This implies you cannot release + resources (e.g. memory, file descriptors) immediately. + + The replace operation is not atomic; for a brief period it is possible that + messages are delivered on both ``oldzt`` and ``newzt``. + + .. warning:: + + ``oldzt`` must remain **functional** until the RCU cycle ends. + +.. c:function:: void zlog_target_free(struct memtype *mt, struct zlog_target *zt) + + Counterpart to :c:func:`zlog_target_clone()`, frees a target (using RCU.) + +.. c:member:: void (*zlog_target.logfn)(struct zlog_target *zt, struct zlog_msg *msgs[], size_t nmsg) + + Called on a target to deliver "normal" logging messages. ``msgs`` is an + array of opaque structs containing the actual message. Use ``zlog_msg_*`` + functions to access message data (this is done to allow some optimizations, + e.g. 
lazy formatting the message text and timestamp as needed.) + + .. note:: + + ``logfn()`` must check each individual message's priority value against + the configured ``prio_min``. While the ``prio_min`` field is common to + all targets and used by the core logging code to early-drop unneeded log + messages, the array is **not** filtered for each ``logfn()`` call. + +.. c:member:: void (*zlog_target.logfn_sigsafe)(struct zlog_target *zt, const char *text, size_t len) + + Called to deliver "exception" logging messages (i.e. SEGV messages.) + Must be Async-Signal-Safe (may not allocate memory or call "complicated" + libc functions.) May be ``NULL`` if the log target cannot handle this. + +Standard targets +^^^^^^^^^^^^^^^^ + +:file:`lib/zlog_targets.c` provides the standard file / fd / syslog targets. +The syslog target is single-instance while file / fd targets can be +instantiated as needed. There are 3 built-in targets that are fully +autonomous without any config: + +- startup logging to `stderr`, until either :c:func:`zlog_startup_end()` or + :c:func:`zlog_aux_init()` is called. +- stdout logging for non-daemon programs using :c:func:`zlog_aux_init()` +- crashlogs written to :file:`/var/tmp/frr.daemon.crashlog` + +The regular CLI/command-line logging setup is handled by :file:`lib/log_vty.c` +which makes the appropriate instantiations of syslog / file / fd targets. + +.. todo:: + + :c:func:`zlog_startup_end()` should do an explicit switchover from + startup stderr logging to configured logging. Currently, configured logging + starts in parallel as soon as the respective setup is executed. This results + in some duplicate logging. 
diff --git a/doc/developer/ospf-sr.rst b/doc/developer/ospf-sr.rst index d798ba78ef..070465db5b 100644 --- a/doc/developer/ospf-sr.rst +++ b/doc/developer/ospf-sr.rst @@ -22,7 +22,7 @@ Interoperability ---------------- * Tested on various topology including point-to-point and LAN interfaces - in a mix of Free Range Routing instance and Cisco IOS-XR 6.0.x + in a mix of FRRouting instance and Cisco IOS-XR 6.0.x * Check OSPF LSA conformity with latest wireshark release 2.5.0-rc Implementation details diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst index 33ebe06d2f..7e627781e0 100644 --- a/doc/developer/topotests.rst +++ b/doc/developer/topotests.rst @@ -360,6 +360,7 @@ This is the recommended test writing routine: - Write a topology (Graphviz recommended) - Obtain configuration files - Write the test itself +- Format the new code using `black <https://github.com/psf/black>`_ - Create a Pull Request Topotest File Hierarchy @@ -760,6 +761,8 @@ Requirements: inside folders named after the equipment. - Tests must be able to run without any interaction. To make sure your test conforms with this, run it without the :option:`-s` parameter. +- Use `black <https://github.com/psf/black>`_ code formatter before creating + a pull request. This ensures we have a unified code style. Tips: diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst index 8ce3bdeeb2..6885a41e0f 100644 --- a/doc/developer/workflow.rst +++ b/doc/developer/workflow.rst @@ -203,7 +203,6 @@ Submitting Patches and Enhancements FRR accepts patches from two sources: -- Email (git format-patch) - GitHub pull request Contributors are highly encouraged to use GitHub's fork-and-PR workflow. It is @@ -228,29 +227,6 @@ summary of the included patches. The description should provide additional details that will help the reviewer to understand the context of the included patches. 
-Patch Submission via Mailing List ---------------------------------- - -As an alternative submission method, a patch can be mailed to the -development mailing list. Patches received on the mailing list will be -picked up by Patchwork and tested against the latest development branch. - -The recommended way to send the patch (or series of NN patches) to the -list is by using ``git send-email`` as follows (assuming they are the N -most recent commit(s) in your git history):: - - git send-email -NN --annotate --to=dev@lists.frrouting.org - -If your commits do not already contain a ``Signed-off-by`` line, then -use the following command to add it (after making sure you agree to the -Developer Certificate of Origin as outlined above):: - - git send-email -NN --annotate --signoff --to=dev@lists.frrouting.org - -Submitting multi-commit patches as a GitHub pull request is **strongly -encouraged** and increases the probability of your patch getting reviewed and -merged in a timely manner. - .. _license-for-contributions: License for Contributions @@ -377,6 +353,14 @@ After Submitting Your Changes - An author must never delete or manually dismiss someone else's comments or review. (A review may be overridden by agreement in the weekly technical meeting.) + - When you have addressed someone's review comments, please click the + "re-request review" button (in the top-right corner of the PR page, next + to the reviewer's name, an icon that looks like "reload") + - The responsibility for keeping a PR moving rests with the author at + least as long as there are either negative CI results or negative review + comments. If you forget to mark a review comment as addressed (by + clicking re-request review), the reviewer may very well not notice and + won't come back to your PR. - Automatically generated comments, e.g., those generated by CI systems, may be deleted by authors and others when such comments are not the most recent results from that automated comment source. 
@@ -459,6 +443,24 @@ Guidelines for code review code change is large enough/significant enough to warrant such a requirement. +For project members with merge permissions, the following patterns have +emerged: + +- a PR with any reviews requesting changes may not be merged. + +- a PR with any negative CI result may not be merged. + +- an open "yellow" review mark ("review requested, but not done") should be + given some time (a few days up to weeks, depending on the size of the PR), + but is not a merge blocker. + +- a "textbubble" review mark ("review comments, but not positive/negative") + should be read through but is not a merge blocker. + +- non-trivial PRs are generally given some time (again depending on the size) + for people to mark an interest in reviewing. Trivial PRs may be merged + immediately when CI is green. + Coding Practices & Style ======================== @@ -539,6 +541,28 @@ your new claim at the end of the list. * ... */ +Defensive coding requirements +----------------------------- + +In general, code submitted into FRR will be rejected if it uses unsafe +programming practices. While there is no enforced overall ruleset, the +following requirements have achieved consensus: + +- ``strcpy``, ``strcat`` and ``sprintf`` are unacceptable without exception. + Use ``strlcpy``, ``strlcat`` and ``snprintf`` instead. (Rationale: even if + you know the operation cannot overflow the buffer, a future code change may + inadvertently introduce an overflow.) + +- buffer size arguments, particularly to ``strlcpy`` and ``snprintf``, must + use ``sizeof()`` wherever possible. Particularly, do not use a size + constant in these cases. (Rationale: changing a buffer to another size + constant may leave the write operations on a now-incorrect size limit.) + +Other than these specific rules, coding practices from the Linux kernel as +well as CERT or MISRA C guidelines may provide useful input on safe C code. 
+However, these rules are not applied as-is; some of them expressly collide +with established practice. + Code Formatting --------------- @@ -992,6 +1016,11 @@ Miscellaneous When in doubt, follow the guidelines in the Linux kernel style guide, or ask on the development mailing list / public Slack instance. +JSON Output +^^^^^^^^^^^ + +All JSON keys are to be camelCased, with no spaces. + .. _documentation: diff --git a/doc/developer/zebra.rst b/doc/developer/zebra.rst index e3526d1843..e2f887ef28 100644 --- a/doc/developer/zebra.rst +++ b/doc/developer/zebra.rst @@ -9,13 +9,20 @@ Zebra Overview of the Zebra Protocol ============================== -The Zebra protocol is used by protocol daemons to communicate with the -**zebra** daemon. - -Each protocol daemon may request and send information to and from the **zebra** -daemon such as interface states, routing state, nexthop-validation, and so on. -Protocol daemons may also install routes with **zebra**. The **zebra** daemon -manages which routes are installed into the forwarding table with the kernel. +The Zebra protocol (or ``ZAPI``) is used by protocol daemons to +communicate with the **zebra** daemon. + +Each protocol daemon may request and send information to and from the +**zebra** daemon such as interface states, routing state, +nexthop-validation, and so on. Protocol daemons may also install +routes with **zebra**. The **zebra** daemon manages which routes are +installed into the forwarding table with the kernel. Some daemons use +more than one ZAPI connection. This is supported: each ZAPI session is +identified by a tuple of: ``{protocol, instance, session_id}``. LDPD +is an example: it uses a second, synchronous ZAPI session to manage +label blocks. The default value for ``session_id`` is zero; daemons +who use multiple ZAPI sessions must assign unique values to the +sessions' ids. The Zebra protocol is a streaming protocol, with a common header. Version 0 lacks a version field and is implicitly versioned. 
Version 1 and all subsequent diff --git a/doc/user/basic.rst b/doc/user/basic.rst index edcfce45ad..5b7786de18 100644 --- a/doc/user/basic.rst +++ b/doc/user/basic.rst @@ -86,6 +86,15 @@ Basic Config Commands debugging. Note that the existing code logs its most important messages with severity ``errors``. + .. warning:: + + FRRouting uses the ``writev()`` system call to write log messages. This + call is supposed to be atomic, but in reality this does not hold for + pipes or terminals, only regular files. This means that in rare cases, + concurrent log messages from distinct threads may get jumbled in + terminal output. Use a log file and ``tail -f`` if this rare chance is + unacceptable to your setup. + .. index:: single: no log file [FILENAME [LEVEL]] single: log file FILENAME [LEVEL] @@ -104,14 +113,6 @@ Basic Config Commands deprecated ``log trap`` command) will be used. The ``no`` form of the command disables logging to a file. - .. note:: - - If you do not configure any file logging, and a daemon crashes due to a - signal or an assertion failure, it will attempt to save the crash - information in a file named :file:`/var/tmp/frr.<daemon name>.crashlog`. - For security reasons, this will not happen if the file exists already, so - it is important to delete the file after reporting the crash information. - .. index:: single: no log syslog [LEVEL] single: log syslog [LEVEL] diff --git a/doc/user/bfd.rst b/doc/user/bfd.rst index e6a3c4977a..32397d1303 100644 --- a/doc/user/bfd.rst +++ b/doc/user/bfd.rst @@ -476,13 +476,36 @@ You can also clear packet counters per session with the following commands, only Session down events: 0 Zebra notifications: 4 -Logging / debugging -=================== +Debugging +========= -There are no fine grained debug controls for bfdd. Just enable debug logs. +By default only informational, warning and error messages are going to be 
If you want to get debug messages and other diagnostics then make +sure you have `debugging` level enabled: :: config log file /var/log/frr/frr.log debugging log syslog debugging + +You may also fine tune the debug messages by selecting one or more of the +debug levels: + +.. index:: [no] debug bfd network +.. clicmd:: [no] debug bfd network + + Toggle network events: show messages about socket failures and unexpected + BFD messages that may not belong to registered peers. + +.. index:: [no] debug bfd peer +.. clicmd:: [no] debug bfd peer + + Toggle peer event log messages: show messages about peer creation/removal + and state changes. + +.. index:: [no] debug bfd zebra +.. clicmd:: [no] debug bfd zebra + + Toggle zebra message events: show messages about interfaces, local + addresses, VRF and daemon peer registrations. diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 85ccc277a8..eb718007e8 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -414,7 +414,11 @@ Require policy on EBGP .. index:: [no] bgp ebgp-requires-policy .. clicmd:: [no] bgp ebgp-requires-policy - This command requires incoming and outgoing filters to be applied for eBGP sessions. Without the incoming filter, no routes will be accepted. Without the outgoing filter, no routes will be announced. + This command requires incoming and outgoing filters to be applied + for eBGP sessions. Without the incoming filter, no routes will be + accepted. Without the outgoing filter, no routes will be announced. + + This is enabled by default. Reject routes with AS_SET or AS_CONFED_SET types ------------------------------------------------ @@ -1997,6 +2001,18 @@ BGP Extended Communities in Route Map This command set Site of Origin value. +.. index:: set extcommunity bandwidth <(1-25600) | cumulative | num-multipaths> [non-transitive] +.. 
clicmd:: set extcommunity bandwidth <(1-25600) | cumulative | num-multipaths> [non-transitive] + + This command sets the BGP link-bandwidth extended community for the prefix + (best path) for which it is applied. The link-bandwidth can be specified as + an ``explicit value`` (specified in Mbps), or the router can be told to use + the ``cumulative bandwidth`` of all multipaths for the prefix or to compute + it based on the ``number of multipaths``. The link bandwidth extended + community is encoded as ``transitive`` unless the set command explicitly + configures it as ``non-transitive``. + +.. seealso:: :ref:`wecmp_linkbw` Note that the extended expanded community is only used for `match` rule, not for `set` actions. @@ -2634,7 +2650,14 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`. These commands display BGP routes for the specific routing table indicated by the selected afi and the selected safi. If no afi and no safi value is given, - the command falls back to the default IPv6 routing table + the command falls back to the default IPv6 routing table. + For EVPN prefixes, you can display the full BGP table for this AFI/SAFI + using the standard `show bgp [afi] [safi]` syntax. + +.. index:: show bgp l2vpn evpn route [type <macip|2|multicast|3|es|4|prefix|5>] +.. clicmd:: show bgp l2vpn evpn route [type <macip|2|multicast|3|es|4|prefix|5>] + + Additionally, you can also filter this output by route type. .. index:: show bgp [afi] [safi] summary .. clicmd:: show bgp [afi] [safi] summary @@ -2665,6 +2688,16 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`. Display flap statistics of routes of the selected afi and safi selected. +.. index:: show bgp [afi] [safi] statistics +.. clicmd:: show bgp [afi] [safi] statistics + + Display statistics of routes of the selected afi and safi. + +.. index:: show bgp statistics-all +.. clicmd:: show bgp statistics-all + + Display statistics of routes of all the afi and safi. + .. 
_bgp-display-routes-by-community: Displaying Routes by Community Attribute @@ -3152,6 +3185,8 @@ Example of how to set up a 6-Bone connection. .. include:: rpki.rst +.. include:: wecmp_linkbw.rst + .. include:: flowspec.rst .. [#med-transitivity-rant] For some set of objects to have an order, there *must* be some binary ordering relation that is defined for *every* combination of those objects, and that relation *must* be transitive. I.e.:, if the relation operator is <, and if a < b and b < c then that relation must carry over and it *must* be that a < c for the objects to have an order. The ordering relation may allow for equality, i.e. a < b and b < a may both be true and imply that a and b are equal in the order and not distinguished by it, in which case the set has a partial order. Otherwise, if there is an order, all the objects have a distinct place in the order and the set has a total order) diff --git a/doc/user/conf.py b/doc/user/conf.py index 5582847431..d8a188b152 100644 --- a/doc/user/conf.py +++ b/doc/user/conf.py @@ -132,7 +132,8 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', 'rpki.rst', 'routeserver.rst', - 'ospf_fundamentals.rst', 'flowspec.rst', 'snmptrap.rst'] + 'ospf_fundamentals.rst', 'flowspec.rst', 'snmptrap.rst', + 'wecmp_linkbw.rst'] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/doc/user/ipv6.rst b/doc/user/ipv6.rst index f3f064b850..8af54ee23d 100644 --- a/doc/user/ipv6.rst +++ b/doc/user/ipv6.rst @@ -91,6 +91,17 @@ Router Advertisement Default: enabled .. index:: + single: ipv6 nd ra-hop-limit (0-255) + single: no ipv6 nd ra-hop-limit [(0-255)] +.. clicmd:: [no] ipv6 nd ra-hop-limit [(0-255)] + + The value to be placed in the hop count field of router advertisements sent + from the interface, in hops. Indicates the maximum diameter of the network. 
+ Setting the value to zero indicates that the value is unspecified by this + router. Must be between zero and 255 hops. + Default: ``64`` + +.. index:: single: ipv6 nd ra-lifetime (0-9000) single: no ipv6 nd ra-lifetime [(0-9000)] .. clicmd:: [no] ipv6 nd ra-lifetime [(0-9000)] diff --git a/doc/user/isisd.rst b/doc/user/isisd.rst index 6684a83e1f..9a0a0afb0c 100644 --- a/doc/user/isisd.rst +++ b/doc/user/isisd.rst @@ -111,6 +111,12 @@ writing, *isisd* does not support multiple ISIS processes. Enable or disable :rfc:`6232` purge originator identification. +.. index:: [no] lsp-mtu (128-4352) +.. clicmd:: [no] lsp-mtu (128-4352) + + Configure the maximum size of generated LSPs, in bytes. + + .. _isis-timer: ISIS Timer diff --git a/doc/user/overview.rst b/doc/user/overview.rst index c9934d1c68..cf8cc44097 100644 --- a/doc/user/overview.rst +++ b/doc/user/overview.rst @@ -300,6 +300,8 @@ BGP :t:`The Generalized TTL Security Mechanism (GTSM). V. Gill, J. Heasley, D. Meyer, P. Savola, C. Pingnataro. October 2007.` - :rfc:`5575` :t:`Dissemination of Flow Specification Rules. P. Marques, N. Sheth, R. Raszuk, B. Greene, J. Mauch, D. McPherson. August 2009` +- :rfc:`6286` + :t:`Autonomous-System-Wide Unique BGP Identifier for BGP-4. E. Chen, J. Yuan, June 2011.` - :rfc:`6608` :t:`Subcodes for BGP Finite State Machine Error. J. Dong, M. Chen, Huawei Technologies, A. Suryanarayana, Cisco Systems. May 2012.` - :rfc:`6810` diff --git a/doc/user/pim.rst b/doc/user/pim.rst index 2aa66d9dd9..2944e0b705 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -166,6 +166,11 @@ Certain signals have special meanings to *pimd*. urib-only Lookup in the Unicast Rib only. +.. index:: no ip msdp mesh-group [WORD] +.. clicmd:: no ip msdp mesh-group [WORD] + + Delete multicast source discovery protocol mesh-group + .. index:: ip igmp generate-query-once [version (2-3)] .. 
clicmd:: ip igmp generate-query-once [version (2-3)] diff --git a/doc/user/setup.rst b/doc/user/setup.rst index 6d61a970d2..f60a66b9fd 100644 --- a/doc/user/setup.rst +++ b/doc/user/setup.rst @@ -6,6 +6,22 @@ Basic Setup After installing FRR, some basic configuration must be completed before it is ready to use. +Crash logs +---------- + +If any daemon should crash for some reason (segmentation fault, assertion +failure, etc.), it will attempt to write a backtrace to a file located in +:file:`/var/tmp/frr/<daemon>[-<instance>].<pid>/crashlog`. This feature is +not affected by any configuration options. + +The crashlog file's directory also contains files corresponding to per-thread +message buffers in files named +:file:`/var/tmp/frr/<daemon>[-<instance>].<pid>/logbuf.<tid>`. In case of a +crash, these may contain unwritten buffered log messages. To show the contents +of these buffers, pipe their contents through ``tr '\0' '\n'``. A blank line +marks the end of valid unwritten data (it will generally be followed by +garbled, older log messages since the buffer is not cleared.) + Daemons Configuration File -------------------------- After a fresh install, starting FRR will do nothing. This is because daemons diff --git a/doc/user/subdir.am b/doc/user/subdir.am index ce519fbfbf..0b64232f3d 100644 --- a/doc/user/subdir.am +++ b/doc/user/subdir.am @@ -44,6 +44,7 @@ user_RSTFILES = \ doc/user/bfd.rst \ doc/user/flowspec.rst \ doc/user/watchfrr.rst \ + doc/user/wecmp_linkbw.rst \ # end EXTRA_DIST += \ diff --git a/doc/user/wecmp_linkbw.rst b/doc/user/wecmp_linkbw.rst new file mode 100644 index 0000000000..0d2fe9d756 --- /dev/null +++ b/doc/user/wecmp_linkbw.rst @@ -0,0 +1,298 @@ +.. _wecmp_linkbw: + +Weighted ECMP using BGP link bandwidth +====================================== + +.. 
_features-of-wecmp-linkbw: + +Overview +-------- + +In normal equal cost multipath (ECMP), the route to a destination has +multiple next hops and traffic is expected to be equally distributed +across these next hops. In practice, flow-based hashing is used so that +all traffic associated with a particular flow uses the same next hop, +and by extension, the same path across the network. + +Weighted ECMP using BGP link bandwidth introduces support for network-wide +unequal cost multipathing (UCMP) to an IP destination. The unequal cost +load balancing is implemented by the forwarding plane based on the weights +associated with the next hops of the IP prefix. These weights are computed +based on the bandwidths of the corresponding multipaths which are encoded +in the ``BGP link bandwidth extended community`` as specified in +[Draft-IETF-idr-link-bandwidth]_. Exchange of an appropriate BGP link +bandwidth value for a prefix across the network results in network-wide +unequal cost multipathing. + +One of the primary use cases of this capability is in the data center when +a service (represented by its anycast IP) has an unequal set of resources +across the regions (e.g., PODs) of the data center and the network itself +provides the load balancing function instead of an external load balancer. +Refer to [Draft-IETF-mohanty-bess-ebgp-dmz]_ and :rfc:`7938` for details +on this use case. This use case is applicable in a pure L3 network as +well as in an EVPN network. + +The traditional use case for BGP link bandwidth to load balance traffic +to the exit routers in the AS based on the bandwidth of their external +eBGP peering links is also supported. + + +Design Principles +----------------- + +Next hop weight computation and usage +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As described, in UCMP, there is a weight associated with each next hop of an +IP prefix, and traffic is expected to be distributed across the next hops in +proportion to their weight. 
The weight of a next hop is a simple factoring +of the bandwidth of the corresponding path against the total bandwidth of +all multipaths, mapped to the range 1 to 100. What happens if not all the +paths in the multipath set have link bandwidth associated with them? In such +a case, in adherence to [Draft-IETF-idr-link-bandwidth]_, the behavior +reverts to standard ECMP among all the multipaths, with the link bandwidth +being effectively ignored. + +Note that there is no change to either the BGP best path selection algorithm +or to the multipath computation algorithm; the mapping of link bandwidth to +weight happens at the time of installation of the route in the RIB. + +If data forwarding is implemented by means of the Linux kernel, the next hop’s +weight is used in the hash calculation. The kernel uses the Hash threshold +algorithm and use of the next hop weight is built into it; next hops need +not be expanded to achieve UCMP. UCMP for IPv4 is available in older Linux +kernels too, while UCMP for IPv6 is available from the 4.16 kernel onwards. + +If data forwarding is realized in hardware, common implementations expand +the next hops (i.e., they are repeated) in the ECMP container in proportion +to their weight. For example, if the weights associated with 3 next hops for +a particular route are 50, 25 and 25 and the ECMP container has a size of 16 +next hops, the first next hop will be repeated 8 times and the other 2 next +hops repeated 4 times each. Other implementations are also possible. + +Unequal cost multipath across a network +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For the use cases listed above, it is not sufficient to support UCMP on just +one router (e.g., egress router), or individually, on multiple routers; UCMP +must be deployed across the entire network. This is achieved by employing the +BGP link-bandwidth extended community. 
+ +At the router which originates the BGP link bandwidth, there has to be user +configuration to trigger it, which is described below. Receiving routers +would use the received link bandwidth from their downstream routers to +determine the next hop weight as described in the earlier section. Further, +if the received link bandwidth is a transitive attribute, it would be +propagated to eBGP peers, with the additional change that if the next hop +is set to oneself, the cumulative link bandwidth of all downstream paths +is propagated to other routers. In this manner, the entire network will +know how to distribute traffic to an anycast service across the network. + +The BGP link-bandwidth extended community is encoded in bytes-per-second. +In the use case where UCMP must be based on the number of paths, a reference +bandwidth of 1 Mbps is used. So, for example, if there are 4 equal cost paths +to an anycast IP, the encoded bandwidth in the extended community will be +500,000. The actual value itself doesn’t matter as long as all routers +originating the link-bandwidth are doing it in the same way. + + +Configuration Guide +------------------- + +The configuration for weighted ECMP using BGP link bandwidth requires +one essential step - using a route-map to inject the link bandwidth +extended community. An additional option is provided to control the +processing of received link bandwidth. + +Injecting link bandwidth into the network +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +At the "entry point" router that is injecting the prefix to which weighted +load balancing must be performed, a route-map must be configured to +attach the link bandwidth extended community. + +For the use case of providing weighted load balancing for an anycast service, +this configuration will typically need to be applied at the TOR or Leaf +router that is connected to servers which provide the anycast service and +the bandwidth would be based on the number of multipaths for the destination. 
+ +For the use case of load balancing to the exit router, the exit router should +be configured with the route map specifying a bandwidth value that +corresponds to the bandwidth of the link connecting to its eBGP peer in the +adjoining AS. In addition, the link bandwidth extended community must be +explicitly configured to be non-transitive. + +The complete syntax of the route-map set command can be found at +:ref:`bgp-extended-communities-in-route-map` + +This route-map is supported only at two attachment points: +(a) the outbound route-map attached to a peer or peer-group, per address-family +(b) the EVPN advertise route-map used to inject IPv4 or IPv6 unicast routes +into EVPN as type-5 routes. + +Since the link bandwidth origination is done by using a route-map, it can +be constrained to certain prefixes (e.g., only for anycast services) or it +can be generated for all prefixes. Further, when the route-map is used in +the neighbor context, the link bandwidth usage can be constrained to certain +peers only. + +A sample configuration is shown below and illustrates link bandwidth +advertisement towards the "SPINE" peer-group for anycast IPs in the +range 192.168.x.x + +.. code-block:: frr + + ip prefix-list anycast_ip seq 10 permit 192.168.0.0/16 le 32 + route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths + route-map anycast_ip permit 20 + ! + router bgp 65001 + neighbor SPINE peer-group + neighbor SPINE remote-as external + neighbor 172.16.35.1 peer-group SPINE + neighbor 172.16.36.1 peer-group SPINE + ! + address-family ipv4 unicast + network 110.0.0.1/32 + network 192.168.44.1/32 + neighbor SPINE route-map anycast_ip out + exit-address-family + ! 
+ + +Controlling link bandwidth processing on the receiver +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There is no configuration necessary to process received link bandwidth and +translate it into the weight associated with the corresponding next hop; +that happens by default. If some of the multipaths do not have the link +bandwidth extended community, the default behavior is to revert to normal +ECMP as recommended in [Draft-IETF-idr-link-bandwidth]_. + +The operator can change these behaviors with the following configuration: + +.. index:: bgp bestpath bandwidth <ignore | skip-missing | default-weight-for-missing> +.. clicmd:: bgp bestpath bandwidth <ignore | skip-missing | default-weight-for-missing> + +The different options imply behavior as follows: + +- ignore: Ignore link bandwidth completely for route installation + (i.e., do regular ECMP, not weighted) +- skip-missing: Skip paths without link bandwidth and do UCMP among + the others (if at least some paths have link-bandwidth) +- default-weight-for-missing: Assign a low default weight (value 1) + to paths not having link bandwidth + +This configuration is per BGP instance similar to other BGP route-selection +controls; it operates on both IPv4-unicast and IPv6-unicast routes in that +instance. In an EVPN network, this configuration (if required) should be +implemented in the tenant VRF and is again applicable for IPv4-unicast and +IPv6-unicast, including the ones sourced from EVPN type-5 routes. + +A sample snippet of FRR configuration on a receiver to skip paths without +link bandwidth and do weighted ECMP among the other paths (if some of them +have link bandwidth) is as shown below. + +.. code-block:: frr + + router bgp 65021 + bgp bestpath as-path multipath-relax + bgp bestpath bandwidth skip-missing + neighbor LEAF peer-group + neighbor LEAF remote-as external + neighbor 172.16.35.2 peer-group LEAF + neighbor 172.16.36.2 peer-group LEAF + ! 
+ address-family ipv4 unicast + network 130.0.0.1/32 + exit-address-family + ! + + +Stopping the propagation of the link bandwidth outside a domain +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The link bandwidth extended community will get automatically propagated +with the prefix to EBGP peers, if it is encoded as a transitive attribute +by the originator. If this propagation has to be stopped outside of a +particular domain (e.g., stopped from being propagated to routers outside +of the data center core network), the mechanism available is to disable +the advertisement of all BGP extended communities on the specific peering/s. +In other words, the propagation cannot be blocked just for the link bandwidth +extended community. The configuration to disable all extended communities +can be applied to a peer or peer-group (per address-family). + +Of course, the other common way to stop the propagation of the link bandwidth +outside the domain is to block the prefixes themselves from being advertised +and possibly, announce only an aggregate route. This would be quite common +in a EVPN network. + +BGP link bandwidth and UCMP monitoring & troubleshooting +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Existing operational commands to display the BGP routing table for a specific +prefix will show the link bandwidth extended community also, if present. + +An example of an IPv4-unicast route received with the link bandwidth +attribute from two peers is shown below: + +.. 
code-block:: frr + + CLI# show bgp ipv4 unicast 192.168.10.1/32 + BGP routing table entry for 192.168.10.1/32 + Paths: (2 available, best #2, table default) + Advertised to non peer-group peers: + l1(swp1) l2(swp2) l3(swp3) l4(swp4) + 65002 + fe80::202:ff:fe00:1b from l2(swp2) (110.0.0.2) + (fe80::202:ff:fe00:1b) (used) + Origin IGP, metric 0, valid, external, multipath, bestpath-from-AS 65002 + Extended Community: LB:65002:125000000 (1000.000 Mbps) + Last update: Thu Feb 20 18:34:16 2020 + + 65001 + fe80::202:ff:fe00:15 from l1(swp1) (110.0.0.1) + (fe80::202:ff:fe00:15) (used) + Origin IGP, metric 0, valid, external, multipath, bestpath-from-AS 65001, best (Older Path) + Extended Community: LB:65001:62500000 (500.000 Mbps) + Last update: Thu Feb 20 18:22:34 2020 + +The weights associated with the next hops of a route can be seen by querying +the RIB for a specific route. + +For example, the next hop weights corresponding to the link bandwidths in the +above example is illustrated below: + +.. code-block:: frr + + spine1# show ip route 192.168.10.1/32 + Routing entry for 192.168.10.1/32 + Known via "bgp", distance 20, metric 0, best + Last update 00:00:32 ago + * fe80::202:ff:fe00:1b, via swp2, weight 66 + * fe80::202:ff:fe00:15, via swp1, weight 33 + +For troubleshooting, existing debug logs ``debug bgp updates``, +``debug bgp bestpath <prefix>``, ``debug bgp zebra`` and +``debug zebra kernel`` can be used. + +A debug log snippet when ``debug bgp zebra`` is enabled and a route is +installed by BGP in the RIB with next hop weights is shown below: + +.. 
code-block:: frr + + 2020-02-29T06:26:19.927754+00:00 leaf1 bgpd[5459]: bgp_zebra_announce: p=192.168.150.1/32, bgp_is_valid_label: 0 + 2020-02-29T06:26:19.928096+00:00 leaf1 bgpd[5459]: Tx route add VRF 33 192.168.150.1/32 metric 0 tag 0 count 2 + 2020-02-29T06:26:19.928289+00:00 leaf1 bgpd[5459]: nhop [1]: 110.0.0.6 if 35 VRF 33 wt 50 RMAC 0a:11:2f:7d:35:20 + 2020-02-29T06:26:19.928479+00:00 leaf1 bgpd[5459]: nhop [2]: 110.0.0.5 if 35 VRF 33 wt 50 RMAC 32:1e:32:a3:6c:bf + 2020-02-29T06:26:19.928668+00:00 leaf1 bgpd[5459]: bgp_zebra_announce: 192.168.150.1/32: announcing to zebra (recursion NOT set) + + +References +---------- + +.. [Draft-IETF-idr-link-bandwidth] <https://tools.ietf.org/html/draft-ietf-idr-link-bandwidth> +.. [Draft-IETF-mohanty-bess-ebgp-dmz] <https://tools.ietf.org/html/draft-mohanty-bess-ebgp-dmz> + diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index 520080e83a..3629b47877 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -736,43 +736,30 @@ these cases, the FIB needs to be maintained reliably in the fast path as well. We refer to the component that programs the forwarding plane (directly or indirectly) as the Forwarding Plane Manager or FPM. -The FIB push interface comprises of a TCP connection between zebra and -the FPM. The connection is initiated by zebra -- that is, the FPM acts -as the TCP server. - .. program:: configure The relevant zebra code kicks in when zebra is configured with the -:option:`--enable-fpm` flag. Zebra periodically attempts to connect to -the well-known FPM port. Once the connection is up, zebra starts -sending messages containing routes over the socket to the FPM. Zebra -sends a complete copy of the forwarding table to the FPM, including -routes that it may have picked up from the kernel. The existing -interaction of zebra with the kernel remains unchanged -- that is, the -kernel continues to receive FIB updates as before. 
- -The encapsulation header for the messages exchanged with the FPM is -defined by the file :file:`fpm/fpm.h` in the frr tree. The routes -themselves are encoded in Netlink or protobuf format, with Netlink -being the default. - -Protobuf is one of a number of new serialization formats wherein the -message schema is expressed in a purpose-built language. Code for -encoding/decoding to/from the wire format is generated from the -schema. Protobuf messages can be extended easily while maintaining -backward-compatibility with older code. Protobuf has the following -advantages over Netlink: - -- Code for serialization/deserialization is generated automatically. This - reduces the likelihood of bugs, allows third-party programs to be integrated - quickly, and makes it easy to add fields. -- The message format is not tied to an OS (Linux), and can be evolved - independently. - -As mentioned before, zebra encodes routes sent to the FPM in Netlink -format by default. The format can be controlled via the FPM module's -load-time option to zebra, which currently takes the values `Netlink` -and `protobuf`. +:option:`--enable-fpm` flag and started with the module (``-M fpm`` +or ``-M dplane_fpm_nl``). + +.. note:: + + The ``fpm`` implementation attempts to connect to ``127.0.0.1`` port ``2620`` + by default without configurations. The ``dplane_fpm_nl`` only attempts to + connect to a server if configured. + +Zebra periodically attempts to connect to the well-known FPM port (``2620``). +Once the connection is up, zebra starts sending messages containing routes +over the socket to the FPM. Zebra sends a complete copy of the forwarding +table to the FPM, including routes that it may have picked up from the kernel. +The existing interaction of zebra with the kernel remains unchanged -- that +is, the kernel continues to receive FIB updates as before. + +The default FPM message format is netlink, however it can be controlled +with the module load-time option. 
The modules accept the following options: + +- ``fpm``: ``netlink`` and ``protobuf``. +- ``dplane_fpm_nl``: none, it only implements netlink. The zebra FPM interface uses replace semantics. That is, if a 'route add' message for a prefix is followed by another 'route add' message, @@ -782,6 +769,119 @@ replaces the information sent in the first message. If the connection to the FPM goes down for some reason, zebra sends the FPM a complete copy of the forwarding table(s) when it reconnects. +For more details on the implementation, please read the developer's manual FPM +section. + +FPM Commands +============ + +``fpm`` implementation +---------------------- + +.. index:: fpm connection ip A.B.C.D port (1-65535) +.. clicmd:: fpm connection ip A.B.C.D port (1-65535) + + Configure ``zebra`` to connect to a different FPM server than + ``127.0.0.1`` port ``2620``. + + +.. index:: no fpm connection ip A.B.C.D port (1-65535) +.. clicmd:: no fpm connection ip A.B.C.D port (1-65535) + + Configure ``zebra`` to connect to the default FPM server at ``127.0.0.1`` + port ``2620``. + + +.. index:: show zebra fpm stats +.. clicmd:: show zebra fpm stats + + Shows the FPM statistics. + + Sample output: + + :: + + Counter Total Last 10 secs + + connect_calls 3 2 + connect_no_sock 0 0 + read_cb_calls 2 2 + write_cb_calls 2 0 + write_calls 1 0 + partial_writes 0 0 + max_writes_hit 0 0 + t_write_yields 0 0 + nop_deletes_skipped 6 0 + route_adds 5 0 + route_dels 0 0 + updates_triggered 11 0 + redundant_triggers 0 0 + dests_del_after_update 0 0 + t_conn_down_starts 0 0 + t_conn_down_dests_processed 0 0 + t_conn_down_yields 0 0 + t_conn_down_finishes 0 0 + t_conn_up_starts 1 0 + t_conn_up_dests_processed 11 0 + t_conn_up_yields 0 0 + t_conn_up_aborts 0 0 + t_conn_up_finishes 1 0 + + +.. index:: clear zebra fpm stats +.. clicmd:: clear zebra fpm stats + + Resets all FPM counters. + + +``dplane_fpm_nl`` implementation +-------------------------------- + +.. 
index:: fpm address <A.B.C.D|X:X::X:X> [port (1-65535)] +.. clicmd:: fpm address <A.B.C.D|X:X::X:X> [port (1-65535)] + + Configures the FPM server address. Once configured ``zebra`` will attempt + to connect to it immediately. + + +.. index:: no fpm address [<A.B.C.D|X:X::X:X> [port (1-65535)]] +.. clicmd:: no fpm address [<A.B.C.D|X:X::X:X> [port (1-65535)]] + + Disables FPM entirely. ``zebra`` will close any current connections and + will not attempt to connect to it anymore. + + +.. index:: show fpm counters [json] +.. clicmd:: show fpm counters [json] + + Show the FPM statistics (plain text or JSON formatted). + + Sample output: + + :: + + FPM counters + ============ + Input bytes: 0 + Output bytes: 308 + Output buffer current size: 0 + Output buffer peak size: 308 + Connection closes: 0 + Connection errors: 0 + Data plane items processed: 0 + Data plane items enqueued: 0 + Data plane items queue peak: 0 + Buffer full hits: 0 + User FPM configurations: 1 + User FPM disable requests: 0 + + +.. index:: clear fpm counters +.. clicmd:: clear fpm counters + + Resets all FPM counters. + + .. _zebra-dplane: Dataplane Commands diff --git a/eigrpd/eigrp_cli.c b/eigrpd/eigrp_cli.c index a93d4c8280..c76e067685 100644 --- a/eigrpd/eigrp_cli.c +++ b/eigrpd/eigrp_cli.c @@ -838,7 +838,14 @@ void eigrp_cli_show_keychain(struct vty *vty, struct lyd_node *dnode, /* * CLI installation procedures. 
*/ -static struct cmd_node eigrp_node = {EIGRP_NODE, "%s(config-router)# ", 1}; +static int eigrp_config_write(struct vty *vty); +static struct cmd_node eigrp_node = { + .name = "eigrp", + .node = EIGRP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = eigrp_config_write, +}; static int eigrp_config_write(struct vty *vty) { @@ -854,8 +861,14 @@ static int eigrp_config_write(struct vty *vty) return written; } -static struct cmd_node eigrp_interface_node = {INTERFACE_NODE, - "%s(config-if)# ", 1}; +static int eigrp_write_interface(struct vty *vty); +static struct cmd_node eigrp_interface_node = { + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = eigrp_write_interface, +}; static int eigrp_write_interface(struct vty *vty) @@ -888,7 +901,7 @@ eigrp_cli_init(void) install_element(CONFIG_NODE, &router_eigrp_cmd); install_element(CONFIG_NODE, &no_router_eigrp_cmd); - install_node(&eigrp_node, eigrp_config_write); + install_node(&eigrp_node); install_default(EIGRP_NODE); install_element(EIGRP_NODE, &eigrp_router_id_cmd); @@ -906,7 +919,7 @@ eigrp_cli_init(void) install_element(EIGRP_NODE, &eigrp_neighbor_cmd); install_element(EIGRP_NODE, &eigrp_redistribute_source_metric_cmd); - install_node(&eigrp_interface_node, eigrp_write_interface); + install_node(&eigrp_interface_node); if_cmd_init(); install_element(INTERFACE_NODE, &eigrp_if_delay_cmd); diff --git a/eigrpd/eigrp_dump.c b/eigrpd/eigrp_dump.c index 7278b002d8..a3d6eb2e9e 100644 --- a/eigrpd/eigrp_dump.c +++ b/eigrpd/eigrp_dump.c @@ -555,14 +555,18 @@ DEFUN (no_debug_eigrp_packets, } /* Debug node. */ +static int config_write_debug(struct vty *vty); static struct cmd_node eigrp_debug_node = { - DEBUG_NODE, "", 1 /* VTYSH */ + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_debug, }; /* Initialize debug commands. 
*/ void eigrp_debug_init(void) { - install_node(&eigrp_debug_node, config_write_debug); + install_node(&eigrp_debug_node); install_element(ENABLE_NODE, &show_debugging_eigrp_cmd); install_element(ENABLE_NODE, &debug_eigrp_packets_all_cmd); diff --git a/eigrpd/eigrp_main.c b/eigrpd/eigrp_main.c index add758fa21..cdf1c6acdb 100644 --- a/eigrpd/eigrp_main.c +++ b/eigrpd/eigrp_main.c @@ -141,6 +141,7 @@ static const struct frr_yang_module_info *const eigrpd_yang_modules[] = { &frr_eigrpd_info, &frr_interface_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(eigrpd, EIGRP, .vty_port = EIGRP_VTY_PORT, diff --git a/eigrpd/eigrp_network.c b/eigrpd/eigrp_network.c index 39008a01c4..072ff29705 100644 --- a/eigrpd/eigrp_network.c +++ b/eigrpd/eigrp_network.c @@ -218,7 +218,7 @@ int eigrp_network_set(struct eigrp *eigrp, struct prefix *p) struct route_node *rn; struct interface *ifp; - rn = route_node_get(eigrp->networks, (struct prefix *)p); + rn = route_node_get(eigrp->networks, p); if (rn->info) { /* There is already same network statement. 
*/ route_unlock_node(rn); diff --git a/isisd/fabricd.c b/isisd/fabricd.c index 28cc65380f..4a4b25fa1d 100644 --- a/isisd/fabricd.c +++ b/isisd/fabricd.c @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/fabricd.h b/isisd/fabricd.h index 6e93440f3a..9455cdb0f0 100644 --- a/isisd/fabricd.h +++ b/isisd/fabricd.h @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c index 9beed206e8..4e0ee4448b 100644 --- a/isisd/isis_adjacency.c +++ b/isisd/isis_adjacency.c @@ -201,13 +201,14 @@ void isis_adj_process_threeway(struct isis_adjacency *adj, fabricd_initial_sync_hello(adj->circuit); if (next_tw_state == ISIS_THREEWAY_DOWN) { - isis_adj_state_change(adj, ISIS_ADJ_DOWN, "Neighbor restarted"); + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, + "Neighbor restarted"); return; } if (next_tw_state == ISIS_THREEWAY_UP) { if (adj->adj_state != ISIS_ADJ_UP) { - isis_adj_state_change(adj, ISIS_ADJ_UP, NULL); + isis_adj_state_change(&adj, ISIS_ADJ_UP, NULL); adj->adj_usage = adj_usage; } } @@ -219,12 +220,13 @@ void isis_adj_process_threeway(struct isis_adjacency *adj, adj->threeway_state = next_tw_state; } -void isis_adj_state_change(struct isis_adjacency *adj, +void isis_adj_state_change(struct isis_adjacency **padj, enum isis_adj_state new_state, const char *reason) { + struct isis_adjacency *adj = *padj; enum isis_adj_state old_state = adj->adj_state; struct isis_circuit *circuit = adj->circuit; - bool del; + bool del = false; adj->adj_state = 
new_state; if (new_state != old_state) { @@ -262,7 +264,6 @@ void isis_adj_state_change(struct isis_adjacency *adj, #endif /* ifndef FABRICD */ if (circuit->circ_type == CIRCUIT_T_BROADCAST) { - del = false; for (int level = IS_LEVEL_1; level <= IS_LEVEL_2; level++) { if ((adj->level & level) == 0) continue; @@ -299,11 +300,7 @@ void isis_adj_state_change(struct isis_adjacency *adj, lsp_regenerate_schedule_pseudo(circuit, level); } - if (del) - isis_delete_adj(adj); - } else if (circuit->circ_type == CIRCUIT_T_P2P) { - del = false; for (int level = IS_LEVEL_1; level <= IS_LEVEL_2; level++) { if ((adj->level & level) == 0) continue; @@ -336,9 +333,11 @@ void isis_adj_state_change(struct isis_adjacency *adj, del = true; } } + } - if (del) - isis_delete_adj(adj); + if (del) { + isis_delete_adj(adj); + *padj = NULL; } } @@ -402,7 +401,7 @@ int isis_adj_expire(struct thread *thread) adj->t_expire = NULL; /* trigger the adj expire event */ - isis_adj_state_change(adj, ISIS_ADJ_DOWN, "holding time expired"); + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "holding time expired"); return 0; } diff --git a/isisd/isis_adjacency.h b/isisd/isis_adjacency.h index 93583fc122..8f3d63c297 100644 --- a/isisd/isis_adjacency.h +++ b/isisd/isis_adjacency.h @@ -118,7 +118,7 @@ void isis_adj_process_threeway(struct isis_adjacency *adj, struct isis_threeway_adj *tw_adj, enum isis_adj_usage adj_usage); DECLARE_HOOK(isis_adj_state_change_hook, (struct isis_adjacency *adj), (adj)) -void isis_adj_state_change(struct isis_adjacency *adj, +void isis_adj_state_change(struct isis_adjacency **adj, enum isis_adj_state state, const char *reason); void isis_adj_print(struct isis_adjacency *adj); const char *isis_adj_yang_state(enum isis_adj_state state); diff --git a/isisd/isis_bfd.c b/isisd/isis_bfd.c index 68be9c1a99..2ff5979d14 100644 --- a/isisd/isis_bfd.c +++ b/isisd/isis_bfd.c @@ -138,7 +138,7 @@ static void bfd_adj_event(struct isis_adjacency *adj, struct prefix *dst, return; } - 
isis_adj_state_change(adj, ISIS_ADJ_DOWN, "bfd session went down"); + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "bfd session went down"); } static int isis_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS) diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index 7d4f7b355d..4add1e6bfd 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -67,7 +67,6 @@ DEFINE_HOOK(isis_if_new_hook, (struct interface *ifp), (ifp)) /* * Prototypes. */ -int isis_interface_config_write(struct vty *); int isis_if_new_hook(struct interface *); int isis_if_delete_hook(struct interface *); @@ -969,7 +968,7 @@ DEFINE_HOOK(isis_circuit_config_write, (circuit, vty)) #ifdef FABRICD -int isis_interface_config_write(struct vty *vty) +static int isis_interface_config_write(struct vty *vty) { struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT); int write = 0; @@ -1192,7 +1191,7 @@ int isis_interface_config_write(struct vty *vty) return write; } #else -int isis_interface_config_write(struct vty *vty) +static int isis_interface_config_write(struct vty *vty) { struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT); int write = 0; @@ -1336,7 +1335,11 @@ ferr_r isis_circuit_passwd_hmac_md5_set(struct isis_circuit *circuit, } struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", 1, + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = isis_interface_config_write, }; void isis_circuit_circ_type_set(struct isis_circuit *circuit, int circ_type) @@ -1441,7 +1444,7 @@ void isis_circuit_init(void) hook_register_prio(if_del, 0, isis_if_delete_hook); /* Install interface node */ - install_node(&interface_node, isis_interface_config_write); + install_node(&interface_node); if_cmd_init(); if_zapi_callbacks(isis_ifp_create, isis_ifp_up, isis_ifp_down, isis_ifp_destroy); diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c index 2483d26833..c12c7fa936 100644 --- a/isisd/isis_cli.c +++ b/isisd/isis_cli.c @@ -402,21 +402,7 @@ 
DEFPY(no_is_type, no_is_type_cmd, "Act as both a station router and an area router\n" "Act as an area router only\n") { - const char *value = NULL; - struct isis_area *area; - - area = nb_running_get_entry(NULL, VTY_CURR_XPATH, false); - - /* - * Put the is-type back to defaults: - * - level-1-2 on first area - * - level-1 for the rest - */ - if (area && listgetdata(listhead(isis->area_list)) == area) - value = "level-1-2"; - else - value = NULL; - nb_cli_enqueue_change(vty, "./is-type", NB_OP_MODIFY, value); + nb_cli_enqueue_change(vty, "./is-type", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } @@ -527,7 +513,7 @@ DEFPY(no_metric_style, no_metric_style_cmd, "Send and accept both styles of TLVs during transition\n" "Use new style of TLVs to carry wider metric\n") { - nb_cli_enqueue_change(vty, "./metric-style", NB_OP_MODIFY, "narrow"); + nb_cli_enqueue_change(vty, "./metric-style", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } @@ -640,7 +626,8 @@ void cli_show_isis_domain_pwd(struct vty *vty, struct lyd_node *dnode, } /* - * XPath: /frr-isisd:isis/instance/lsp/generation-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/generation-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/generation-interval */ DEFPY(lsp_gen_interval, lsp_gen_interval_cmd, "lsp-gen-interval [level-1|level-2]$level (1-120)$val", @@ -650,11 +637,13 @@ DEFPY(lsp_gen_interval, lsp_gen_interval_cmd, "Minimum interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-1", - NB_OP_MODIFY, val_str); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, val_str); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-2", - NB_OP_MODIFY, val_str); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, val_str); return nb_cli_apply_changes(vty, 
NULL); } @@ -668,31 +657,20 @@ DEFPY(no_lsp_gen_interval, no_lsp_gen_interval_cmd, "Minimum interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-1", - NB_OP_MODIFY, NULL); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, NULL); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-2", - NB_OP_MODIFY, NULL); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } -void cli_show_isis_lsp_gen_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults) -{ - const char *l1 = yang_dnode_get_string(dnode, "./level-1"); - const char *l2 = yang_dnode_get_string(dnode, "./level-2"); - - if (strmatch(l1, l2)) - vty_out(vty, " lsp-gen-interval %s\n", l1); - else { - vty_out(vty, " lsp-gen-interval level-1 %s\n", l1); - vty_out(vty, " lsp-gen-interval level-2 %s\n", l2); - } -} - /* - * XPath: /frr-isisd:isis/instance/lsp/refresh-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval */ DEFPY(lsp_refresh_interval, lsp_refresh_interval_cmd, "lsp-refresh-interval [level-1|level-2]$level (1-65235)$val", @@ -702,10 +680,12 @@ DEFPY(lsp_refresh_interval, lsp_refresh_interval_cmd, "LSP refresh interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-1", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", NB_OP_MODIFY, val_str); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-2", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", NB_OP_MODIFY, val_str); return nb_cli_apply_changes(vty, NULL); @@ -720,32 +700,22 @@ DEFPY(no_lsp_refresh_interval, 
no_lsp_refresh_interval_cmd,
      "LSP refresh interval in seconds\n")
 {
 	if (!level || strmatch(level, "level-1"))
-		nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-1",
+		nb_cli_enqueue_change(vty,
+				      "./lsp/timers/level-1/refresh-interval",
 				      NB_OP_MODIFY, NULL);
 	if (!level || strmatch(level, "level-2"))
-		nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-2",
+		nb_cli_enqueue_change(vty,
+				      "./lsp/timers/level-2/refresh-interval",
 				      NB_OP_MODIFY, NULL);
 
 	return nb_cli_apply_changes(vty, NULL);
 }
 
-void cli_show_isis_lsp_ref_interval(struct vty *vty, struct lyd_node *dnode,
-				    bool show_defaults)
-{
-	const char *l1 = yang_dnode_get_string(dnode, "./level-1");
-	const char *l2 = yang_dnode_get_string(dnode, "./level-2");
-
-	if (strmatch(l1, l2))
-		vty_out(vty, " lsp-refresh-interval %s\n", l1);
-	else {
-		vty_out(vty, " lsp-refresh-interval level-1 %s\n", l1);
-		vty_out(vty, " lsp-refresh-interval level-2 %s\n", l2);
-	}
-}
-
 /*
- * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime
  */
+
 DEFPY(max_lsp_lifetime, max_lsp_lifetime_cmd,
       "max-lsp-lifetime [level-1|level-2]$level (350-65535)$val",
       "Maximum LSP lifetime\n"
@@ -754,10 +724,12 @@ DEFPY(max_lsp_lifetime, max_lsp_lifetime_cmd,
       "LSP lifetime in seconds\n")
 {
 	if (!level || strmatch(level, "level-1"))
-		nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-1",
+		nb_cli_enqueue_change(vty,
+				      "./lsp/timers/level-1/maximum-lifetime",
 				      NB_OP_MODIFY, val_str);
 	if (!level || strmatch(level, "level-2"))
-		nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-2",
+		nb_cli_enqueue_change(vty,
+				      "./lsp/timers/level-2/maximum-lifetime",
 				      NB_OP_MODIFY, val_str);
 
 	return nb_cli_apply_changes(vty, NULL);
@@ -772,26 +744,125 @@ DEFPY(no_max_lsp_lifetime, no_max_lsp_lifetime_cmd,
       "LSP lifetime in seconds\n")
 {
 	if (!level || strmatch(level, "level-1"))
-		
nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-1", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", NB_OP_MODIFY, NULL); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-2", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } -void cli_show_isis_lsp_max_lifetime(struct vty *vty, struct lyd_node *dnode, - bool show_defaults) +/* unified LSP timers command + * XPath: /frr-isisd:isis/instance/lsp/timers + */ + +DEFPY(lsp_timers, lsp_timers_cmd, + "lsp-timers [level-1|level-2]$level gen-interval (1-120)$gen refresh-interval (1-65235)$refresh max-lifetime (350-65535)$lifetime", + "LSP-related timers\n" + "LSP-related timers for Level 1 only\n" + "LSP-related timers for Level 2 only\n" + "Minimum interval between regenerating same LSP\n" + "Generation interval in seconds\n" + "LSP refresh interval\n" + "LSP refresh interval in seconds\n" + "Maximum LSP lifetime\n" + "Maximum LSP lifetime in seconds\n") { - const char *l1 = yang_dnode_get_string(dnode, "./level-1"); - const char *l2 = yang_dnode_get_string(dnode, "./level-2"); + if (!level || strmatch(level, "level-1")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, gen_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", + NB_OP_MODIFY, refresh_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", + NB_OP_MODIFY, lifetime_str); + } + if (!level || strmatch(level, "level-2")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, gen_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", + NB_OP_MODIFY, refresh_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", + NB_OP_MODIFY, lifetime_str); + } - if (strmatch(l1, l2)) - vty_out(vty, " max-lsp-lifetime %s\n", l1); + return 
nb_cli_apply_changes(vty, NULL); +} + +DEFPY(no_lsp_timers, no_lsp_timers_cmd, + "no lsp-timers [level-1|level-2]$level [gen-interval (1-120) refresh-interval (1-65235) max-lifetime (350-65535)]", + NO_STR + "LSP-related timers\n" + "LSP-related timers for Level 1 only\n" + "LSP-related timers for Level 2 only\n" + "Minimum interval between regenerating same LSP\n" + "Generation interval in seconds\n" + "LSP refresh interval\n" + "LSP refresh interval in seconds\n" + "Maximum LSP lifetime\n" + "Maximum LSP lifetime in seconds\n") +{ + if (!level || strmatch(level, "level-1")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", + NB_OP_MODIFY, NULL); + } + if (!level || strmatch(level, "level-2")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", + NB_OP_MODIFY, NULL); + } + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_isis_lsp_timers(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + const char *l1_refresh = + yang_dnode_get_string(dnode, "./level-1/refresh-interval"); + const char *l2_refresh = + yang_dnode_get_string(dnode, "./level-2/refresh-interval"); + const char *l1_lifetime = + yang_dnode_get_string(dnode, "./level-1/maximum-lifetime"); + const char *l2_lifetime = + yang_dnode_get_string(dnode, "./level-2/maximum-lifetime"); + const char *l1_gen = + yang_dnode_get_string(dnode, "./level-1/generation-interval"); + const char *l2_gen = + yang_dnode_get_string(dnode, "./level-2/generation-interval"); + if (strmatch(l1_refresh, l2_refresh) + && strmatch(l1_lifetime, l2_lifetime) && strmatch(l1_gen, 
l2_gen)) + vty_out(vty, + " lsp-timers gen-interval %s refresh-interval %s max-lifetime %s\n", + l1_gen, l1_refresh, l1_lifetime); else { - vty_out(vty, " max-lsp-lifetime level-1 %s\n", l1); - vty_out(vty, " max-lsp-lifetime level-2 %s\n", l2); + vty_out(vty, + " lsp-timers level-1 gen-interval %s refresh-interval %s max-lifetime %s\n", + l1_gen, l1_refresh, l1_lifetime); + vty_out(vty, + " lsp-timers level-2 gen-interval %s refresh-interval %s max-lifetime %s\n", + l2_gen, l2_refresh, l2_lifetime); } } @@ -2001,6 +2072,8 @@ void isis_cli_init(void) install_element(ISIS_NODE, &no_lsp_refresh_interval_cmd); install_element(ISIS_NODE, &max_lsp_lifetime_cmd); install_element(ISIS_NODE, &no_max_lsp_lifetime_cmd); + install_element(ISIS_NODE, &lsp_timers_cmd); + install_element(ISIS_NODE, &no_lsp_timers_cmd); install_element(ISIS_NODE, &area_lsp_mtu_cmd); install_element(ISIS_NODE, &no_area_lsp_mtu_cmd); diff --git a/isisd/isis_main.c b/isisd/isis_main.c index f7fe089b99..4c841dffe2 100644 --- a/isisd/isis_main.c +++ b/isisd/isis_main.c @@ -168,6 +168,7 @@ static const struct frr_yang_module_info *const isisd_yang_modules[] = { &frr_isisd_info, #endif /* ifndef FABRICD */ &frr_route_map_info, + &frr_vrf_info, }; #ifdef FABRICD diff --git a/isisd/isis_misc.c b/isisd/isis_misc.c index 96b76da92d..25f7f8609b 100644 --- a/isisd/isis_misc.c +++ b/isisd/isis_misc.c @@ -29,6 +29,7 @@ #include "hash.h" #include "if.h" #include "command.h" +#include "network.h" #include "isisd/isis_constants.h" #include "isisd/isis_common.h" @@ -413,7 +414,7 @@ unsigned long isis_jitter(unsigned long timer, unsigned long jitter) * most IS-IS timers are no longer than 16 bit */ - j = 1 + (int)((RANDOM_SPREAD * random()) / (RAND_MAX + 1.0)); + j = 1 + (int)((RANDOM_SPREAD * frr_weak_random()) / (RAND_MAX + 1.0)); k = timer - (timer * (100 - jitter)) / 100; diff --git a/isisd/isis_mt.c b/isisd/isis_mt.c index 36413bac59..e8e35ae63b 100644 --- a/isisd/isis_mt.c +++ b/isisd/isis_mt.c @@ -3,7 +3,7 @@ 
* * Copyright (C) 2017 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_mt.h b/isisd/isis_mt.h index b40139c50a..fd9ee133ca 100644 --- a/isisd/isis_mt.h +++ b/isisd/isis_mt.h @@ -3,7 +3,7 @@ * * Copyright (C) 2017 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c index d84e533240..da4322bd44 100644 --- a/isisd/isis_nb.c +++ b/isisd/isis_nb.c @@ -95,55 +95,43 @@ const struct frr_yang_module_info frr_isisd_info = { }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval", + .xpath = "/frr-isisd:isis/instance/lsp/timers", .cbs = { - .cli_show = cli_show_isis_lsp_ref_interval, + .cli_show = cli_show_isis_lsp_timers, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval/level-1", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval", .cbs = { .modify = isis_instance_lsp_refresh_interval_level_1_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval/level-2", - .cbs = { - .modify = isis_instance_lsp_refresh_interval_level_2_modify, - }, - }, - { - .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime", - .cbs = { - .cli_show = cli_show_isis_lsp_max_lifetime, - }, - }, - { - .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-1", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime", .cbs = { .modify = isis_instance_lsp_maximum_lifetime_level_1_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-2", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/generation-interval", .cbs = { - .modify = 
isis_instance_lsp_maximum_lifetime_level_2_modify, + .modify = isis_instance_lsp_generation_interval_level_1_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/generation-interval", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval", .cbs = { - .cli_show = cli_show_isis_lsp_gen_interval, + .modify = isis_instance_lsp_refresh_interval_level_2_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/generation-interval/level-1", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime", .cbs = { - .modify = isis_instance_lsp_generation_interval_level_1_modify, + .modify = isis_instance_lsp_maximum_lifetime_level_2_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/generation-interval/level-2", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/generation-interval", .cbs = { .modify = isis_instance_lsp_generation_interval_level_2_modify, }, diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h index 29a2ded0de..e028dfd11b 100644 --- a/isisd/isis_nb.h +++ b/isisd/isis_nb.h @@ -427,12 +427,8 @@ void cli_show_isis_area_pwd(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_domain_pwd(struct vty *vty, struct lyd_node *dnode, bool show_defaults); -void cli_show_isis_lsp_gen_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -void cli_show_isis_lsp_ref_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -void cli_show_isis_lsp_max_lifetime(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); +void cli_show_isis_lsp_timers(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); void cli_show_isis_lsp_mtu(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_spf_min_interval(struct vty *vty, struct lyd_node *dnode, diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index d14704b4ee..4347c85664 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -371,7 +371,7 @@ int 
isis_instance_lsp_mtu_modify(enum nb_event event, } /* - * XPath: /frr-isisd:isis/instance/lsp/refresh-interval/level-1 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval */ int isis_instance_lsp_refresh_interval_level_1_modify( enum nb_event event, const struct lyd_node *dnode, @@ -391,7 +391,7 @@ int isis_instance_lsp_refresh_interval_level_1_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/refresh-interval/level-2 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval */ int isis_instance_lsp_refresh_interval_level_2_modify( enum nb_event event, const struct lyd_node *dnode, @@ -411,7 +411,7 @@ int isis_instance_lsp_refresh_interval_level_2_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime/level-1 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime */ int isis_instance_lsp_maximum_lifetime_level_1_modify( enum nb_event event, const struct lyd_node *dnode, @@ -431,7 +431,7 @@ int isis_instance_lsp_maximum_lifetime_level_1_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime/level-2 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime */ int isis_instance_lsp_maximum_lifetime_level_2_modify( enum nb_event event, const struct lyd_node *dnode, @@ -451,7 +451,7 @@ int isis_instance_lsp_maximum_lifetime_level_2_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/generation-interval/level-1 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/generation-interval */ int isis_instance_lsp_generation_interval_level_1_modify( enum nb_event event, const struct lyd_node *dnode, @@ -471,7 +471,7 @@ int isis_instance_lsp_generation_interval_level_1_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/generation-interval/level-2 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/generation-interval */ int isis_instance_lsp_generation_interval_level_2_modify( enum nb_event event, const struct lyd_node *dnode, diff --git a/isisd/isis_pdu.c 
b/isisd/isis_pdu.c index 9153512623..e8a0ba02e9 100644 --- a/isisd/isis_pdu.c +++ b/isisd/isis_pdu.c @@ -164,7 +164,7 @@ static int process_p2p_hello(struct iih_info *iih) if (memcmp(iih->sys_id, adj->sysid, ISIS_SYS_ID_LEN)) { zlog_debug( "hello source and adjacency do not match, set adj down\n"); - isis_adj_state_change(adj, ISIS_ADJ_DOWN, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "adj do not exist"); return ISIS_OK; } @@ -184,7 +184,7 @@ static int process_p2p_hello(struct iih_info *iih) * adjacency entry getting added to the lsp tlv neighbor list. */ adj->circuit_t = iih->circ_type; - isis_adj_state_change(adj, ISIS_ADJ_INITIALIZING, NULL); + isis_adj_state_change(&adj, ISIS_ADJ_INITIALIZING, NULL); adj->sys_type = ISIS_SYSTYPE_UNKNOWN; } @@ -233,7 +233,7 @@ static int process_p2p_hello(struct iih_info *iih) return ISIS_WARNING; } else if (adj->adj_usage == ISIS_ADJ_LEVEL1) { /* (6) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -254,7 +254,7 @@ static int process_p2p_hello(struct iih_info *iih) || (adj->adj_usage == ISIS_ADJ_LEVEL2)) { /* (8) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -268,7 +268,7 @@ static int process_p2p_hello(struct iih_info *iih) || (adj->adj_usage == ISIS_ADJ_LEVEL1AND2)) { /* (8) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -282,7 +282,7 @@ static int process_p2p_hello(struct iih_info *iih) || (adj->adj_usage == ISIS_ADJ_LEVEL2)) { /* (8) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -304,7 +304,7 @@ static int process_p2p_hello(struct iih_info *iih) || (adj->adj_usage == ISIS_ADJ_LEVEL2)) { /* (6) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -318,7 +318,7 @@ static int 
process_p2p_hello(struct iih_info *iih) } else if (adj->adj_usage == ISIS_ADJ_LEVEL1AND2) { /* (6) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -331,11 +331,11 @@ static int process_p2p_hello(struct iih_info *iih) if (iih->circuit->area->is_type == IS_LEVEL_1) { /* 8.2.5.2 b) 1) is_type L1 and adj is not up */ if (adj->adj_state != ISIS_ADJ_UP) { - isis_adj_state_change(adj, ISIS_ADJ_DOWN, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Area Mismatch"); /* 8.2.5.2 b) 2)is_type L1 and adj is up */ } else { - isis_adj_state_change(adj, ISIS_ADJ_DOWN, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Down - Area Mismatch"); } } @@ -349,7 +349,7 @@ static int process_p2p_hello(struct iih_info *iih) return ISIS_WARNING; } else if (adj->adj_usage == ISIS_ADJ_LEVEL1) { /* (7) down - area mismatch */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Area Mismatch"); @@ -358,7 +358,7 @@ static int process_p2p_hello(struct iih_info *iih) || (adj->adj_usage == ISIS_ADJ_LEVEL2)) { /* (7) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } @@ -371,7 +371,7 @@ static int process_p2p_hello(struct iih_info *iih) ISIS_ADJ_LEVEL2); } else if (adj->adj_usage == ISIS_ADJ_LEVEL1) { /* (7) down - wrong system */ - isis_adj_state_change(adj, + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Wrong System"); } else if (adj->adj_usage @@ -379,12 +379,12 @@ static int process_p2p_hello(struct iih_info *iih) if (iih->circ_type == IS_LEVEL_2) { /* (7) down - wrong system */ isis_adj_state_change( - adj, ISIS_ADJ_DOWN, + &adj, ISIS_ADJ_DOWN, "Wrong System"); } else { /* (7) down - area mismatch */ isis_adj_state_change( - adj, ISIS_ADJ_DOWN, + &adj, ISIS_ADJ_DOWN, "Area Mismatch"); } } @@ -393,34 +393,36 @@ static int process_p2p_hello(struct iih_info *iih) } } else { /* down - area mismatch */ - isis_adj_state_change(adj, ISIS_ADJ_DOWN, "Area 
Mismatch"); + isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "Area Mismatch"); } - if (adj->adj_state == ISIS_ADJ_UP && changed) { - lsp_regenerate_schedule(adj->circuit->area, - isis_adj_usage2levels(adj->adj_usage), - 0); - } + if (adj) { + if (adj->adj_state == ISIS_ADJ_UP && changed) { + lsp_regenerate_schedule( + adj->circuit->area, + isis_adj_usage2levels(adj->adj_usage), 0); + } - /* 8.2.5.2 c) if the action was up - comparing circuit IDs */ - /* FIXME - Missing parts */ + /* 8.2.5.2 c) if the action was up - comparing circuit IDs */ + /* FIXME - Missing parts */ - /* some of my own understanding of the ISO, why the heck does - * it not say what should I change the system_type to... - */ - switch (adj->adj_usage) { - case ISIS_ADJ_LEVEL1: - adj->sys_type = ISIS_SYSTYPE_L1_IS; - break; - case ISIS_ADJ_LEVEL2: - adj->sys_type = ISIS_SYSTYPE_L2_IS; - break; - case ISIS_ADJ_LEVEL1AND2: - adj->sys_type = ISIS_SYSTYPE_L2_IS; - break; - case ISIS_ADJ_NONE: - adj->sys_type = ISIS_SYSTYPE_UNKNOWN; - break; + /* some of my own understanding of the ISO, why the heck does + * it not say what should I change the system_type to... 
+ */ + switch (adj->adj_usage) { + case ISIS_ADJ_LEVEL1: + adj->sys_type = ISIS_SYSTYPE_L1_IS; + break; + case ISIS_ADJ_LEVEL2: + adj->sys_type = ISIS_SYSTYPE_L2_IS; + break; + case ISIS_ADJ_LEVEL1AND2: + adj->sys_type = ISIS_SYSTYPE_L2_IS; + break; + case ISIS_ADJ_NONE: + adj->sys_type = ISIS_SYSTYPE_UNKNOWN; + break; + } } if (isis->debugs & DEBUG_ADJ_PACKETS) { @@ -455,7 +457,7 @@ static int process_lan_hello(struct iih_info *iih) } adj->level = iih->level; } - isis_adj_state_change(adj, ISIS_ADJ_INITIALIZING, NULL); + isis_adj_state_change(&adj, ISIS_ADJ_INITIALIZING, NULL); if (iih->level == IS_LEVEL_1) adj->sys_type = ISIS_SYSTYPE_L1_IS; @@ -506,13 +508,13 @@ static int process_lan_hello(struct iih_info *iih) if (adj->adj_state != ISIS_ADJ_UP) { if (own_snpa_found) { isis_adj_state_change( - adj, ISIS_ADJ_UP, + &adj, ISIS_ADJ_UP, "own SNPA found in LAN Neighbours TLV"); } } else { if (!own_snpa_found) { isis_adj_state_change( - adj, ISIS_ADJ_INITIALIZING, + &adj, ISIS_ADJ_INITIALIZING, "own SNPA not found in LAN Neighbours TLV"); } } diff --git a/isisd/isis_tx_queue.c b/isisd/isis_tx_queue.c index 507fd489bc..27e57db16c 100644 --- a/isisd/isis_tx_queue.c +++ b/isisd/isis_tx_queue.c @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_tx_queue.h b/isisd/isis_tx_queue.h index c2beda45b7..f0f1184d58 100644 --- a/isisd/isis_tx_queue.h +++ b/isisd/isis_tx_queue.h @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isisd.c b/isisd/isisd.c index 
47d2e9faab..298629e246 100644 --- a/isisd/isisd.c +++ b/isisd/isisd.c @@ -73,7 +73,6 @@ int area_clear_net_title(struct vty *, const char *); int show_isis_interface_common(struct vty *, const char *ifname, char); int show_isis_neighbor_common(struct vty *, const char *id, char); int clear_isis_neighbor_common(struct vty *, const char *id); -int isis_config_write(struct vty *); void isis_new(unsigned long process_id, vrf_id_t vrf_id) @@ -137,17 +136,17 @@ struct isis_area *isis_area_create(const char *area_tag) enum isis_metric_style default_style; area->max_lsp_lifetime[0] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-1"); + "/frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime"); area->max_lsp_lifetime[1] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-2"); + "/frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime"); area->lsp_refresh[0] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/refresh-interval/level-1"); + "/frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval"); area->lsp_refresh[1] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/refresh-interval/level-2"); + "/frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval"); area->lsp_gen_interval[0] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/generation-interval/level-1"); + "/frr-isisd:isis/instance/lsp/timers/level-1/generation-interval"); area->lsp_gen_interval[1] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/generation-interval/level-2"); + "/frr-isisd:isis/instance/lsp/timers/level-2/generation-interval"); area->min_spf_interval[0] = yang_get_default_uint16( "/frr-isisd:isis/instance/spf/minimum-interval/level-1"); area->min_spf_interval[1] = yang_get_default_uint16( @@ -654,7 +653,7 @@ int clear_isis_neighbor_common(struct vty *vty, const char *id) sysid, ISIS_SYS_ID_LEN)) isis_adj_state_change( - adj, + &adj, ISIS_ADJ_DOWN, "clear user request"); } @@ -666,7 
+665,7 @@ int clear_isis_neighbor_common(struct vty *vty, const char *id) || !memcmp(adj->sysid, sysid, ISIS_SYS_ID_LEN)) isis_adj_state_change( - adj, ISIS_ADJ_DOWN, + &adj, ISIS_ADJ_DOWN, "clear user request"); } } @@ -784,8 +783,14 @@ DEFUN_NOSH (show_debugging, return CMD_SUCCESS; } +static int config_write_debug(struct vty *vty); /* Debug node. */ -static struct cmd_node debug_node = {DEBUG_NODE, "", 1}; +static struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_debug, +}; static int config_write_debug(struct vty *vty) { @@ -1852,7 +1857,7 @@ DEFUN (no_log_adj_changes, #endif /* ifdef FABRICD */ #ifdef FABRICD /* IS-IS configuration write function */ -int isis_config_write(struct vty *vty) +static int isis_config_write(struct vty *vty) { int write = 0; @@ -2124,9 +2129,16 @@ int isis_config_write(struct vty *vty) return write; } +struct cmd_node router_node = { + .name = "openfabric", + .node = OPENFABRIC_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = isis_config_write, +}; #else /* IS-IS configuration write function */ -int isis_config_write(struct vty *vty) +static int isis_config_write(struct vty *vty) { int write = 0; struct lyd_node *dnode; @@ -2139,14 +2151,20 @@ int isis_config_write(struct vty *vty) return write; } -#endif /* ifdef FABRICD */ -struct cmd_node router_node = {ROUTER_NODE, "%s(config-router)# ", 1}; +struct cmd_node router_node = { + .name = "isis", + .node = ISIS_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = isis_config_write, +}; +#endif /* ifdef FABRICD */ void isis_init(void) { /* Install IS-IS top node */ - install_node(&router_node, isis_config_write); + install_node(&router_node); install_element(VIEW_NODE, &show_isis_summary_cmd); @@ -2167,7 +2185,7 @@ void isis_init(void) install_element(ENABLE_NODE, &show_debugging_isis_cmd); - install_node(&debug_node, config_write_debug); + 
install_node(&debug_node); install_element(ENABLE_NODE, &debug_isis_adj_cmd); install_element(ENABLE_NODE, &no_debug_isis_adj_cmd); diff --git a/ldpd/lde.c b/ldpd/lde.c index 23a2dbd760..2223e32f87 100644 --- a/ldpd/lde.c +++ b/ldpd/lde.c @@ -1637,6 +1637,56 @@ lde_change_egress_label(int af) NULL, 0); } +void +lde_change_host_label(int af) +{ + struct lde_nbr *ln; + struct fec *f; + struct fec_node *fn; + uint32_t new_label; + + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + + switch (af) { + case AF_INET: + if (fn->fec.type != FEC_TYPE_IPV4) + continue; + break; + case AF_INET6: + if (fn->fec.type != FEC_TYPE_IPV6) + continue; + break; + default: + fatalx("lde_change_host_label: unknown af"); + } + + /* + * If the local label has changed to NO_LABEL, send a label + * withdraw to all peers. + * If the local label has changed and it's different from + * NO_LABEL, send a label mapping to all peers advertising + * the new label. + * If the local label hasn't changed, do nothing + */ + new_label = lde_update_label(fn); + if (fn->local_label != new_label) { + if (new_label == NO_LABEL) + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelwithdraw(ln, fn, + NULL, NULL); + + fn->local_label = new_label; + if (fn->local_label != NO_LABEL) + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelmapping(ln, fn, 0); + } + } + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_imsg_compose_ldpe(IMSG_MAPPING_ADD_END, ln->peerid, 0, + NULL, 0); +} + static int lde_address_add(struct lde_nbr *ln, struct lde_addr *lde_addr) { @@ -1706,6 +1756,7 @@ static void zclient_sync_init(unsigned short instance) zclient_sync->sock = -1; zclient_sync->redist_default = ZEBRA_ROUTE_LDP; zclient_sync->instance = instance; + zclient_sync->session_id = 1; /* Distinguish from main session */ zclient_sync->privs = &lde_privs; while (zclient_socket_connect(zclient_sync) < 0) { diff --git a/ldpd/lde.h b/ldpd/lde.h index a099f8d286..36196a3d08 100644 --- a/ldpd/lde.h +++ b/ldpd/lde.h @@ -183,6 
+183,7 @@ void lde_req_del(struct lde_nbr *, struct lde_req *, int); struct lde_wdraw *lde_wdraw_add(struct lde_nbr *, struct fec_node *); void lde_wdraw_del(struct lde_nbr *, struct lde_wdraw *); void lde_change_egress_label(int); +void lde_change_host_label(int); struct lde_addr *lde_address_find(struct lde_nbr *, int, union ldpd_addr *); diff --git a/ldpd/ldp_debug.c b/ldpd/ldp_debug.c index ec70ef510a..b9ef60ff94 100644 --- a/ldpd/ldp_debug.c +++ b/ldpd/ldp_debug.c @@ -29,12 +29,14 @@ struct ldp_debug conf_ldp_debug; struct ldp_debug ldp_debug; +static int ldp_debug_config_write(struct vty *); + /* Debug node. */ -struct cmd_node ldp_debug_node = -{ - DEBUG_NODE, - "", - 1 +struct cmd_node ldp_debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = ldp_debug_config_write, }; int @@ -142,7 +144,7 @@ ldp_vty_show_debugging(struct vty *vty) return (CMD_SUCCESS); } -int +static int ldp_debug_config_write(struct vty *vty) { int write = 0; diff --git a/ldpd/ldp_vty.h b/ldpd/ldp_vty.h index af5f1d5616..f6ba8f8c97 100644 --- a/ldpd/ldp_vty.h +++ b/ldpd/ldp_vty.h @@ -33,9 +33,6 @@ extern struct cmd_node ldp_debug_node; union ldpd_addr; int ldp_get_address(const char *, int *, union ldpd_addr *); -int ldp_config_write(struct vty *); -int ldp_l2vpn_config_write(struct vty *); -int ldp_debug_config_write(struct vty *); int ldp_vty_mpls_ldp (struct vty *, const char *); int ldp_vty_address_family (struct vty *, const char *, const char *); int ldp_vty_disc_holdtime(struct vty *, const char *, enum hello_type, long); diff --git a/ldpd/ldp_vty_cmds.c b/ldpd/ldp_vty_cmds.c index c10c6ae35c..fc84c7f76b 100644 --- a/ldpd/ldp_vty_cmds.c +++ b/ldpd/ldp_vty_cmds.c @@ -779,14 +779,14 @@ ldp_vty_init (void) { cmd_variable_handler_register(l2vpn_var_handlers); - install_node(&ldp_node, ldp_config_write); - install_node(&ldp_ipv4_node, NULL); - install_node(&ldp_ipv6_node, NULL); - install_node(&ldp_ipv4_iface_node, NULL); - 
install_node(&ldp_ipv6_iface_node, NULL); - install_node(&ldp_l2vpn_node, ldp_l2vpn_config_write); - install_node(&ldp_pseudowire_node, NULL); - install_node(&ldp_debug_node, ldp_debug_config_write); + install_node(&ldp_node); + install_node(&ldp_ipv4_node); + install_node(&ldp_ipv6_node); + install_node(&ldp_ipv4_iface_node); + install_node(&ldp_ipv6_iface_node); + install_node(&ldp_l2vpn_node); + install_node(&ldp_pseudowire_node); + install_node(&ldp_debug_node); install_default(LDP_NODE); install_default(LDP_IPV4_NODE); install_default(LDP_IPV6_NODE); diff --git a/ldpd/ldp_vty_conf.c b/ldpd/ldp_vty_conf.c index 05b8962563..3abd0817a8 100644 --- a/ldpd/ldp_vty_conf.c +++ b/ldpd/ldp_vty_conf.c @@ -30,60 +30,64 @@ #include "vty.h" #include "ldp_vty.h" +static int ldp_config_write(struct vty *); static void ldp_af_iface_config_write(struct vty *, int); static void ldp_af_config_write(struct vty *, int, struct ldpd_conf *, struct ldpd_af_conf *); +static int ldp_l2vpn_config_write(struct vty *); static void ldp_l2vpn_pw_config_write(struct vty *, struct l2vpn_pw *); static int ldp_vty_get_af(struct vty *); static int ldp_iface_is_configured(struct ldpd_conf *, const char *); -struct cmd_node ldp_node = -{ - LDP_NODE, - "%s(config-ldp)# ", - 1, +struct cmd_node ldp_node = { + .name = "ldp", + .node = LDP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-ldp)# ", + .config_write = ldp_config_write, }; -struct cmd_node ldp_ipv4_node = -{ - LDP_IPV4_NODE, - "%s(config-ldp-af)# ", - 1, +struct cmd_node ldp_ipv4_node = { + .name = "ldp ipv4", + .node = LDP_IPV4_NODE, + .parent_node = LDP_NODE, + .prompt = "%s(config-ldp-af)# ", }; -struct cmd_node ldp_ipv6_node = -{ - LDP_IPV6_NODE, - "%s(config-ldp-af)# ", - 1, +struct cmd_node ldp_ipv6_node = { + .name = "ldp ipv6", + .node = LDP_IPV6_NODE, + .parent_node = LDP_NODE, + .prompt = "%s(config-ldp-af)# ", }; -struct cmd_node ldp_ipv4_iface_node = -{ - LDP_IPV4_IFACE_NODE, - "%s(config-ldp-af-if)# ", - 1, +struct 
cmd_node ldp_ipv4_iface_node = { + .name = "ldp ipv4 interface", + .node = LDP_IPV4_IFACE_NODE, + .parent_node = LDP_IPV4_NODE, + .prompt = "%s(config-ldp-af-if)# ", }; -struct cmd_node ldp_ipv6_iface_node = -{ - LDP_IPV6_IFACE_NODE, - "%s(config-ldp-af-if)# ", - 1, +struct cmd_node ldp_ipv6_iface_node = { + .name = "ldp ipv6 interface", + .node = LDP_IPV6_IFACE_NODE, + .parent_node = LDP_IPV6_NODE, + .prompt = "%s(config-ldp-af-if)# ", }; -struct cmd_node ldp_l2vpn_node = -{ - LDP_L2VPN_NODE, - "%s(config-l2vpn)# ", - 1, +struct cmd_node ldp_l2vpn_node = { + .name = "ldp l2vpn", + .node = LDP_L2VPN_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-l2vpn)# ", + .config_write = ldp_l2vpn_config_write, }; -struct cmd_node ldp_pseudowire_node = -{ - LDP_PSEUDOWIRE_NODE, - "%s(config-l2vpn-pw)# ", - 1, +struct cmd_node ldp_pseudowire_node = { + .name = "ldp", + .node = LDP_PSEUDOWIRE_NODE, + .parent_node = LDP_L2VPN_NODE, + .prompt = "%s(config-l2vpn-pw)# ", }; int @@ -240,7 +244,7 @@ ldp_af_config_write(struct vty *vty, int af, struct ldpd_conf *conf, vty_out(vty, " exit-address-family\n"); } -int +static int ldp_config_write(struct vty *vty) { struct nbr_params *nbrp; @@ -345,7 +349,7 @@ ldp_l2vpn_pw_config_write(struct vty *vty, struct l2vpn_pw *pw) vty_out (vty," ! 
Incomplete config, specify a pw-id\n"); } -int +static int ldp_l2vpn_config_write(struct vty *vty) { struct l2vpn *l2vpn; @@ -1077,7 +1081,7 @@ ldp_vty_neighbor_password(struct vty *vty, const char *negate, struct in_addr ls if (password_len >= sizeof(nbrp->auth.md5key)) vty_out(vty, "%% password has been truncated to %zu " "characters.", sizeof(nbrp->auth.md5key) - 1); - nbrp->auth.md5key_len = password_len; + nbrp->auth.md5key_len = strlen(nbrp->auth.md5key); nbrp->auth.method = AUTH_MD5SIG; } diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c index b3ccb77602..28e56ecd64 100644 --- a/ldpd/ldp_zebra.c +++ b/ldpd/ldp_zebra.c @@ -108,8 +108,7 @@ ldp_zebra_send_mpls_labels(int cmd, struct kroute *kr) struct zapi_labels zl = {}; struct zapi_nexthop *znh; - if (kr->local_label < MPLS_LABEL_RESERVED_MAX || - kr->remote_label == NO_LABEL) + if (kr->local_label < MPLS_LABEL_RESERVED_MAX) return (0); debug_zebra_out("prefix %s/%u nexthop %s ifindex %u labels %s/%s (%s)", @@ -122,21 +121,32 @@ ldp_zebra_send_mpls_labels(int cmd, struct kroute *kr) zl.local_label = kr->local_label; /* Set prefix. 
*/ - SET_FLAG(zl.message, ZAPI_LABELS_FTN); - zl.route.prefix.family = kr->af; - switch (kr->af) { - case AF_INET: - zl.route.prefix.u.prefix4 = kr->prefix.v4; - break; - case AF_INET6: - zl.route.prefix.u.prefix6 = kr->prefix.v6; - break; - default: - fatalx("ldp_zebra_send_mpls_labels: unknown af"); + if (kr->remote_label != NO_LABEL) { + SET_FLAG(zl.message, ZAPI_LABELS_FTN); + zl.route.prefix.family = kr->af; + switch (kr->af) { + case AF_INET: + zl.route.prefix.u.prefix4 = kr->prefix.v4; + break; + case AF_INET6: + zl.route.prefix.u.prefix6 = kr->prefix.v6; + break; + default: + fatalx("ldp_zebra_send_mpls_labels: unknown af"); + } + zl.route.prefix.prefixlen = kr->prefixlen; + zl.route.type = kr->route_type; + zl.route.instance = kr->route_instance; } - zl.route.prefix.prefixlen = kr->prefixlen; - zl.route.type = kr->route_type; - zl.route.instance = kr->route_instance; + + /* + * For broken LSPs, instruct the forwarding plane to pop the top-level + * label and forward packets normally. This is a best-effort attempt + * to deliver labeled IP packets to their final destination (instead of + * dropping them). + */ + if (kr->remote_label == NO_LABEL) + kr->remote_label = MPLS_LABEL_IMPLICIT_NULL; /* Set nexthop. 
*/ zl.nexthop_num = 1; diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c index 0f9f055d02..8e546be93e 100644 --- a/ldpd/ldpd.c +++ b/ldpd/ldpd.c @@ -180,6 +180,7 @@ static struct quagga_signal_t ldp_signals[] = }; static const struct frr_yang_module_info *const ldpd_yang_modules[] = { + &frr_vrf_info, }; FRR_DAEMON_INFO(ldpd, LDP, @@ -308,9 +309,15 @@ main(int argc, char *argv[]) exit(1); } - if (lflag || eflag) - openzlog(ldpd_di.progname, "LDP", 0, - LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON); + if (lflag || eflag) { + struct zprivs_ids_t ids; + + zprivs_preinit(&ldpd_privs); + zprivs_get_ids(&ids); + + zlog_init(ldpd_di.progname, "LDP", 0, + ids.uid_normal, ids.gid_normal); + } if (lflag) lde(); else if (eflag) @@ -486,7 +493,7 @@ ldpd_shutdown(void) static pid_t start_child(enum ldpd_process p, char *argv0, int fd_async, int fd_sync) { - char *argv[3]; + char *argv[7]; int argc = 0, nullfd; pid_t pid; @@ -529,6 +536,11 @@ start_child(enum ldpd_process p, char *argv0, int fd_async, int fd_sync) argv[argc++] = (char *)"-E"; break; } + + argv[argc++] = (char *)"-u"; + argv[argc++] = (char *)ldpd_privs.user; + argv[argc++] = (char *)"-g"; + argv[argc++] = (char *)ldpd_privs.group; argv[argc++] = NULL; execvp(argv0, argv); @@ -1319,6 +1331,7 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) int stop_init_backoff = 0; int remove_dynamic_tnbrs = 0; int change_egress_label = 0; + int change_host_label = 0; int reset_nbrs_ipv4 = 0; int reset_nbrs = 0; int update_sockets = 0; @@ -1349,6 +1362,12 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) if ((af_conf->flags & F_LDPD_AF_EXPNULL) != (xa->flags & F_LDPD_AF_EXPNULL)) change_egress_label = 1; + + /* changing config of host only fec filtering */ + if ((af_conf->flags & F_LDPD_AF_ALLOCHOSTONLY) + != (xa->flags & F_LDPD_AF_ALLOCHOSTONLY)) + change_host_label = 1; + af_conf->flags = xa->flags; /* update the transport address */ @@ -1358,6 +1377,10 @@ merge_af(int af, struct 
ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) } /* update ACLs */ + if (strcmp(af_conf->acl_label_allocate_for, + xa->acl_label_allocate_for)) + change_host_label = 1; + if (strcmp(af_conf->acl_label_advertise_to, xa->acl_label_advertise_to) || strcmp(af_conf->acl_label_advertise_for, @@ -1391,6 +1414,8 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) case PROC_LDE_ENGINE: if (change_egress_label) lde_change_egress_label(af); + if (change_host_label) + lde_change_host_label(af); break; case PROC_LDP_ENGINE: if (stop_init_backoff) diff --git a/ldpd/log.c b/ldpd/log.c index b138e5754a..1903017588 100644 --- a/ldpd/log.c +++ b/ldpd/log.c @@ -24,7 +24,6 @@ #include "log.h" #include <lib/log.h> -#include <lib/log_int.h> const char *log_procname; diff --git a/ldpd/socket.c b/ldpd/socket.c index 4909ea7ad8..e865707d44 100644 --- a/ldpd/socket.c +++ b/ldpd/socket.c @@ -320,7 +320,7 @@ sock_set_md5sig(int fd, int af, union ldpd_addr *addr, const char *password) int sock_set_ipv4_tos(int fd, int tos) { - if (setsockopt(fd, IPPROTO_IP, IP_TOS, (int *)&tos, sizeof(tos)) < 0) { + if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0) { log_warn("%s: error setting IP_TOS to 0x%x", __func__, tos); return (-1); } diff --git a/lib/agentx.c b/lib/agentx.c index d1b801fe8c..7c4bdcbe27 100644 --- a/lib/agentx.c +++ b/lib/agentx.c @@ -158,9 +158,13 @@ static void agentx_events_update(void) } /* AgentX node. */ -static struct cmd_node agentx_node = {SMUX_NODE, - "", /* AgentX has no interface. 
*/ - 1}; +static int config_write_agentx(struct vty *vty); +static struct cmd_node agentx_node = { + .name = "smux", + .node = SMUX_NODE, + .prompt = "", + .config_write = config_write_agentx, +}; /* Logging NetSNMP messages */ static int agentx_log_callback(int major, int minor, void *serverarg, @@ -246,7 +250,7 @@ void smux_init(struct thread_master *tm) agentx_log_callback, NULL); init_agent(FRR_SMUX_NAME); - install_node(&agentx_node, config_write_agentx); + install_node(&agentx_node); install_element(CONFIG_NODE, &agentx_enable_cmd); install_element(CONFIG_NODE, &no_agentx_cmd); } diff --git a/lib/bitfield.h b/lib/bitfield.h index eebfc049d9..72980165f9 100644 --- a/lib/bitfield.h +++ b/lib/bitfield.h @@ -152,9 +152,8 @@ typedef unsigned int word_t; */ #define bf_free(v) \ do { \ - if ((v).data) { \ - free((v).data); \ - } \ + free((v).data); \ + (v).data = NULL; \ } while (0) #ifdef __cplusplus diff --git a/lib/clippy.c b/lib/clippy.c index cd8067f5eb..2e09c24c66 100644 --- a/lib/clippy.c +++ b/lib/clippy.c @@ -107,21 +107,11 @@ int main(int argc, char **argv) #include "log.h" #include "zassert.h" -#define ZLOG_FUNC(FUNCNAME) \ - void FUNCNAME(const char *format, ...) 
\ - { \ - va_list args; \ - va_start(args, format); \ - vfprintf(stderr, format, args); \ - fputs("\n", stderr); \ - va_end(args); \ - } - -ZLOG_FUNC(zlog_err) -ZLOG_FUNC(zlog_warn) -ZLOG_FUNC(zlog_info) -ZLOG_FUNC(zlog_notice) -ZLOG_FUNC(zlog_debug) +void vzlog(int prio, const char *format, va_list args) +{ + vfprintf(stderr, format, args); + fputs("\n", stderr); +} void _zlog_assert_failed(const char *assertion, const char *file, unsigned int line, const char *function) diff --git a/lib/command.c b/lib/command.c index 8811b3a791..fbbf10c796 100644 --- a/lib/command.c +++ b/lib/command.c @@ -31,7 +31,7 @@ #include "frrstr.h" #include "memory.h" #include "log.h" -#include "log_int.h" +#include "log_vty.h" #include "thread.h" #include "vector.h" #include "linklist.h" @@ -47,6 +47,7 @@ #include "hook.h" #include "lib_errors.h" #include "northbound_cli.h" +#include "network.h" DEFINE_MTYPE_STATIC(LIB, HOST, "Host config") DEFINE_MTYPE(LIB, COMPLETION, "Completion item") @@ -73,86 +74,6 @@ const struct message tokennames[] = { item(END_TKN), {0}, }; - -const char *const node_names[] = { - "auth", // AUTH_NODE, - "view", // VIEW_NODE, - "auth enable", // AUTH_ENABLE_NODE, - "enable", // ENABLE_NODE, - "config", // CONFIG_NODE, - "debug", // DEBUG_NODE, - "vrf debug", // VRF_DEBUG_NODE, - "northbound debug", // NORTHBOUND_DEBUG_NODE, - "vnc debug", // DEBUG_VNC_NODE, - "route-map debug", /* RMAP_DEBUG_NODE */ - "resolver debug", /* RESOLVER_DEBUG_NODE */ - "aaa", // AAA_NODE, - "keychain", // KEYCHAIN_NODE, - "keychain key", // KEYCHAIN_KEY_NODE, - "static ip", // IP_NODE, - "vrf", // VRF_NODE, - "interface", // INTERFACE_NODE, - "nexthop-group", // NH_GROUP_NODE, - "zebra", // ZEBRA_NODE, - "table", // TABLE_NODE, - "rip", // RIP_NODE, - "ripng", // RIPNG_NODE, - "babel", // BABEL_NODE, - "eigrp", // EIGRP_NODE, - "bgp", // BGP_NODE, - "bgp vpnv4", // BGP_VPNV4_NODE, - "bgp vpnv6", // BGP_VPNV6_NODE, - "bgp ipv4 unicast", // BGP_IPV4_NODE, - "bgp ipv4 multicast", // 
BGP_IPV4M_NODE, - "bgp ipv4 labeled unicast", // BGP_IPV4L_NODE, - "bgp ipv6", // BGP_IPV6_NODE, - "bgp ipv6 multicast", // BGP_IPV6M_NODE, - "bgp ipv6 labeled unicast", // BGP_IPV6L_NODE, - "bgp vrf policy", // BGP_VRF_POLICY_NODE, - "bgp vnc defaults", // BGP_VNC_DEFAULTS_NODE, - "bgp vnc nve", // BGP_VNC_NVE_GROUP_NODE, - "bgp vnc l2", // BGP_VNC_L2_GROUP_NODE, - "rfp defaults", // RFP_DEFAULTS_NODE, - "bgp evpn", // BGP_EVPN_NODE, - "ospf", // OSPF_NODE, - "ospf6", // OSPF6_NODE, - "ldp", // LDP_NODE, - "ldp ipv4", // LDP_IPV4_NODE, - "ldp ipv6", // LDP_IPV6_NODE, - "ldp ipv4 interface", // LDP_IPV4_IFACE_NODE, - "ldp ipv6 interface", // LDP_IPV6_IFACE_NODE, - "ldp l2vpn", // LDP_L2VPN_NODE, - "ldp", // LDP_PSEUDOWIRE_NODE, - "isis", // ISIS_NODE, - "ipv4 access list", // ACCESS_NODE, - "ipv4 prefix list", // PREFIX_NODE, - "ipv6 access list", // ACCESS_IPV6_NODE, - "MAC access list", // ACCESS_MAC_NODE, - "ipv6 prefix list", // PREFIX_IPV6_NODE, - "as list", // AS_LIST_NODE, - "community list", // COMMUNITY_LIST_NODE, - "routemap", // RMAP_NODE, - "pbr-map", // PBRMAP_NODE, - "smux", // SMUX_NODE, - "dump", // DUMP_NODE, - "forwarding", // FORWARDING_NODE, - "protocol", // PROTOCOL_NODE, - "mpls", // MPLS_NODE, - "pw", // PW_NODE, - "vty", // VTY_NODE, - "link-params", // LINK_PARAMS_NODE, - "bgp evpn vni", // BGP_EVPN_VNI_NODE, - "rpki", // RPKI_NODE - "bgp ipv4 flowspec", /* BGP_FLOWSPECV4_NODE - */ - "bgp ipv6 flowspec", /* BGP_FLOWSPECV6_NODE - */ - "bfd", /* BFD_NODE */ - "bfd peer", /* BFD_PEER_NODE */ - "openfabric", // OPENFABRIC_NODE - "vrrp", /* VRRP_NODE */ - "bmp", /* BMP_NODE */ -}; /* clang-format on */ /* Command vector which includes some level of command lists. Normally @@ -179,84 +100,45 @@ const char *cmd_domainname_get(void) return host.domainname; } +static int root_on_exit(struct vty *vty); + /* Standard command node structures. 
*/ static struct cmd_node auth_node = { - AUTH_NODE, "Password: ", + .name = "auth", + .node = AUTH_NODE, + .prompt = "Password: ", }; static struct cmd_node view_node = { - VIEW_NODE, "%s> ", + .name = "view", + .node = VIEW_NODE, + .prompt = "%s> ", + .node_exit = root_on_exit, }; static struct cmd_node auth_enable_node = { - AUTH_ENABLE_NODE, "Password: ", + .name = "auth enable", + .node = AUTH_ENABLE_NODE, + .prompt = "Password: ", }; static struct cmd_node enable_node = { - ENABLE_NODE, "%s# ", + .name = "enable", + .node = ENABLE_NODE, + .prompt = "%s# ", + .node_exit = root_on_exit, }; -static struct cmd_node config_node = {CONFIG_NODE, "%s(config)# ", 1}; - -static const struct facility_map { - int facility; - const char *name; - size_t match; -} syslog_facilities[] = { - {LOG_KERN, "kern", 1}, - {LOG_USER, "user", 2}, - {LOG_MAIL, "mail", 1}, - {LOG_DAEMON, "daemon", 1}, - {LOG_AUTH, "auth", 1}, - {LOG_SYSLOG, "syslog", 1}, - {LOG_LPR, "lpr", 2}, - {LOG_NEWS, "news", 1}, - {LOG_UUCP, "uucp", 2}, - {LOG_CRON, "cron", 1}, -#ifdef LOG_FTP - {LOG_FTP, "ftp", 1}, -#endif - {LOG_LOCAL0, "local0", 6}, - {LOG_LOCAL1, "local1", 6}, - {LOG_LOCAL2, "local2", 6}, - {LOG_LOCAL3, "local3", 6}, - {LOG_LOCAL4, "local4", 6}, - {LOG_LOCAL5, "local5", 6}, - {LOG_LOCAL6, "local6", 6}, - {LOG_LOCAL7, "local7", 6}, - {0, NULL, 0}, +static int config_write_host(struct vty *vty); +static struct cmd_node config_node = { + .name = "config", + .node = CONFIG_NODE, + .parent_node = ENABLE_NODE, + .prompt = "%s(config)# ", + .config_write = config_write_host, + .node_exit = vty_config_node_exit, }; -static const char *facility_name(int facility) -{ - const struct facility_map *fm; - - for (fm = syslog_facilities; fm->name; fm++) - if (fm->facility == facility) - return fm->name; - return ""; -} - -static int facility_match(const char *str) -{ - const struct facility_map *fm; - - for (fm = syslog_facilities; fm->name; fm++) - if (!strncmp(str, fm->name, fm->match)) - return 
fm->facility; - return -1; -} - -static int level_match(const char *s) -{ - int level; - - for (level = 0; zlog_priority[level] != NULL; level++) - if (!strncmp(s, zlog_priority[level], 2)) - return level; - return ZLOG_DISABLED; -} - /* This is called from main when a daemon is invoked with -v or --version. */ void print_version(const char *progname) { @@ -345,10 +227,9 @@ static bool cmd_hash_cmp(const void *a, const void *b) } /* Install top node of command vector. */ -void install_node(struct cmd_node *node, int (*func)(struct vty *)) +void install_node(struct cmd_node *node) { vector_set_index(cmdvec, node->node, node); - node->func = func; node->cmdgraph = graph_new(); node->cmd_vector = vector_init(VECTOR_MIN_SIZE); // add start node @@ -386,9 +267,9 @@ void install_element(enum node_type ntype, const struct cmd_element *cmd) if (cnode == NULL) { fprintf(stderr, "%s[%s]:\n" - "\tnode %d (%s) does not exist.\n" + "\tnode %d does not exist.\n" "\tplease call install_node() before install_element()\n", - cmd->name, cmd->string, ntype, node_names[ntype]); + cmd->name, cmd->string, ntype); exit(EXIT_FAILURE); } @@ -397,7 +278,7 @@ void install_element(enum node_type ntype, const struct cmd_element *cmd) "%s[%s]:\n" "\tnode %d (%s) already has this command installed.\n" "\tduplicate install_element call?\n", - cmd->name, cmd->string, ntype, node_names[ntype]); + cmd->name, cmd->string, ntype, cnode->name); return; } @@ -435,9 +316,9 @@ void uninstall_element(enum node_type ntype, const struct cmd_element *cmd) if (cnode == NULL) { fprintf(stderr, "%s[%s]:\n" - "\tnode %d (%s) does not exist.\n" + "\tnode %d does not exist.\n" "\tplease call install_node() before uninstall_element()\n", - cmd->name, cmd->string, ntype, node_names[ntype]); + cmd->name, cmd->string, ntype); exit(EXIT_FAILURE); } @@ -446,7 +327,7 @@ void uninstall_element(enum node_type ntype, const struct cmd_element *cmd) "%s[%s]:\n" "\tnode %d (%s) does not have this command installed.\n" 
"\tduplicate uninstall_element call?\n", - cmd->name, cmd->string, ntype, node_names[ntype]); + cmd->name, cmd->string, ntype, cnode->name); return; } @@ -486,13 +367,15 @@ static char *zencrypt(const char *passwd) gettimeofday(&tv, 0); - to64(&salt[0], random(), 3); + to64(&salt[0], frr_weak_random(), 3); to64(&salt[3], tv.tv_usec, 3); salt[5] = '\0'; return crypt(passwd, salt); } +static bool full_cli; + /* This function write configuration of this host. */ static int config_write_host(struct vty *vty) { @@ -508,7 +391,7 @@ static int config_write_host(struct vty *vty) * which would cause other daemons to then switch to syslog when they * parse frr.conf. */ - if (strcmp(zlog_default->protoname, "WATCHFRR")) { + if (full_cli) { if (host.encrypt) { if (host.password_encrypt) vty_out(vty, "password 8 %s\n", @@ -523,59 +406,7 @@ static int config_write_host(struct vty *vty) vty_out(vty, "enable password %s\n", host.enable); } - - if (host.logfile - && (zlog_default->maxlvl[ZLOG_DEST_FILE] - != ZLOG_DISABLED)) { - vty_out(vty, "log file %s", host.logfile); - if (zlog_default->maxlvl[ZLOG_DEST_FILE] - != zlog_default->default_lvl) - vty_out(vty, " %s", - zlog_priority - [zlog_default->maxlvl - [ZLOG_DEST_FILE]]); - vty_out(vty, "\n"); - } - - if (zlog_default->maxlvl[ZLOG_DEST_STDOUT] != ZLOG_DISABLED) { - vty_out(vty, "log stdout"); - if (zlog_default->maxlvl[ZLOG_DEST_STDOUT] - != zlog_default->default_lvl) - vty_out(vty, " %s", - zlog_priority - [zlog_default->maxlvl - [ZLOG_DEST_STDOUT]]); - vty_out(vty, "\n"); - } - - if (zlog_default->maxlvl[ZLOG_DEST_MONITOR] == ZLOG_DISABLED) - vty_out(vty, "no log monitor\n"); - else if (zlog_default->maxlvl[ZLOG_DEST_MONITOR] - != zlog_default->default_lvl) - vty_out(vty, "log monitor %s\n", - zlog_priority[zlog_default->maxlvl - [ZLOG_DEST_MONITOR]]); - - if (zlog_default->maxlvl[ZLOG_DEST_SYSLOG] != ZLOG_DISABLED) { - vty_out(vty, "log syslog"); - if (zlog_default->maxlvl[ZLOG_DEST_SYSLOG] - != zlog_default->default_lvl) - 
vty_out(vty, " %s", - zlog_priority[zlog_default->maxlvl - [ZLOG_DEST_SYSLOG]]); - vty_out(vty, "\n"); - } - - if (zlog_default->facility != LOG_DAEMON) - vty_out(vty, "log facility %s\n", - facility_name(zlog_default->facility)); - - if (zlog_default->record_priority == 1) - vty_out(vty, "log record-priority\n"); - - if (zlog_default->timestamp_precision > 0) - vty_out(vty, "log timestamp precision %d\n", - zlog_default->timestamp_precision); + log_config_write(vty); if (host.advanced) vty_out(vty, "service advanced-vty\n"); @@ -1448,90 +1279,25 @@ DEFUN (config_exit, return CMD_SUCCESS; } +static int root_on_exit(struct vty *vty) +{ + if (vty_shell(vty)) + exit(0); + else + vty->status = VTY_CLOSE; + return 0; +} + void cmd_exit(struct vty *vty) { - switch (vty->node) { - case VIEW_NODE: - case ENABLE_NODE: - if (vty_shell(vty)) - exit(0); - else - vty->status = VTY_CLOSE; - break; - case CONFIG_NODE: - vty->node = ENABLE_NODE; - vty_config_exit(vty); - break; - case INTERFACE_NODE: - case PW_NODE: - case VRF_NODE: - case NH_GROUP_NODE: - case ZEBRA_NODE: - case BGP_NODE: - case RIP_NODE: - case EIGRP_NODE: - case BABEL_NODE: - case RIPNG_NODE: - case OSPF_NODE: - case OSPF6_NODE: - case LDP_NODE: - case LDP_L2VPN_NODE: - case ISIS_NODE: - case OPENFABRIC_NODE: - case KEYCHAIN_NODE: - case RMAP_NODE: - case PBRMAP_NODE: - case VTY_NODE: - case BFD_NODE: - vty->node = CONFIG_NODE; - break; - case BGP_IPV4_NODE: - case BGP_IPV4M_NODE: - case BGP_IPV4L_NODE: - case BGP_VPNV4_NODE: - case BGP_VPNV6_NODE: - case BGP_FLOWSPECV4_NODE: - case BGP_FLOWSPECV6_NODE: - case BGP_VRF_POLICY_NODE: - case BGP_VNC_DEFAULTS_NODE: - case BGP_VNC_NVE_GROUP_NODE: - case BGP_VNC_L2_GROUP_NODE: - case BGP_IPV6_NODE: - case BGP_IPV6M_NODE: - case BGP_EVPN_NODE: - case BGP_IPV6L_NODE: - case BMP_NODE: - vty->node = BGP_NODE; - break; - case BGP_EVPN_VNI_NODE: - vty->node = BGP_EVPN_NODE; - break; - case LDP_IPV4_NODE: - case LDP_IPV6_NODE: - vty->node = LDP_NODE; - break; - case 
LDP_IPV4_IFACE_NODE: - vty->node = LDP_IPV4_NODE; - break; - case LDP_IPV6_IFACE_NODE: - vty->node = LDP_IPV6_NODE; - break; - case LDP_PSEUDOWIRE_NODE: - vty->node = LDP_L2VPN_NODE; - break; - case KEYCHAIN_KEY_NODE: - vty->node = KEYCHAIN_NODE; - break; - case LINK_PARAMS_NODE: - vty->node = INTERFACE_NODE; - break; - case BFD_PEER_NODE: - vty->node = BFD_NODE; - break; - default: - break; - } + struct cmd_node *cnode = vector_lookup(cmdvec, vty->node); + if (cnode->node_exit) { + if (!cnode->node_exit(vty)) + return; + } + if (cnode->parent_node) + vty->node = cnode->parent_node; if (vty->xpath_index > 0) vty->xpath_index--; } @@ -1556,7 +1322,6 @@ DEFUN (config_end, vty_config_exit(vty); vty->node = ENABLE_NODE; } - return CMD_SUCCESS; } @@ -1716,9 +1481,8 @@ static int vty_write_config(struct vty *vty) vty_out(vty, "!\n"); for (i = 0; i < vector_active(cmdvec); i++) - if ((node = vector_slot(cmdvec, i)) && node->func - && (node->vtysh || vty->type != VTY_SHELL)) { - if ((*node->func)(vty)) + if ((node = vector_slot(cmdvec, i)) && node->config_write) { + if ((*node->config_write)(vty)) vty_out(vty, "!\n"); } @@ -2273,7 +2037,8 @@ DEFUN (config_logmsg, int level; char *message; - if ((level = level_match(argv[idx_log_level]->arg)) == ZLOG_DISABLED) + level = log_level_match(argv[idx_log_level]->arg); + if (level == ZLOG_DISABLED) return CMD_ERR_NO_MATCH; zlog(level, "%s", @@ -2284,348 +2049,6 @@ DEFUN (config_logmsg, return CMD_SUCCESS; } -DEFUN (show_logging, - show_logging_cmd, - "show logging", - SHOW_STR - "Show current logging configuration\n") -{ - struct zlog *zl = zlog_default; - - vty_out(vty, "Syslog logging: "); - if (zl->maxlvl[ZLOG_DEST_SYSLOG] == ZLOG_DISABLED) - vty_out(vty, "disabled"); - else - vty_out(vty, "level %s, facility %s, ident %s", - zlog_priority[zl->maxlvl[ZLOG_DEST_SYSLOG]], - facility_name(zl->facility), zl->ident); - vty_out(vty, "\n"); - - vty_out(vty, "Stdout logging: "); - if (zl->maxlvl[ZLOG_DEST_STDOUT] == ZLOG_DISABLED) - 
vty_out(vty, "disabled"); - else - vty_out(vty, "level %s", - zlog_priority[zl->maxlvl[ZLOG_DEST_STDOUT]]); - vty_out(vty, "\n"); - - vty_out(vty, "Monitor logging: "); - if (zl->maxlvl[ZLOG_DEST_MONITOR] == ZLOG_DISABLED) - vty_out(vty, "disabled"); - else - vty_out(vty, "level %s", - zlog_priority[zl->maxlvl[ZLOG_DEST_MONITOR]]); - vty_out(vty, "\n"); - - vty_out(vty, "File logging: "); - if ((zl->maxlvl[ZLOG_DEST_FILE] == ZLOG_DISABLED) || !zl->fp) - vty_out(vty, "disabled"); - else - vty_out(vty, "level %s, filename %s", - zlog_priority[zl->maxlvl[ZLOG_DEST_FILE]], - zl->filename); - vty_out(vty, "\n"); - - vty_out(vty, "Protocol name: %s\n", zl->protoname); - vty_out(vty, "Record priority: %s\n", - (zl->record_priority ? "enabled" : "disabled")); - vty_out(vty, "Timestamp precision: %d\n", zl->timestamp_precision); - - return CMD_SUCCESS; -} - -DEFUN (config_log_stdout, - config_log_stdout_cmd, - "log stdout [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - "Logging control\n" - "Set stdout logging level\n" - LOG_LEVEL_DESC) -{ - int idx_log_level = 2; - - if (argc == idx_log_level) { - zlog_set_level(ZLOG_DEST_STDOUT, zlog_default->default_lvl); - return CMD_SUCCESS; - } - int level; - - if ((level = level_match(argv[idx_log_level]->arg)) == ZLOG_DISABLED) - return CMD_ERR_NO_MATCH; - zlog_set_level(ZLOG_DEST_STDOUT, level); - return CMD_SUCCESS; -} - -DEFUN (no_config_log_stdout, - no_config_log_stdout_cmd, - "no log stdout [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - NO_STR - "Logging control\n" - "Cancel logging to stdout\n" - LOG_LEVEL_DESC) -{ - zlog_set_level(ZLOG_DEST_STDOUT, ZLOG_DISABLED); - return CMD_SUCCESS; -} - -DEFUN (config_log_monitor, - config_log_monitor_cmd, - "log monitor [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - "Logging control\n" - "Set terminal line (monitor) logging level\n" - LOG_LEVEL_DESC) -{ - int 
idx_log_level = 2; - - if (argc == idx_log_level) { - zlog_set_level(ZLOG_DEST_MONITOR, zlog_default->default_lvl); - return CMD_SUCCESS; - } - int level; - - if ((level = level_match(argv[idx_log_level]->arg)) == ZLOG_DISABLED) - return CMD_ERR_NO_MATCH; - zlog_set_level(ZLOG_DEST_MONITOR, level); - return CMD_SUCCESS; -} - -DEFUN (no_config_log_monitor, - no_config_log_monitor_cmd, - "no log monitor [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - NO_STR - "Logging control\n" - "Disable terminal line (monitor) logging\n" - LOG_LEVEL_DESC) -{ - zlog_set_level(ZLOG_DEST_MONITOR, ZLOG_DISABLED); - return CMD_SUCCESS; -} - -static int set_log_file(struct vty *vty, const char *fname, int loglevel) -{ - int ret; - char *p = NULL; - const char *fullpath; - - /* Path detection. */ - if (!IS_DIRECTORY_SEP(*fname)) { - char cwd[MAXPATHLEN + 1]; - cwd[MAXPATHLEN] = '\0'; - - if (getcwd(cwd, MAXPATHLEN) == NULL) { - flog_err_sys(EC_LIB_SYSTEM_CALL, - "config_log_file: Unable to alloc mem!"); - return CMD_WARNING_CONFIG_FAILED; - } - - p = XMALLOC(MTYPE_TMP, strlen(cwd) + strlen(fname) + 2); - sprintf(p, "%s/%s", cwd, fname); - fullpath = p; - } else - fullpath = fname; - - ret = zlog_set_file(fullpath, loglevel); - - XFREE(MTYPE_TMP, p); - - if (!ret) { - if (vty) - vty_out(vty, "can't open logfile %s\n", fname); - return CMD_WARNING_CONFIG_FAILED; - } - - XFREE(MTYPE_HOST, host.logfile); - - host.logfile = XSTRDUP(MTYPE_HOST, fname); - -#if defined(HAVE_CUMULUS) - if (zlog_default->maxlvl[ZLOG_DEST_SYSLOG] != ZLOG_DISABLED) - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); -#endif - return CMD_SUCCESS; -} - -void command_setup_early_logging(const char *dest, const char *level) -{ - char *token; - - if (level) { - int nlevel = level_match(level); - - if (nlevel != ZLOG_DISABLED) - zlog_default->default_lvl = nlevel; - } - - if (!dest) - return; - - if (strcmp(dest, "stdout") == 0) { - zlog_set_level(ZLOG_DEST_STDOUT, 
zlog_default->default_lvl); - return; - } - - if (strcmp(dest, "syslog") == 0) { - zlog_set_level(ZLOG_DEST_SYSLOG, zlog_default->default_lvl); - return; - } - - token = strstr(dest, ":"); - if (token == NULL) - return; - - token++; - - set_log_file(NULL, token, zlog_default->default_lvl); -} - -DEFUN (config_log_file, - config_log_file_cmd, - "log file FILENAME [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - "Logging control\n" - "Logging to file\n" - "Logging filename\n" - LOG_LEVEL_DESC) -{ - int idx_filename = 2; - int idx_log_levels = 3; - if (argc == 4) { - int level; - if ((level = level_match(argv[idx_log_levels]->arg)) - == ZLOG_DISABLED) - return CMD_ERR_NO_MATCH; - return set_log_file(vty, argv[idx_filename]->arg, level); - } else - return set_log_file(vty, argv[idx_filename]->arg, - zlog_default->default_lvl); -} - -static void disable_log_file(void) -{ - zlog_reset_file(); - - XFREE(MTYPE_HOST, host.logfile); -} - -DEFUN (no_config_log_file, - no_config_log_file_cmd, - "no log file [FILENAME [LEVEL]]", - NO_STR - "Logging control\n" - "Cancel logging to file\n" - "Logging file name\n" - "Logging level\n") -{ - disable_log_file(); - return CMD_SUCCESS; -} - -DEFUN (config_log_syslog, - config_log_syslog_cmd, - "log syslog [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - "Logging control\n" - "Set syslog logging level\n" - LOG_LEVEL_DESC) -{ - int idx_log_levels = 2; - - if (argc == 3) { - int level; - if ((level = level_match(argv[idx_log_levels]->arg)) - == ZLOG_DISABLED) - return CMD_ERR_NO_MATCH; - zlog_set_level(ZLOG_DEST_SYSLOG, level); - return CMD_SUCCESS; - } else { - zlog_set_level(ZLOG_DEST_SYSLOG, zlog_default->default_lvl); - return CMD_SUCCESS; - } -} - -DEFUN (no_config_log_syslog, - no_config_log_syslog_cmd, - "no log syslog [<kern|user|mail|daemon|auth|syslog|lpr|news|uucp|cron|local0|local1|local2|local3|local4|local5|local6|local7>] 
[<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", - NO_STR - "Logging control\n" - "Cancel logging to syslog\n" - LOG_FACILITY_DESC - LOG_LEVEL_DESC) -{ - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); - return CMD_SUCCESS; -} - -DEFUN (config_log_facility, - config_log_facility_cmd, - "log facility <kern|user|mail|daemon|auth|syslog|lpr|news|uucp|cron|local0|local1|local2|local3|local4|local5|local6|local7>", - "Logging control\n" - "Facility parameter for syslog messages\n" - LOG_FACILITY_DESC) -{ - int idx_target = 2; - int facility = facility_match(argv[idx_target]->arg); - - zlog_default->facility = facility; - return CMD_SUCCESS; -} - -DEFUN (no_config_log_facility, - no_config_log_facility_cmd, - "no log facility [<kern|user|mail|daemon|auth|syslog|lpr|news|uucp|cron|local0|local1|local2|local3|local4|local5|local6|local7>]", - NO_STR - "Logging control\n" - "Reset syslog facility to default (daemon)\n" - LOG_FACILITY_DESC) -{ - zlog_default->facility = LOG_DAEMON; - return CMD_SUCCESS; -} - -DEFUN (config_log_record_priority, - config_log_record_priority_cmd, - "log record-priority", - "Logging control\n" - "Log the priority of the message within the message\n") -{ - zlog_default->record_priority = 1; - return CMD_SUCCESS; -} - -DEFUN (no_config_log_record_priority, - no_config_log_record_priority_cmd, - "no log record-priority", - NO_STR - "Logging control\n" - "Do not log the priority of the message within the message\n") -{ - zlog_default->record_priority = 0; - return CMD_SUCCESS; -} - -DEFUN (config_log_timestamp_precision, - config_log_timestamp_precision_cmd, - "log timestamp precision (0-6)", - "Logging control\n" - "Timestamp configuration\n" - "Set the timestamp precision\n" - "Number of subsecond digits\n") -{ - int idx_number = 3; - zlog_default->timestamp_precision = - strtoul(argv[idx_number]->arg, NULL, 10); - return CMD_SUCCESS; -} - -DEFUN (no_config_log_timestamp_precision, - 
no_config_log_timestamp_precision_cmd, - "no log timestamp precision", - NO_STR - "Logging control\n" - "Timestamp configuration\n" - "Reset the timestamp precision to the default value of 0\n") -{ - zlog_default->timestamp_precision = 0; - return CMD_SUCCESS; -} - DEFUN (debug_memstats, debug_memstats_cmd, "[no] debug memstats-at-exit", @@ -2800,7 +2223,7 @@ DEFUN(find, if (regexec(&exp, cli->string, 0, NULL, 0) == 0) vty_out(vty, " (%s) %s\n", - node_names[node->node], cli->string); + node->name, cli->string); } } @@ -2848,9 +2271,6 @@ void cmd_init(int terminal) { struct utsname names; - if (array_size(node_names) != NODE_TYPE_MAX) - assert(!"Update the CLI node description array!"); - uname(&names); qobj_init(); @@ -2875,7 +2295,6 @@ void cmd_init(int terminal) #endif host.password = NULL; host.enable = NULL; - host.logfile = NULL; host.config = NULL; host.noconfig = (terminal < 0); host.lines = -1; @@ -2883,11 +2302,11 @@ void cmd_init(int terminal) host.motdfile = NULL; /* Install top nodes. */ - install_node(&view_node, NULL); - install_node(&enable_node, NULL); - install_node(&auth_node, NULL); - install_node(&auth_enable_node, NULL); - install_node(&config_node, config_write_host); + install_node(&view_node); + install_node(&enable_node); + install_node(&auth_node); + install_node(&auth_enable_node); + install_node(&config_node); /* Each node's basic commands. 
*/ install_element(VIEW_NODE, &show_version_cmd); @@ -2903,7 +2322,6 @@ void cmd_init(int terminal) install_element(VIEW_NODE, &config_enable_cmd); install_element(VIEW_NODE, &config_terminal_length_cmd); install_element(VIEW_NODE, &config_terminal_no_length_cmd); - install_element(VIEW_NODE, &show_logging_cmd); install_element(VIEW_NODE, &show_commandtree_cmd); install_element(VIEW_NODE, &echo_cmd); install_element(VIEW_NODE, &autocomplete_cmd); @@ -2930,6 +2348,8 @@ void cmd_init(int terminal) install_element(CONFIG_NODE, &no_domainname_cmd); if (terminal > 0) { + full_cli = true; + install_element(CONFIG_NODE, &debug_memstats_cmd); install_element(CONFIG_NODE, &password_cmd); @@ -2937,23 +2357,6 @@ void cmd_init(int terminal) install_element(CONFIG_NODE, &enable_password_cmd); install_element(CONFIG_NODE, &no_enable_password_cmd); - install_element(CONFIG_NODE, &config_log_stdout_cmd); - install_element(CONFIG_NODE, &no_config_log_stdout_cmd); - install_element(CONFIG_NODE, &config_log_monitor_cmd); - install_element(CONFIG_NODE, &no_config_log_monitor_cmd); - install_element(CONFIG_NODE, &config_log_file_cmd); - install_element(CONFIG_NODE, &no_config_log_file_cmd); - install_element(CONFIG_NODE, &config_log_syslog_cmd); - install_element(CONFIG_NODE, &no_config_log_syslog_cmd); - install_element(CONFIG_NODE, &config_log_facility_cmd); - install_element(CONFIG_NODE, &no_config_log_facility_cmd); - install_element(CONFIG_NODE, &config_log_record_priority_cmd); - install_element(CONFIG_NODE, - &no_config_log_record_priority_cmd); - install_element(CONFIG_NODE, - &config_log_timestamp_precision_cmd); - install_element(CONFIG_NODE, - &no_config_log_timestamp_precision_cmd); install_element(CONFIG_NODE, &service_password_encrypt_cmd); install_element(CONFIG_NODE, &no_service_password_encrypt_cmd); install_element(CONFIG_NODE, &banner_motd_default_cmd); @@ -2963,6 +2366,7 @@ void cmd_init(int terminal) install_element(CONFIG_NODE, &service_terminal_length_cmd); 
install_element(CONFIG_NODE, &no_service_terminal_length_cmd); + log_cmd_init(); vrf_install_commands(); } @@ -3000,7 +2404,6 @@ void cmd_terminate(void) XFREE(MTYPE_HOST, host.password_encrypt); XFREE(MTYPE_HOST, host.enable); XFREE(MTYPE_HOST, host.enable_encrypt); - XFREE(MTYPE_HOST, host.logfile); XFREE(MTYPE_HOST, host.motdfile); XFREE(MTYPE_HOST, host.config); XFREE(MTYPE_HOST, host.motd); diff --git a/lib/command.h b/lib/command.h index ea8a76a964..725a201446 100644 --- a/lib/command.h +++ b/lib/command.h @@ -66,9 +66,6 @@ struct host { /* System wide terminal lines. */ int lines; - /* Log filename. */ - char *logfile; - /* config file name of this host */ char *config; int noconfig; @@ -149,6 +146,7 @@ enum node_type { MPLS_NODE, /* MPLS config node */ PW_NODE, /* Pseudowire config node */ VTY_NODE, /* Vty node. */ + FPM_NODE, /* Dataplane FPM node. */ LINK_PARAMS_NODE, /* Link-parameters node */ BGP_EVPN_VNI_NODE, /* BGP EVPN VNI */ RPKI_NODE, /* RPKI node for configuration of RPKI cache server @@ -165,22 +163,29 @@ enum node_type { extern vector cmdvec; extern const struct message tokennames[]; -extern const char *const node_names[]; + +/* for external users depending on struct layout */ +#define FRR_CMD_NODE_20200416 /* Node which has some commands and prompt string and configuration function pointer . */ struct cmd_node { + const char *name; + /* Node index. */ enum node_type node; + enum node_type parent_node; /* Prompt character at vty interface. */ const char *prompt; - /* Is this node's configuration goes to vtysh ? */ - int vtysh; - /* Node's configuration write function */ - int (*func)(struct vty *); + int (*config_write)(struct vty *); + + /* called when leaving the node on a VTY session. 
+ * return 1 if normal exit processing should happen, 0 to suppress + */ + int (*node_exit)(struct vty *); /* Node's command graph */ struct graph *cmdgraph; @@ -434,7 +439,7 @@ struct cmd_node { #define NO_GR_NEIGHBOR_HELPER_CMD "Undo Graceful Restart Helper command for a neighbor\n" /* Prototypes. */ -extern void install_node(struct cmd_node *node, int (*)(struct vty *)); +extern void install_node(struct cmd_node *node); extern void install_default(enum node_type); extern void install_element(enum node_type, const struct cmd_element *); diff --git a/lib/command_match.c b/lib/command_match.c index 0195aebc17..801b05f157 100644 --- a/lib/command_match.c +++ b/lib/command_match.c @@ -88,7 +88,7 @@ enum matcher_rv command_match(struct graph *cmdgraph, vector vline, // prepend a dummy token to match that pesky start node vector vvline = vector_init(vline->alloced + 1); - vector_set_index(vvline, 0, (void *)XSTRDUP(MTYPE_TMP, "dummy")); + vector_set_index(vvline, 0, XSTRDUP(MTYPE_TMP, "dummy")); memcpy(vvline->index + 1, vline->index, sizeof(void *) * vline->alloced); vvline->active = vline->active + 1; @@ -1,5 +1,5 @@ /* CSV - * Copyright (C) 2013 Cumulus Networks, Inc. + * Copyright (C) 2013,2020 Cumulus Networks, Inc. * * This file is part of Quagga. 
* @@ -22,6 +22,8 @@ #include "config.h" #endif +#include <zebra.h> + #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -419,7 +421,7 @@ void csv_clone_record(csv_t *csv, csv_record_t *in_rec, csv_record_t **out_rec) } rec->record = curr; rec->rec_len = in_rec->rec_len; - strcpy(rec->record, in_rec->record); + strlcpy(rec->record, in_rec->record, csv->buflen); /* decode record into fields */ csv_decode_record(rec); diff --git a/lib/defaults.c b/lib/defaults.c index 71ccc73cc6..7466aad5b1 100644 --- a/lib/defaults.c +++ b/lib/defaults.c @@ -100,10 +100,10 @@ static bool frr_match_version(const char *name, const char *vspec, const char *version, bool check) { int cmp; - static struct spec { + static const struct spec { const char *str; - bool dir, eq; - } *s, specs[] = { + int dir, eq; + } specs[] = { {"<=", -1, 1}, {">=", 1, 1}, {"==", 0, 1}, @@ -112,6 +112,7 @@ static bool frr_match_version(const char *name, const char *vspec, {"=", 0, 1}, {NULL, 0, 0}, }; + const struct spec *s; if (!vspec) /* NULL = all versions */ diff --git a/lib/filter.c b/lib/filter.c index 3226fb2f5e..da02a77763 100644 --- a/lib/filter.c +++ b/lib/filter.c @@ -2294,6 +2294,7 @@ DEFUN (ipv6_access_list_exact, if (argv_find(argv, argc, "exact-match", &idx)) exact = 1; + assert(prefix); return filter_set_zebra(vty, argv[idx_word]->arg, seq, permit_deny, AFI_IP6, prefix, exact, 1); } @@ -2812,9 +2813,13 @@ static int config_write_access(struct vty *vty, afi_t afi) return write; } +static int config_write_access_mac(struct vty *vty); static struct cmd_node access_mac_node = { - ACCESS_MAC_NODE, "", /* Access list has no interface. */ - 1}; + .name = "MAC access list", + .node = ACCESS_MAC_NODE, + .prompt = "", + .config_write = config_write_access_mac, +}; static int config_write_access_mac(struct vty *vty) { @@ -2850,7 +2855,7 @@ static void access_list_reset_mac(void) /* Install vty related command. 
*/ static void access_list_init_mac(void) { - install_node(&access_mac_node, config_write_access_mac); + install_node(&access_mac_node); install_element(ENABLE_NODE, &show_mac_access_list_cmd); install_element(ENABLE_NODE, &show_mac_access_list_name_cmd); @@ -2863,9 +2868,13 @@ static void access_list_init_mac(void) } /* Access-list node. */ -static struct cmd_node access_node = {ACCESS_NODE, - "", /* Access list has no interface. */ - 1}; +static int config_write_access_ipv4(struct vty *vty); +static struct cmd_node access_node = { + .name = "ipv4 access list", + .node = ACCESS_NODE, + .prompt = "", + .config_write = config_write_access_ipv4, +}; static int config_write_access_ipv4(struct vty *vty) { @@ -2901,7 +2910,7 @@ static void access_list_reset_ipv4(void) /* Install vty related command. */ static void access_list_init_ipv4(void) { - install_node(&access_node, config_write_access_ipv4); + install_node(&access_node); install_element(ENABLE_NODE, &show_ip_access_list_cmd); install_element(ENABLE_NODE, &show_ip_access_list_name_cmd); @@ -2948,7 +2957,13 @@ static void access_list_init_ipv4(void) install_element(CONFIG_NODE, &no_access_list_remark_comment_cmd); } -static struct cmd_node access_ipv6_node = {ACCESS_IPV6_NODE, "", 1}; +static int config_write_access_ipv6(struct vty *vty); +static struct cmd_node access_ipv6_node = { + .name = "ipv6 access list", + .node = ACCESS_IPV6_NODE, + .prompt = "", + .config_write = config_write_access_ipv6, +}; static int config_write_access_ipv6(struct vty *vty) { @@ -2983,7 +2998,7 @@ static void access_list_reset_ipv6(void) static void access_list_init_ipv6(void) { - install_node(&access_ipv6_node, config_write_access_ipv6); + install_node(&access_ipv6_node); install_element(ENABLE_NODE, &show_ipv6_access_list_cmd); install_element(ENABLE_NODE, &show_ipv6_access_list_name_cmd); diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c index 55f0b55ed6..e237934f81 100644 --- a/lib/frr_pthread.c +++ b/lib/frr_pthread.c @@ -27,6 
+27,7 @@ #include "frr_pthread.h" #include "memory.h" #include "linklist.h" +#include "zlog.h" DEFINE_MTYPE_STATIC(LIB, FRR_PTHREAD, "FRR POSIX Thread") DEFINE_MTYPE_STATIC(LIB, PTHREAD_PRIM, "POSIX sync primitives") @@ -273,6 +274,8 @@ static void *fpt_run(void *arg) struct frr_pthread *fpt = arg; fpt->master->owner = pthread_self(); + zlog_tls_buffer_init(); + int sleeper[2]; pipe(sleeper); thread_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL); @@ -294,5 +297,7 @@ static void *fpt_run(void *arg) close(sleeper[1]); close(sleeper[0]); + zlog_tls_buffer_fini(); + return NULL; } diff --git a/lib/frrlua.c b/lib/frrlua.c index 26610556dc..9f9cf8c1f6 100644 --- a/lib/frrlua.c +++ b/lib/frrlua.c @@ -5,7 +5,7 @@ * Copyright (C) 2016 Cumulus Networks, Inc. * Donald Sharp * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software diff --git a/lib/frrlua.h b/lib/frrlua.h index 374eb70311..40c7a67b89 100644 --- a/lib/frrlua.h +++ b/lib/frrlua.h @@ -5,7 +5,7 @@ * Copyright (C) 2016 Cumulus Networks, Inc. * Donald Sharp * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). 
* * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software diff --git a/lib/grammar_sandbox.c b/lib/grammar_sandbox.c index 8ccdbfcbc1..a40b815caa 100644 --- a/lib/grammar_sandbox.c +++ b/lib/grammar_sandbox.c @@ -399,7 +399,7 @@ DEFUN (grammar_findambig, if (!nodegraph) continue; vty_out(vty, "scanning node %d (%s)\n", scannode - 1, - node_names[scannode - 1]); + cnode->name); } commands = cmd_graph_permutations(nodegraph); diff --git a/lib/grammar_sandbox_main.c b/lib/grammar_sandbox_main.c index 4bd8f5138a..fbb97d2dd5 100644 --- a/lib/grammar_sandbox_main.c +++ b/lib/grammar_sandbox_main.c @@ -7,7 +7,7 @@ * Copyright (C) 2016 Cumulus Networks, Inc. * Copyright (C) 2017 David Lamparter for NetDEF, Inc. * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software @@ -45,11 +45,7 @@ int main(int argc, char **argv) master = thread_master_create(NULL); - openzlog("grammar_sandbox", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID, - LOG_DAEMON); - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_STDOUT, LOG_DEBUG); - zlog_set_level(ZLOG_DEST_MONITOR, ZLOG_DISABLED); + zlog_aux_init("NONE: ", LOG_DEBUG); /* Library inits. 
*/ cmd_init(1); @@ -58,7 +54,7 @@ int main(int argc, char **argv) vty_init(master, true); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); vty_stdio(vty_do_exit); diff --git a/lib/hook.c b/lib/hook.c index 870d158aac..5a8ad00d66 100644 --- a/lib/hook.c +++ b/lib/hook.c @@ -18,16 +18,26 @@ #include "config.h" #endif +#include <string.h> + #include "memory.h" #include "hook.h" DEFINE_MTYPE_STATIC(LIB, HOOK_ENTRY, "Hook entry") -void _hook_register(struct hook *hook, void *funcptr, void *arg, bool has_arg, - struct frrmod_runtime *module, const char *funcname, - int priority) +void _hook_register(struct hook *hook, struct hookent *stackent, void *funcptr, + void *arg, bool has_arg, struct frrmod_runtime *module, + const char *funcname, int priority) { - struct hookent *he = XCALLOC(MTYPE_HOOK_ENTRY, sizeof(*he)), **pos; + struct hookent *he, **pos; + + if (!stackent->ent_used) + he = stackent; + else { + he = XCALLOC(MTYPE_HOOK_ENTRY, sizeof(*he)); + he->ent_on_heap = true; + } + he->ent_used = true; he->hookfn = funcptr; he->hookarg = arg; he->has_arg = has_arg; @@ -52,7 +62,10 @@ void _hook_unregister(struct hook *hook, void *funcptr, void *arg, bool has_arg) if (he->hookfn == funcptr && he->hookarg == arg && he->has_arg == has_arg) { *prev = he->next; - XFREE(MTYPE_HOOK_ENTRY, he); + if (he->ent_on_heap) + XFREE(MTYPE_HOOK_ENTRY, he); + else + memset(he, 0, sizeof(*he)); break; } } diff --git a/lib/hook.h b/lib/hook.h index f7fb7b8a5c..3823cebe6a 100644 --- a/lib/hook.h +++ b/lib/hook.h @@ -114,7 +114,9 @@ struct hookent { struct hookent *next; void *hookfn; /* actually a function pointer */ void *hookarg; - bool has_arg; + bool has_arg : 1; + bool ent_on_heap : 1; + bool ent_used : 1; int priority; struct frrmod_runtime *module; const char *fnname; @@ -133,21 +135,33 @@ struct hook { * always use hook_register(), which uses the static inline helper from * DECLARE_HOOK in order to get type safety */ -extern void _hook_register(struct hook 
*hook, void *funcptr, void *arg, - bool has_arg, struct frrmod_runtime *module, +extern void _hook_register(struct hook *hook, struct hookent *stackent, + void *funcptr, void *arg, bool has_arg, + struct frrmod_runtime *module, const char *funcname, int priority); + +/* most hook_register calls are not in a loop or similar and can use a + * statically allocated "struct hookent" from the data segment + */ +#define _hook_reg_svar(hook, funcptr, arg, has_arg, module, funcname, prio) \ + do { \ + static struct hookent stack_hookent = { .ent_on_heap = 0, }; \ + _hook_register(hook, &stack_hookent, funcptr, arg, has_arg, \ + module, funcname, prio); \ + } while (0) + #define hook_register(hookname, func) \ - _hook_register(&_hook_##hookname, _hook_typecheck_##hookname(func), \ + _hook_reg_svar(&_hook_##hookname, _hook_typecheck_##hookname(func), \ NULL, false, THIS_MODULE, #func, HOOK_DEFAULT_PRIORITY) #define hook_register_arg(hookname, func, arg) \ - _hook_register(&_hook_##hookname, \ + _hook_reg_svar(&_hook_##hookname, \ _hook_typecheck_arg_##hookname(func), arg, true, \ THIS_MODULE, #func, HOOK_DEFAULT_PRIORITY) #define hook_register_prio(hookname, prio, func) \ - _hook_register(&_hook_##hookname, _hook_typecheck_##hookname(func), \ + _hook_reg_svar(&_hook_##hookname, _hook_typecheck_##hookname(func), \ NULL, false, THIS_MODULE, #func, prio) #define hook_register_arg_prio(hookname, prio, func, arg) \ - _hook_register(&_hook_##hookname, \ + _hook_reg_svar(&_hook_##hookname, \ _hook_typecheck_arg_##hookname(func), arg, true, \ THIS_MODULE, #func, prio) @@ -582,23 +582,39 @@ struct interface *if_get_by_ifindex(ifindex_t ifindex, vrf_id_t vrf_id) return NULL; } -void if_set_index(struct interface *ifp, ifindex_t ifindex) +int if_set_index(struct interface *ifp, ifindex_t ifindex) { struct vrf *vrf; + if (ifp->ifindex == ifindex) + return 0; + vrf = vrf_get(ifp->vrf_id, NULL); assert(vrf); - if (ifp->ifindex == ifindex) - return; + /* + * If there is already an interface 
with this ifindex, we will collide + * on insertion, so don't even try. + */ + if (if_lookup_by_ifindex(ifindex, ifp->vrf_id)) + return -1; if (ifp->ifindex != IFINDEX_INTERNAL) IFINDEX_RB_REMOVE(vrf, ifp); ifp->ifindex = ifindex; - if (ifp->ifindex != IFINDEX_INTERNAL) - IFINDEX_RB_INSERT(vrf, ifp) + if (ifp->ifindex != IFINDEX_INTERNAL) { + /* + * This should never happen, since we checked if there was + * already an interface with the desired ifindex at the top of + * the function. Nevertheless. + */ + if (IFINDEX_RB_INSERT(vrf, ifp)) + return -1; + } + + return 0; } void if_set_name(struct interface *ifp, const char *name) @@ -1249,8 +1265,6 @@ struct if_link_params *if_link_params_get(struct interface *ifp) struct if_link_params *iflp = XCALLOC(MTYPE_IF_LINK_PARAMS, sizeof(struct if_link_params)); - if (iflp == NULL) - return NULL; /* Set TE metric equal to standard metric */ iflp->te_metric = ifp->metric; @@ -1278,8 +1292,6 @@ struct if_link_params *if_link_params_get(struct interface *ifp) void if_link_params_free(struct interface *ifp) { - if (ifp->link_params == NULL) - return; XFREE(MTYPE_IF_LINK_PARAMS, ifp->link_params); } @@ -1655,33 +1667,98 @@ static int lib_interface_description_destroy(enum nb_event event, return NB_OK; } -/* clang-format off */ +/* + * XPath: /frr-interface:lib/interface/state/if-index + */ +struct yang_data *lib_interface_state_if_index_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + + return yang_data_new_int32(xpath, ifp->ifindex); +} + +/* + * XPath: /frr-interface:lib/interface/state/mtu + */ +struct yang_data *lib_interface_state_mtu_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; -#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__) -/* gcc versions before 5.x miscalculate the size for structs with variable - * length arrays (they just count it as size 0) + return yang_data_new_uint16(xpath, ifp->mtu); 
+} + +/* + * XPath: /frr-interface:lib/interface/state/mtu6 + */ +struct yang_data *lib_interface_state_mtu6_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + + return yang_data_new_uint32(xpath, ifp->mtu6); +} + +/* + * XPath: /frr-interface:lib/interface/state/speed */ -struct frr_yang_module_info_size3 { - /* YANG module name. */ - const char *name; +struct yang_data *lib_interface_state_speed_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; - /* Northbound callbacks. */ - const struct { - /* Data path of this YANG node. */ - const char *xpath; + return yang_data_new_uint32(xpath, ifp->speed); +} - /* Callbacks implemented for this node. */ - struct nb_callbacks cbs; +/* + * XPath: /frr-interface:lib/interface/state/metric + */ +struct yang_data *lib_interface_state_metric_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; - /* Priority - lower priorities are processed first. */ - uint32_t priority; - } nodes[3]; -}; + return yang_data_new_uint32(xpath, ifp->metric); +} -const struct frr_yang_module_info_size3 frr_interface_info_size3 asm("frr_interface_info") = { -#else +/* + * XPath: /frr-interface:lib/interface/state/flags + */ +struct yang_data *lib_interface_state_flags_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-interface:lib/interface/state/type + */ +struct yang_data *lib_interface_state_type_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: /frr-interface:lib/interface/state/phy-address + */ +struct yang_data * +lib_interface_state_phy_address_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct ethaddr macaddr; + + memcpy(&macaddr.octet, ifp->hw_addr, ETH_ALEN); + + return yang_data_new_mac(xpath, &macaddr); +} + +/* clang-format off */ const struct frr_yang_module_info frr_interface_info = { -#endif .name = "frr-interface", .nodes = { { @@ -1704,6 +1781,54 @@ const struct frr_yang_module_info frr_interface_info = { }, }, { + .xpath = "/frr-interface:lib/interface/state/if-index", + .cbs = { + .get_elem = lib_interface_state_if_index_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/mtu", + .cbs = { + .get_elem = lib_interface_state_mtu_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/mtu6", + .cbs = { + .get_elem = lib_interface_state_mtu6_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/speed", + .cbs = { + .get_elem = lib_interface_state_speed_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/metric", + .cbs = { + .get_elem = lib_interface_state_metric_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/flags", + .cbs = { + .get_elem = lib_interface_state_flags_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/type", + .cbs = { + .get_elem = lib_interface_state_type_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/state/phy-address", + .cbs = { + .get_elem = lib_interface_state_phy_address_get_elem, + } + }, + { .xpath = NULL, }, } @@ -308,33 +308,58 @@ RB_HEAD(if_index_head, interface); RB_PROTOTYPE(if_index_head, interface, index_entry, if_cmp_index_func) DECLARE_QOBJ_TYPE(interface) -#define IFNAME_RB_INSERT(vrf, ifp) \ - if (RB_INSERT(if_name_head, &vrf->ifaces_by_name, (ifp))) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%s): corruption detected -- interface with this " \ 
- "name exists already in VRF %u!", \ - __func__, (ifp)->name, (ifp)->vrf_id); - -#define IFNAME_RB_REMOVE(vrf, ifp) \ - if (RB_REMOVE(if_name_head, &vrf->ifaces_by_name, (ifp)) == NULL) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%s): corruption detected -- interface with this " \ - "name doesn't exist in VRF %u!", \ - __func__, (ifp)->name, (ifp)->vrf_id); - -#define IFINDEX_RB_INSERT(vrf, ifp) \ - if (RB_INSERT(if_index_head, &vrf->ifaces_by_index, (ifp))) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%u): corruption detected -- interface with this " \ - "ifindex exists already in VRF %u!", \ - __func__, (ifp)->ifindex, (ifp)->vrf_id); - -#define IFINDEX_RB_REMOVE(vrf, ifp) \ - if (RB_REMOVE(if_index_head, &vrf->ifaces_by_index, (ifp)) == NULL) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%u): corruption detected -- interface with this " \ - "ifindex doesn't exist in VRF %u!", \ - __func__, (ifp)->ifindex, (ifp)->vrf_id); +#define IFNAME_RB_INSERT(vrf, ifp) \ + ({ \ + struct interface *_iz = \ + RB_INSERT(if_name_head, &vrf->ifaces_by_name, (ifp)); \ + if (_iz) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%s): corruption detected -- interface with this " \ + "name exists already in VRF %u!", \ + __func__, (ifp)->name, (ifp)->vrf_id); \ + _iz; \ + }) + +#define IFNAME_RB_REMOVE(vrf, ifp) \ + ({ \ + struct interface *_iz = \ + RB_REMOVE(if_name_head, &vrf->ifaces_by_name, (ifp)); \ + if (_iz == NULL) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%s): corruption detected -- interface with this " \ + "name doesn't exist in VRF %u!", \ + __func__, (ifp)->name, (ifp)->vrf_id); \ + _iz; \ + }) + + +#define IFINDEX_RB_INSERT(vrf, ifp) \ + ({ \ + struct interface *_iz = RB_INSERT( \ + if_index_head, &vrf->ifaces_by_index, (ifp)); \ + if (_iz) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%u): corruption detected -- interface with this " \ + "ifindex exists already in VRF %u!", \ + __func__, (ifp)->ifindex, (ifp)->vrf_id); \ + _iz; \ + }) + +#define IFINDEX_RB_REMOVE(vrf, ifp) \ + ({ \ + struct 
interface *_iz = RB_REMOVE( \ + if_index_head, &vrf->ifaces_by_index, (ifp)); \ + if (_iz == NULL) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%u): corruption detected -- interface with this " \ + "ifindex doesn't exist in VRF %u!", \ + __func__, (ifp)->ifindex, (ifp)->vrf_id); \ + _iz; \ + }) #define FOR_ALL_INTERFACES(vrf, ifp) \ if (vrf) \ @@ -502,7 +527,7 @@ extern struct interface *if_get_by_name(const char *ifname, vrf_id_t vrf_id); extern struct interface *if_get_by_ifindex(ifindex_t ifindex, vrf_id_t vrf_id); /* Sets the index and adds to index list */ -extern void if_set_index(struct interface *ifp, ifindex_t ifindex); +extern int if_set_index(struct interface *ifp, ifindex_t ifindex); /* Sets the name and adds to name list */ extern void if_set_name(struct interface *ifp, const char *name); @@ -577,6 +602,24 @@ extern void if_destroy_via_zapi(struct interface *ifp); extern const struct frr_yang_module_info frr_interface_info; +struct yang_data *lib_interface_state_if_index_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_interface_state_mtu_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_interface_state_mtu6_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_interface_state_speed_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_interface_state_metric_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_interface_state_flags_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_interface_state_type_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_state_phy_address_get_elem(const char *xpath, + const void *list_entry); + #ifdef __cplusplus } #endif diff --git a/lib/ipaddr.h b/lib/ipaddr.h index c6372f1abb..cd7f79a04e 100644 --- a/lib/ipaddr.h +++ b/lib/ipaddr.h @@ -112,7 +112,7 @@ static inline void ipv4_to_ipv4_mapped_ipv6(struct in6_addr *in6, /* * 
convert an ipv4 mapped ipv6 address back to ipv4 address */ -static inline void ipv4_mapped_ipv6_to_ipv4(struct in6_addr *in6, +static inline void ipv4_mapped_ipv6_to_ipv4(const struct in6_addr *in6, struct in_addr *in) { memset(in, 0, sizeof(struct in_addr)); diff --git a/lib/json.c b/lib/json.c index 991240639a..6bea3982e3 100644 --- a/lib/json.c +++ b/lib/json.c @@ -50,6 +50,11 @@ void json_object_int_add(struct json_object *obj, const char *key, int64_t i) json_object_object_add(obj, key, json_object_new_int64(i)); } +void json_object_double_add(struct json_object *obj, const char *key, double i) +{ + json_object_object_add(obj, key, json_object_new_double(i)); +} + void json_object_boolean_false_add(struct json_object *obj, const char *key) { json_object_object_add(obj, key, json_object_new_boolean(0)); diff --git a/lib/json.h b/lib/json.h index c8866c524a..afe0b175da 100644 --- a/lib/json.h +++ b/lib/json.h @@ -48,6 +48,9 @@ extern void json_object_int_add(struct json_object *obj, const char *key, int64_t i); void json_object_boolean_add(struct json_object *obj, const char *key, bool val); + +extern void json_object_double_add(struct json_object *obj, const char *key, + double i); extern void json_object_boolean_false_add(struct json_object *obj, const char *key); extern void json_object_boolean_true_add(struct json_object *obj, diff --git a/lib/keychain.c b/lib/keychain.c index ea512a2699..251211734b 100644 --- a/lib/keychain.c +++ b/lib/keychain.c @@ -959,11 +959,21 @@ DEFUN (no_send_lifetime, return CMD_SUCCESS; } -static struct cmd_node keychain_node = {KEYCHAIN_NODE, "%s(config-keychain)# ", - 1}; - -static struct cmd_node keychain_key_node = {KEYCHAIN_KEY_NODE, - "%s(config-keychain-key)# ", 1}; +static int keychain_config_write(struct vty *vty); +static struct cmd_node keychain_node = { + .name = "keychain", + .node = KEYCHAIN_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-keychain)# ", + .config_write = keychain_config_write, +}; + +static 
struct cmd_node keychain_key_node = { + .name = "keychain key", + .node = KEYCHAIN_KEY_NODE, + .parent_node = KEYCHAIN_NODE, + .prompt = "%s(config-keychain-key)# ", +}; static int keychain_strftime(char *buf, int bufsiz, time_t *time) { @@ -1042,8 +1052,8 @@ void keychain_init(void) { keychain_list = list_new(); - install_node(&keychain_node, keychain_config_write); - install_node(&keychain_key_node, NULL); + install_node(&keychain_node); + install_node(&keychain_key_node); install_default(KEYCHAIN_NODE); install_default(KEYCHAIN_KEY_NODE); diff --git a/lib/libfrr.c b/lib/libfrr.c index 3622890e46..ac165f254e 100644 --- a/lib/libfrr.c +++ b/lib/libfrr.c @@ -33,7 +33,6 @@ #include "lib_vty.h" #include "log_vty.h" #include "zclient.h" -#include "log_int.h" #include "module.h" #include "network.h" #include "lib_errors.h" @@ -630,6 +629,7 @@ struct thread_master *frr_init(void) { struct option_chain *oc; struct frrmod_runtime *module; + struct zprivs_ids_t ids; char moderr[256]; char p_instance[16] = "", p_pathspace[256] = ""; const char *dir; @@ -657,9 +657,11 @@ struct thread_master *frr_init(void) #endif zprivs_preinit(di->privs); + zprivs_get_ids(&ids); - openzlog(di->progname, di->logname, di->instance, - LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON); + zlog_init(di->progname, di->logname, di->instance, + ids.uid_normal, ids.gid_normal); + zlog_tls_buffer_init(); command_setup_early_logging(di->early_logging, di->early_loglevel); @@ -709,7 +711,6 @@ struct thread_master *frr_init(void) vty_init(master, di->log_always); lib_cmd_init(); - log_filter_cmd_init(); frr_pthread_init(); @@ -717,7 +718,7 @@ struct thread_master *frr_init(void) log_ref_vty_init(); lib_error_init(); - yang_init(); + yang_init(true); debug_init_cli(); @@ -1086,7 +1087,7 @@ void frr_run(struct thread_master *master) } /* end fixed stderr startup logging */ - zlog_startup_stderr = false; + zlog_startup_end(); struct thread thread; while (thread_fetch(master, &thread)) @@ -1119,7 +1120,8 @@ void 
frr_fini(void) /* signal_init -> nothing needed */ thread_master_free(master); master = NULL; - closezlog(); + zlog_tls_buffer_fini(); + zlog_fini(); /* frrmod_init -> nothing needed / hooks */ rcu_shutdown(); @@ -25,7 +25,6 @@ #include "zclient.h" #include "log.h" -#include "log_int.h" #include "memory.h" #include "command.h" #include "lib_errors.h" @@ -33,154 +32,12 @@ #include "printfrr.h" #include "frr_pthread.h" -#ifndef SUNOS_5 -#include <sys/un.h> -#endif -/* for printstack on solaris */ -#ifdef HAVE_UCONTEXT_H -#include <ucontext.h> -#endif - #ifdef HAVE_LIBUNWIND #define UNW_LOCAL_ONLY #include <libunwind.h> #include <dlfcn.h> #endif -DEFINE_MTYPE_STATIC(LIB, ZLOG, "Logging") - -/* hook for external logging */ -DEFINE_HOOK(zebra_ext_log, (int priority, const char *format, va_list args), - (priority, format, args)); - -static int logfile_fd = -1; /* Used in signal handler. */ - -struct zlog *zlog_default = NULL; -bool zlog_startup_stderr = true; - -/* lock protecting zlog_default for mt-safe zlog */ -static pthread_mutex_t loglock = PTHREAD_MUTEX_INITIALIZER; - -const char *zlog_priority[] = { - "emergencies", "alerts", "critical", "errors", "warnings", - "notifications", "informational", "debugging", NULL, -}; - -static char zlog_filters[ZLOG_FILTERS_MAX][ZLOG_FILTER_LENGTH_MAX + 1]; -static uint8_t zlog_filter_count; - -/* - * look for a match on the filter in the current filters, loglock must be held - */ -static int zlog_filter_lookup(const char *lookup) -{ - for (int i = 0; i < zlog_filter_count; i++) { - if (strncmp(lookup, zlog_filters[i], sizeof(zlog_filters[0])) - == 0) - return i; - } - return -1; -} - -void zlog_filter_clear(void) -{ - frr_with_mutex(&loglock) { - zlog_filter_count = 0; - } -} - -int zlog_filter_add(const char *filter) -{ - frr_with_mutex(&loglock) { - if (zlog_filter_count >= ZLOG_FILTERS_MAX) - return 1; - - if (zlog_filter_lookup(filter) != -1) - /* Filter already present */ - return -1; - - 
strlcpy(zlog_filters[zlog_filter_count], filter, - sizeof(zlog_filters[0])); - - if (zlog_filters[zlog_filter_count][0] == '\0') - /* Filter was either empty or didn't get copied - * correctly - */ - return -1; - - zlog_filter_count++; - } - return 0; -} - -int zlog_filter_del(const char *filter) -{ - frr_with_mutex(&loglock) { - int found_idx = zlog_filter_lookup(filter); - int last_idx = zlog_filter_count - 1; - - if (found_idx == -1) - /* Didn't find the filter to delete */ - return -1; - - /* Adjust the filter array */ - memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1], - (last_idx - found_idx) * sizeof(zlog_filters[0])); - - zlog_filter_count--; - } - return 0; -} - -/* Dump all filters to buffer, delimited by new line */ -int zlog_filter_dump(char *buf, size_t max_size) -{ - int len = 0; - - frr_with_mutex(&loglock) { - for (int i = 0; i < zlog_filter_count; i++) { - int ret; - ret = snprintf(buf + len, max_size - len, " %s\n", - zlog_filters[i]); - len += ret; - if ((ret < 0) || ((size_t)len >= max_size)) - return -1; - } - } - - return len; -} - -/* - * write_wrapper - * - * glibc has declared that the return value from write *must* not be - * ignored. - * gcc see's this problem and issues a warning for the line. - * - * Why is this a big deal you say? Because both of them are right - * and if you have -Werror enabled then all calls to write - * generate a build error and the build stops. - * - * clang has helpfully allowed this construct: - * (void)write(...) - * to tell the compiler yeah I know it has a return value - * I don't care about it at this time. - * gcc doesn't have this ability. - * - * This code was written such that it didn't care about the - * return value from write. At this time do I want - * to go through and fix and test this code for correctness. - * So just wrapper the bad behavior and move on. 
- */ -static void write_wrapper(int fd, const void *buf, size_t count) -{ - if (write(fd, buf, count) <= 0) - return; - - return; -} - /** * Looks up a message in a message list by key. * @@ -264,274 +121,12 @@ size_t quagga_timestamp(int timestamp_precision, char *buf, size_t buflen) return 0; } -static inline void timestamp_control_render(struct timestamp_control *ctl) -{ - if (!ctl->already_rendered) { - ctl->len = quagga_timestamp(ctl->precision, ctl->buf, - sizeof(ctl->buf)); - ctl->already_rendered = 1; - } -} - -/* Utility routine for current time printing. */ -static void time_print(FILE *fp, struct timestamp_control *ctl) -{ - timestamp_control_render(ctl); - fprintf(fp, "%s ", ctl->buf); -} - -static int time_print_buf(char *buf, int len, int max_size, - struct timestamp_control *ctl) -{ - timestamp_control_render(ctl); - - if (ctl->len + 1 >= (unsigned long)max_size) - return -1; - - return snprintf(buf + len, max_size - len, "%s ", ctl->buf); -} - -static void vzlog_file(struct zlog *zl, struct timestamp_control *tsctl, - const char *proto_str, int record_priority, int priority, - FILE *fp, const char *msg) -{ - time_print(fp, tsctl); - if (record_priority) - fprintf(fp, "%s: ", zlog_priority[priority]); - - fprintf(fp, "%s%s\n", proto_str, msg); - fflush(fp); -} - -/* Search a buf for the filter strings, loglock must be held */ -static int search_buf(const char *buf) -{ - char *found = NULL; - - for (int i = 0; i < zlog_filter_count; i++) { - found = strstr(buf, zlog_filters[i]); - if (found != NULL) - return 0; - } - - return -1; -} - -/* Filter out a log */ -static int vzlog_filter(struct zlog *zl, struct timestamp_control *tsctl, - const char *proto_str, int priority, const char *msg) -{ - int len = 0; - int ret = 0; - char buf[1024] = ""; - - ret = time_print_buf(buf, len, sizeof(buf), tsctl); - - len += ret; - if ((ret < 0) || ((size_t)len >= sizeof(buf))) - goto search; - - if (zl && zl->record_priority) - snprintf(buf + len, sizeof(buf) - len, 
"%s: %s: %s", - zlog_priority[priority], proto_str, msg); - else - snprintf(buf + len, sizeof(buf) - len, "%s: %s", proto_str, - msg); - -search: - return search_buf(buf); -} - -/* va_list version of zlog. */ -void vzlog(int priority, const char *format, va_list args) -{ - frr_mutex_lock_autounlock(&loglock); - - char proto_str[32] = ""; - int original_errno = errno; - struct timestamp_control tsctl = {}; - tsctl.already_rendered = 0; - struct zlog *zl = zlog_default; - char buf[256], *msg; - - if (zl == NULL) { - tsctl.precision = 0; - } else { - tsctl.precision = zl->timestamp_precision; - if (zl->instance) - sprintf(proto_str, "%s[%d]: ", zl->protoname, - zl->instance); - else - sprintf(proto_str, "%s: ", zl->protoname); - } - - msg = vasnprintfrr(MTYPE_TMP, buf, sizeof(buf), format, args); - - /* If it doesn't match on a filter, do nothing with the debug log */ - if ((priority == LOG_DEBUG) && zlog_filter_count - && vzlog_filter(zl, &tsctl, proto_str, priority, msg)) - goto out; - - /* call external hook */ - hook_call(zebra_ext_log, priority, format, args); - - /* When zlog_default is also NULL, use stderr for logging. */ - if (zl == NULL) { - time_print(stderr, &tsctl); - fprintf(stderr, "%s: %s\n", "unknown", msg); - fflush(stderr); - goto out; - } - - /* Syslog output */ - if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG]) - syslog(priority | zlog_default->facility, "%s", msg); - - /* File output. */ - if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp) - vzlog_file(zl, &tsctl, proto_str, zl->record_priority, priority, - zl->fp, msg); - - /* fixed-config logging to stderr while we're stating up & haven't - * daemonized / reached mainloop yet - * - * note the "else" on stdout output -- we don't want to print the same - * message to both stderr and stdout. 
*/ - if (zlog_startup_stderr && priority <= LOG_WARNING) - vzlog_file(zl, &tsctl, proto_str, 1, priority, stderr, msg); - else if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT]) - vzlog_file(zl, &tsctl, proto_str, zl->record_priority, priority, - stdout, msg); - - /* Terminal monitor. */ - if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR]) - vty_log((zl->record_priority ? zlog_priority[priority] : NULL), - proto_str, msg, &tsctl); - -out: - if (msg != buf) - XFREE(MTYPE_TMP, msg); - errno = original_errno; -} - -int vzlog_test(int priority) -{ - frr_mutex_lock_autounlock(&loglock); - - struct zlog *zl = zlog_default; - - /* When zlog_default is also NULL, use stderr for logging. */ - if (zl == NULL) - return 1; - /* Syslog output */ - else if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG]) - return 1; - /* File output. */ - else if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp) - return 1; - /* stdout output. */ - else if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT]) - return 1; - /* Terminal monitor. */ - else if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR]) - return 1; - - return 0; -} - /* * crash handling * * NB: only AS-Safe (async-signal) functions can be used here! */ -/* Needs to be enhanced to support Solaris. 
*/ -static int syslog_connect(void) -{ -#ifdef SUNOS_5 - return -1; -#else - int fd; - struct sockaddr_un addr; - - if ((fd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) - return -1; - addr.sun_family = AF_UNIX; -#ifdef _PATH_LOG -#define SYSLOG_SOCKET_PATH _PATH_LOG -#else -#define SYSLOG_SOCKET_PATH "/dev/log" -#endif - strlcpy(addr.sun_path, SYSLOG_SOCKET_PATH, sizeof(addr.sun_path)); -#undef SYSLOG_SOCKET_PATH - if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { - close(fd); - return -1; - } - return fd; -#endif -} - -static void syslog_sigsafe(int priority, const char *msg, size_t msglen) -{ - static int syslog_fd = -1; - char buf[sizeof("<1234567890>ripngd[1234567890]: ") + msglen + 50]; - struct fbuf fb = { .buf = buf, .pos = buf, .len = sizeof(buf) }; - - if ((syslog_fd < 0) && ((syslog_fd = syslog_connect()) < 0)) - return; - - /* forget about the timestamp, too difficult in a signal handler */ - bprintfrr(&fb, "<%d>%s", priority, zlog_default->ident); - if (zlog_default->syslog_options & LOG_PID) - bprintfrr(&fb, "[%ld]", (long)getpid()); - bprintfrr(&fb, ": %s", msg); - write_wrapper(syslog_fd, fb.buf, fb.pos - fb.buf); -} - -static int open_crashlog(void) -{ - char crashlog_buf[PATH_MAX]; - const char *crashlog_default = "/var/tmp/frr.crashlog", *crashlog; - - if (!zlog_default || !zlog_default->ident) - crashlog = crashlog_default; - else { - snprintfrr(crashlog_buf, sizeof(crashlog_buf), - "/var/tmp/frr.%s.crashlog", zlog_default->ident); - crashlog = crashlog_buf; - } - return open(crashlog, O_WRONLY | O_CREAT | O_EXCL, LOGFILE_MASK); -} - -/* N.B. implicit priority is most severe */ -#define PRI LOG_CRIT - -static void crash_write(struct fbuf *fb, char *msgstart) -{ - if (fb->pos == fb->buf) - return; - if (!msgstart) - msgstart = fb->buf; - - /* If no file logging configured, try to write to fallback log file. 
*/ - if ((logfile_fd >= 0) || ((logfile_fd = open_crashlog()) >= 0)) - write(logfile_fd, fb->buf, fb->pos - fb->buf); - if (!zlog_default) - write(STDERR_FILENO, fb->buf, fb->pos - fb->buf); - else { - if (PRI <= zlog_default->maxlvl[ZLOG_DEST_STDOUT]) - write(STDOUT_FILENO, fb->buf, fb->pos - fb->buf); - /* Remove trailing '\n' for monitor and syslog */ - fb->pos--; - if (PRI <= zlog_default->maxlvl[ZLOG_DEST_MONITOR]) - vty_log_fixed(fb->buf, fb->pos - fb->buf); - if (PRI <= zlog_default->maxlvl[ZLOG_DEST_SYSLOG]) - syslog_sigsafe(PRI | zlog_default->facility, msgstart, - fb->pos - msgstart); - } -} - /* Note: the goal here is to use only async-signal-safe functions. */ void zlog_signal(int signo, const char *action, void *siginfo_v, void *program_counter) @@ -540,14 +135,9 @@ void zlog_signal(int signo, const char *action, void *siginfo_v, time_t now; char buf[sizeof("DEFAULT: Received signal S at T (si_addr 0xP, PC 0xP); aborting...") + 100]; - char *msgstart; struct fbuf fb = { .buf = buf, .pos = buf, .len = sizeof(buf) }; time(&now); - if (zlog_default) - bprintfrr(&fb, "%s: ", zlog_default->protoname); - - msgstart = fb.pos; bprintfrr(&fb, "Received signal %d at %lld", signo, (long long)now); if (program_counter) @@ -559,9 +149,9 @@ void zlog_signal(int signo, const char *action, void *siginfo_v, (ptrdiff_t)siginfo->si_addr); bprintfrr(&fb, "; %s\n", action); - crash_write(&fb, msgstart); + zlog_sigsafe(fb.buf, fb.pos - fb.buf); - zlog_backtrace_sigsafe(PRI, program_counter); + zlog_backtrace_sigsafe(LOG_CRIT, program_counter); fb.pos = buf; @@ -574,7 +164,7 @@ void zlog_signal(int signo, const char *action, void *siginfo_v, bprintfrr(&fb, "in thread %s scheduled from %s:%d\n", tc->funcname, tc->schedfrom, tc->schedfrom_line); - crash_write(&fb, NULL); + zlog_sigsafe(fb.buf, fb.pos - fb.buf); } /* Log a backtrace using only async-signal-safe functions. 
@@ -609,85 +199,35 @@ void zlog_backtrace_sigsafe(int priority, void *program_counter) bprintfrr(&fb, " %s (mapped at %p)", dlinfo.dli_fname, dlinfo.dli_fbase); bprintfrr(&fb, "\n"); - crash_write(&fb, NULL); + zlog_sigsafe(fb.buf, fb.pos - fb.buf); } -#elif defined(HAVE_GLIBC_BACKTRACE) || defined(HAVE_PRINTSTACK) - static const char pclabel[] = "Program counter: "; +#elif defined(HAVE_GLIBC_BACKTRACE) void *array[64]; - int size; + int size, i; char buf[128]; struct fbuf fb = { .buf = buf, .pos = buf, .len = sizeof(buf) }; char **bt = NULL; -#ifdef HAVE_GLIBC_BACKTRACE size = backtrace(array, array_size(array)); if (size <= 0 || (size_t)size > array_size(array)) return; -#define DUMP(FD) \ - { \ - if (program_counter) { \ - write_wrapper(FD, pclabel, sizeof(pclabel) - 1); \ - backtrace_symbols_fd(&program_counter, 1, FD); \ - } \ - write_wrapper(FD, fb.buf, fb.pos - fb.buf); \ - backtrace_symbols_fd(array, size, FD); \ - } -#elif defined(HAVE_PRINTSTACK) - size = 0; - -#define DUMP(FD) \ - { \ - if (program_counter) \ - write_wrapper((FD), pclabel, sizeof(pclabel) - 1); \ - write_wrapper((FD), fb.buf, fb.pos - fb.buf); \ - printstack((FD)); \ - } -#endif /* HAVE_GLIBC_BACKTRACE, HAVE_PRINTSTACK */ + bprintfrr(&fb, "Backtrace for %d stack frames:", size); + zlog_sigsafe(fb.pos, fb.buf - fb.pos); - bprintfrr(&fb, "Backtrace for %d stack frames:\n", size); + bt = backtrace_symbols(array, size); - if ((logfile_fd >= 0) || ((logfile_fd = open_crashlog()) >= 0)) - DUMP(logfile_fd) - if (!zlog_default) - DUMP(STDERR_FILENO) - else { - if (priority <= zlog_default->maxlvl[ZLOG_DEST_STDOUT]) - DUMP(STDOUT_FILENO) - /* Remove trailing '\n' for monitor and syslog */ - fb.pos--; - if (priority <= zlog_default->maxlvl[ZLOG_DEST_MONITOR]) - vty_log_fixed(fb.buf, fb.pos - fb.buf); - if (priority <= zlog_default->maxlvl[ZLOG_DEST_SYSLOG]) - syslog_sigsafe(priority | zlog_default->facility, - fb.buf, fb.pos - fb.buf); - { - int i; -#ifdef HAVE_GLIBC_BACKTRACE - bt = 
backtrace_symbols(array, size); -#endif - /* Just print the function addresses. */ - for (i = 0; i < size; i++) { - fb.pos = buf; - if (bt) - bprintfrr(&fb, "%s", bt[i]); - else - bprintfrr(&fb, "[bt %d] 0x%tx", i, - (ptrdiff_t)(array[i])); - if (priority - <= zlog_default->maxlvl[ZLOG_DEST_MONITOR]) - vty_log_fixed(fb.buf, fb.pos - fb.buf); - if (priority - <= zlog_default->maxlvl[ZLOG_DEST_SYSLOG]) - syslog_sigsafe(priority - | zlog_default->facility, - fb.buf, fb.pos - fb.buf); - } - if (bt) - free(bt); - } + for (i = 0; i < size; i++) { + fb.pos = buf; + if (bt) + bprintfrr(&fb, "%s", bt[i]); + else + bprintfrr(&fb, "[bt %d] 0x%tx", i, + (ptrdiff_t)(array[i])); + zlog_sigsafe(fb.buf, fb.pos - fb.buf); } -#undef DUMP + if (bt) + free(bt); #endif /* HAVE_STRACK_TRACE */ } @@ -754,36 +294,6 @@ void zlog_backtrace(int priority) #endif } -void zlog(int priority, const char *format, ...) -{ - va_list args; - - va_start(args, format); - vzlog(priority, format, args); - va_end(args); -} - -#define ZLOG_FUNC(FUNCNAME, PRIORITY) \ - void FUNCNAME(const char *format, ...) \ - { \ - va_list args; \ - va_start(args, format); \ - vzlog(PRIORITY, format, args); \ - va_end(args); \ - } - -ZLOG_FUNC(zlog_err, LOG_ERR) - -ZLOG_FUNC(zlog_warn, LOG_WARNING) - -ZLOG_FUNC(zlog_info, LOG_INFO) - -ZLOG_FUNC(zlog_notice, LOG_NOTICE) - -ZLOG_FUNC(zlog_debug, LOG_DEBUG) - -#undef ZLOG_FUNC - void zlog_thread_info(int log_level) { struct thread *tc; @@ -801,11 +311,6 @@ void zlog_thread_info(int log_level) void _zlog_assert_failed(const char *assertion, const char *file, unsigned int line, const char *function) { - /* Force fallback file logging? */ - if (zlog_default && !zlog_default->fp - && ((logfile_fd = open_crashlog()) >= 0) - && ((zlog_default->fp = fdopen(logfile_fd, "w")) != NULL)) - zlog_default->maxlvl[ZLOG_DEST_FILE] = LOG_ERR; zlog(LOG_CRIT, "Assertion `%s' failed in file %s, line %u, function %s", assertion, file, line, (function ? 
function : "?")); zlog_backtrace(LOG_CRIT); @@ -816,174 +321,14 @@ void _zlog_assert_failed(const char *assertion, const char *file, void memory_oom(size_t size, const char *name) { - flog_err_sys(EC_LIB_SYSTEM_CALL, - "out of memory: failed to allocate %zu bytes for %s" - "object", - size, name); - zlog_backtrace(LOG_ERR); + zlog(LOG_CRIT, + "out of memory: failed to allocate %zu bytes for %s object", + size, name); + zlog_backtrace(LOG_CRIT); + log_memstats(stderr, "log"); abort(); } -/* Open log stream */ -void openzlog(const char *progname, const char *protoname, - unsigned short instance, int syslog_flags, int syslog_facility) -{ - struct zlog *zl; - unsigned int i; - - zl = XCALLOC(MTYPE_ZLOG, sizeof(struct zlog)); - - zl->ident = progname; - zl->protoname = protoname; - zl->instance = instance; - zl->facility = syslog_facility; - zl->syslog_options = syslog_flags; - - /* Set default logging levels. */ - for (i = 0; i < array_size(zl->maxlvl); i++) - zl->maxlvl[i] = ZLOG_DISABLED; - zl->maxlvl[ZLOG_DEST_MONITOR] = LOG_DEBUG; - zl->default_lvl = LOG_DEBUG; - - openlog(progname, syslog_flags, zl->facility); - - frr_with_mutex(&loglock) { - zlog_default = zl; - } - -#ifdef HAVE_GLIBC_BACKTRACE - /* work around backtrace() using lazily resolved dynamically linked - * symbols, which will otherwise cause funny breakage in the SEGV - * handler. - * (particularly, the dynamic linker can call malloc(), which uses locks - * in programs linked with -pthread, thus can deadlock.) */ - void *bt[4]; - backtrace(bt, array_size(bt)); - free(backtrace_symbols(bt, 0)); - backtrace_symbols_fd(bt, 0, 0); -#endif -} - -void closezlog(void) -{ - frr_mutex_lock_autounlock(&loglock); - - struct zlog *zl = zlog_default; - - closelog(); - - if (zl->fp != NULL) - fclose(zl->fp); - - XFREE(MTYPE_ZLOG, zl->filename); - - XFREE(MTYPE_ZLOG, zl); - zlog_default = NULL; -} - -/* Called from command.c. 
*/ -void zlog_set_level(zlog_dest_t dest, int log_level) -{ - frr_with_mutex(&loglock) { - zlog_default->maxlvl[dest] = log_level; - } -} - -int zlog_set_file(const char *filename, int log_level) -{ - struct zlog *zl; - FILE *fp; - mode_t oldumask; - int ret = 1; - - /* There is opend file. */ - zlog_reset_file(); - - /* Open file. */ - oldumask = umask(0777 & ~LOGFILE_MASK); - fp = fopen(filename, "a"); - umask(oldumask); - if (fp == NULL) { - ret = 0; - } else { - frr_with_mutex(&loglock) { - zl = zlog_default; - - /* Set flags. */ - zl->filename = XSTRDUP(MTYPE_ZLOG, filename); - zl->maxlvl[ZLOG_DEST_FILE] = log_level; - zl->fp = fp; - logfile_fd = fileno(fp); - } - } - - return ret; -} - -/* Reset opend file. */ -int zlog_reset_file(void) -{ - frr_mutex_lock_autounlock(&loglock); - - struct zlog *zl = zlog_default; - - if (zl->fp) - fclose(zl->fp); - zl->fp = NULL; - logfile_fd = -1; - zl->maxlvl[ZLOG_DEST_FILE] = ZLOG_DISABLED; - - XFREE(MTYPE_ZLOG, zl->filename); - - return 1; -} - -/* Reopen log file. */ -int zlog_rotate(void) -{ - pthread_mutex_lock(&loglock); - - struct zlog *zl = zlog_default; - int level; - int ret = 1; - - if (zl->fp) - fclose(zl->fp); - zl->fp = NULL; - logfile_fd = -1; - level = zl->maxlvl[ZLOG_DEST_FILE]; - zl->maxlvl[ZLOG_DEST_FILE] = ZLOG_DISABLED; - - if (zl->filename) { - mode_t oldumask; - int save_errno; - - oldumask = umask(0777 & ~LOGFILE_MASK); - zl->fp = fopen(zl->filename, "a"); - save_errno = errno; - umask(oldumask); - if (zl->fp == NULL) { - - pthread_mutex_unlock(&loglock); - - flog_err_sys( - EC_LIB_SYSTEM_CALL, - "Log rotate failed: cannot open file %s for append: %s", - zl->filename, safe_strerror(save_errno)); - ret = -1; - - pthread_mutex_lock(&loglock); - } else { - logfile_fd = fileno(zl->fp); - zl->maxlvl[ZLOG_DEST_FILE] = level; - } - } - - pthread_mutex_unlock(&loglock); - - return ret; -} - /* Wrapper around strerror to handle case where it returns NULL. 
*/ const char *safe_strerror(int errnum) { @@ -22,21 +22,22 @@ #ifndef _ZEBRA_LOG_H #define _ZEBRA_LOG_H +#include "zassert.h" + #include <syslog.h> #include <stdint.h> #include <stdbool.h> #include <stdio.h> #include <stdarg.h> + #include "lib/hook.h" +#include "lib/zlog.h" +#include "lib/zlog_targets.h" #ifdef __cplusplus extern "C" { #endif -/* Hook for external logging function */ -DECLARE_HOOK(zebra_ext_log, (int priority, const char *format, va_list args), - (priority, format, args)); - /* Here is some guidance on logging levels to use: * * LOG_DEBUG - For all messages that are enabled by optional debugging @@ -53,19 +54,7 @@ DECLARE_HOOK(zebra_ext_log, (int priority, const char *format, va_list args), * please use LOG_ERR instead. */ -/* If maxlvl is set to ZLOG_DISABLED, then no messages will be sent - to that logging destination. */ -#define ZLOG_DISABLED (LOG_EMERG-1) - -typedef enum { - ZLOG_DEST_SYSLOG = 0, - ZLOG_DEST_STDOUT, - ZLOG_DEST_MONITOR, - ZLOG_DEST_FILE -} zlog_dest_t; -#define ZLOG_NUM_DESTS (ZLOG_DEST_FILE+1) - -extern bool zlog_startup_stderr; +extern void zlog_rotate(void); /* Message structure. */ struct message { @@ -73,22 +62,6 @@ struct message { const char *str; }; -/* Open zlog function */ -extern void openzlog(const char *progname, const char *protoname, - uint16_t instance, int syslog_options, - int syslog_facility); - -/* Close zlog function. */ -extern void closezlog(void); - -/* Handy zlog functions. */ -extern void zlog_err(const char *format, ...) PRINTFRR(1, 2); -extern void zlog_warn(const char *format, ...) PRINTFRR(1, 2); -extern void zlog_info(const char *format, ...) PRINTFRR(1, 2); -extern void zlog_notice(const char *format, ...) PRINTFRR(1, 2); -extern void zlog_debug(const char *format, ...) PRINTFRR(1, 2); -extern void zlog(int priority, const char *format, ...) PRINTFRR(2, 3); - /* For logs which have error codes associated with them */ #define flog_err(ferr_id, format, ...) 
\ zlog_err("[EC %" PRIu32 "] " format, ferr_id, ##__VA_ARGS__) @@ -101,23 +74,16 @@ extern void zlog(int priority, const char *format, ...) PRINTFRR(2, 3); extern void zlog_thread_info(int log_level); -/* Set logging level for the given destination. If the log_level - argument is ZLOG_DISABLED, then the destination is disabled. - This function should not be used for file logging (use zlog_set_file - or zlog_reset_file instead). */ -extern void zlog_set_level(zlog_dest_t, int log_level); - -/* Set logging to the given filename at the specified level. */ -extern int zlog_set_file(const char *filename, int log_level); -/* Disable file logging. */ -extern int zlog_reset_file(void); - -/* Rotate log. */ -extern int zlog_rotate(void); - #define ZLOG_FILTERS_MAX 100 /* Max # of filters at once */ #define ZLOG_FILTER_LENGTH_MAX 80 /* 80 character filter limit */ +struct zlog_cfg_filterfile { + struct zlog_cfg_file parent; +}; + +extern void zlog_filterfile_init(struct zlog_cfg_filterfile *zcf); +extern void zlog_filterfile_fini(struct zlog_cfg_filterfile *zcf); + /* Add/Del/Dump log filters */ extern void zlog_filter_clear(void); extern int zlog_filter_add(const char *filter); @@ -176,8 +142,6 @@ extern int proto_redistnum(int afi, const char *s); extern const char *zserv_command_string(unsigned int command); -extern int vzlog_test(int priority); - /* structure useful for avoiding repeated rendering of the same timestamp */ struct timestamp_control { size_t len; /* length of rendered timestamp */ diff --git a/lib/log_filter.c b/lib/log_filter.c new file mode 100644 index 0000000000..721e57a628 --- /dev/null +++ b/lib/log_filter.c @@ -0,0 +1,156 @@ +/* + * Logging - Filtered file log target + * Copyright (C) 2019 Cumulus Networks, Inc. 
+ * Stephen Worley + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "frr_pthread.h" +#include "log.h" + +static pthread_mutex_t logfilterlock = PTHREAD_MUTEX_INITIALIZER; +static char zlog_filters[ZLOG_FILTERS_MAX][ZLOG_FILTER_LENGTH_MAX + 1]; +static uint8_t zlog_filter_count; + +/* + * look for a match on the filter in the current filters, + * logfilterlock must be held + */ +static int zlog_filter_lookup(const char *lookup) +{ + for (int i = 0; i < zlog_filter_count; i++) { + if (strncmp(lookup, zlog_filters[i], sizeof(zlog_filters[0])) + == 0) + return i; + } + return -1; +} + +void zlog_filter_clear(void) +{ + frr_with_mutex(&logfilterlock) { + zlog_filter_count = 0; + } +} + +int zlog_filter_add(const char *filter) +{ + frr_with_mutex(&logfilterlock) { + if (zlog_filter_count >= ZLOG_FILTERS_MAX) + return 1; + + if (zlog_filter_lookup(filter) != -1) + /* Filter already present */ + return -1; + + strlcpy(zlog_filters[zlog_filter_count], filter, + sizeof(zlog_filters[0])); + + if (zlog_filters[zlog_filter_count][0] == '\0') + /* Filter was either empty or didn't get copied + * correctly + */ + return -1; + + zlog_filter_count++; + } + return 0; +} + +int zlog_filter_del(const char *filter) +{ + frr_with_mutex(&logfilterlock) { + int 
found_idx = zlog_filter_lookup(filter); + int last_idx = zlog_filter_count - 1; + + if (found_idx == -1) + /* Didn't find the filter to delete */ + return -1; + + /* Adjust the filter array */ + memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1], + (last_idx - found_idx) * sizeof(zlog_filters[0])); + + zlog_filter_count--; + } + return 0; +} + +/* Dump all filters to buffer, delimited by new line */ +int zlog_filter_dump(char *buf, size_t max_size) +{ + int len = 0; + + frr_with_mutex(&logfilterlock) { + for (int i = 0; i < zlog_filter_count; i++) { + int ret; + + ret = snprintf(buf + len, max_size - len, " %s\n", + zlog_filters[i]); + len += ret; + if ((ret < 0) || ((size_t)len >= max_size)) + return -1; + } + } + + return len; +} + +static int search_buf(const char *buf) +{ + char *found = NULL; + + frr_with_mutex(&logfilterlock) { + for (int i = 0; i < zlog_filter_count; i++) { + found = strstr(buf, zlog_filters[i]); + if (found != NULL) + return 0; + } + } + + return -1; +} + +static void zlog_filterfile_fd(struct zlog_target *zt, struct zlog_msg *msgs[], + size_t nmsgs) +{ + struct zlog_msg *msgfilt[nmsgs]; + size_t i, o = 0; + + for (i = 0; i < nmsgs; i++) { + if (zlog_msg_prio(msgs[i]) >= LOG_DEBUG + && search_buf(zlog_msg_text(msgs[i], NULL)) < 0) + continue; + + msgfilt[o++] = msgs[i]; + } + + if (o) + zlog_fd(zt, msgfilt, o); +} + +void zlog_filterfile_init(struct zlog_cfg_filterfile *zcf) +{ + zlog_file_init(&zcf->parent); + zcf->parent.zlog_wrap = zlog_filterfile_fd; +} + +void zlog_filterfile_fini(struct zlog_cfg_filterfile *zcf) +{ + zlog_file_fini(&zcf->parent); +} diff --git a/lib/log_int.h b/lib/log_int.h deleted file mode 100644 index 287e626eab..0000000000 --- a/lib/log_int.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Zebra logging funcions. - * Copyright (C) 1997, 1998, 1999 Kunihiro Ishiguro - * - * This file is part of GNU Zebra. 
- * - * GNU Zebra is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * GNU Zebra is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; see the file COPYING; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef _ZEBRA_LOG_PRIVATE_H -#define _ZEBRA_LOG_PRIVATE_H - -#include "log.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct zlog { - const char *ident; /* daemon name (first arg to openlog) */ - const char *protoname; - unsigned short instance; - int maxlvl[ZLOG_NUM_DESTS]; /* maximum priority to send to associated - logging destination */ - int default_lvl; /* maxlvl to use if none is specified */ - FILE *fp; - char *filename; - int facility; /* as per syslog facility */ - int record_priority; /* should messages logged through stdio include the - priority of the message? */ - int syslog_options; /* 2nd arg to openlog */ - int timestamp_precision; /* # of digits of subsecond precision */ -}; - -/* Default logging strucutre. */ -extern struct zlog *zlog_default; - -extern const char *zlog_priority[]; - -/* Generic function for zlog. 
*/ -extern void vzlog(int priority, const char *format, va_list args); - -#ifdef __cplusplus -} -#endif - -#endif /* _ZEBRA_LOG_PRIVATE_H */ diff --git a/lib/log_vty.c b/lib/log_vty.c index 68d598f565..97026e5dbc 100644 --- a/lib/log_vty.c +++ b/lib/log_vty.c @@ -22,12 +22,542 @@ #include "lib/log_vty.h" #include "command.h" -#include "lib/vty.h" #include "lib/log.h" +#include "lib/zlog_targets.h" +#include "lib/lib_errors.h" +#include "lib/printfrr.h" + #ifndef VTYSH_EXTRACT_PL #include "lib/log_vty_clippy.c" #endif +#define ZLOG_MAXLVL(a, b) MAX(a, b) + +DEFINE_HOOK(zlog_rotate, (), ()) + +static const int log_default_lvl = LOG_DEBUG; + +static int log_config_stdout_lvl = ZLOG_DISABLED; +static int log_config_syslog_lvl = ZLOG_DISABLED; +static int log_cmdline_stdout_lvl = ZLOG_DISABLED; +static int log_cmdline_syslog_lvl = ZLOG_DISABLED; + +static struct zlog_cfg_file zt_file_cmdline = { + .prio_min = ZLOG_DISABLED, +}; +static struct zlog_cfg_file zt_file = { + .prio_min = ZLOG_DISABLED, +}; +static struct zlog_cfg_file zt_stdout = { + .prio_min = ZLOG_DISABLED, +}; +static struct zlog_cfg_filterfile zt_filterfile = { + .parent = { + .prio_min = ZLOG_DISABLED, + }, +}; + +static const char *zlog_progname; +static const char *zlog_protoname; + +static const struct facility_map { + int facility; + const char *name; + size_t match; +} syslog_facilities[] = { + {LOG_KERN, "kern", 1}, + {LOG_USER, "user", 2}, + {LOG_MAIL, "mail", 1}, + {LOG_DAEMON, "daemon", 1}, + {LOG_AUTH, "auth", 1}, + {LOG_SYSLOG, "syslog", 1}, + {LOG_LPR, "lpr", 2}, + {LOG_NEWS, "news", 1}, + {LOG_UUCP, "uucp", 2}, + {LOG_CRON, "cron", 1}, +#ifdef LOG_FTP + {LOG_FTP, "ftp", 1}, +#endif + {LOG_LOCAL0, "local0", 6}, + {LOG_LOCAL1, "local1", 6}, + {LOG_LOCAL2, "local2", 6}, + {LOG_LOCAL3, "local3", 6}, + {LOG_LOCAL4, "local4", 6}, + {LOG_LOCAL5, "local5", 6}, + {LOG_LOCAL6, "local6", 6}, + {LOG_LOCAL7, "local7", 6}, + {0, NULL, 0}, +}; + +static const char * const zlog_priority[] = { + 
"emergencies", "alerts", "critical", "errors", "warnings", + "notifications", "informational", "debugging", NULL, +}; + +static const char *facility_name(int facility) +{ + const struct facility_map *fm; + + for (fm = syslog_facilities; fm->name; fm++) + if (fm->facility == facility) + return fm->name; + return ""; +} + +static int facility_match(const char *str) +{ + const struct facility_map *fm; + + for (fm = syslog_facilities; fm->name; fm++) + if (!strncmp(str, fm->name, fm->match)) + return fm->facility; + return -1; +} + +int log_level_match(const char *s) +{ + int level; + + for (level = 0; zlog_priority[level] != NULL; level++) + if (!strncmp(s, zlog_priority[level], 2)) + return level; + return ZLOG_DISABLED; +} + +void zlog_rotate(void) +{ + zlog_file_rotate(&zt_file); + zlog_file_rotate(&zt_filterfile.parent); + hook_call(zlog_rotate); +} + + +void log_show_syslog(struct vty *vty) +{ + int level = zlog_syslog_get_prio_min(); + + vty_out(vty, "Syslog logging: "); + if (level == ZLOG_DISABLED) + vty_out(vty, "disabled\n"); + else + vty_out(vty, "level %s, facility %s, ident %s\n", + zlog_priority[level], + facility_name(zlog_syslog_get_facility()), + zlog_progname); +} + +DEFUN (show_logging, + show_logging_cmd, + "show logging", + SHOW_STR + "Show current logging configuration\n") +{ + log_show_syslog(vty); + + vty_out(vty, "Stdout logging: "); + if (zt_stdout.prio_min == ZLOG_DISABLED) + vty_out(vty, "disabled"); + else + vty_out(vty, "level %s", + zlog_priority[zt_stdout.prio_min]); + vty_out(vty, "\n"); + + vty_out(vty, "File logging: "); + if (zt_file.prio_min == ZLOG_DISABLED || !zt_file.filename) + vty_out(vty, "disabled"); + else + vty_out(vty, "level %s, filename %s", + zlog_priority[zt_file.prio_min], zt_file.filename); + vty_out(vty, "\n"); + + if (zt_filterfile.parent.prio_min != ZLOG_DISABLED + && zt_filterfile.parent.filename) + vty_out(vty, "Filtered-file logging: level %s, filename %s\n", + zlog_priority[zt_filterfile.parent.prio_min], + 
zt_filterfile.parent.filename); + + if (log_cmdline_syslog_lvl != ZLOG_DISABLED) + vty_out(vty, + "From command line: \"--log syslog --log-level %s\"\n", + zlog_priority[log_cmdline_syslog_lvl]); + if (log_cmdline_stdout_lvl != ZLOG_DISABLED) + vty_out(vty, + "From command line: \"--log stdout --log-level %s\"\n", + zlog_priority[log_cmdline_stdout_lvl]); + if (zt_file_cmdline.prio_min != ZLOG_DISABLED) + vty_out(vty, + "From command line: \"--log file:%s --log-level %s\"\n", + zt_file_cmdline.filename, + zlog_priority[zt_file_cmdline.prio_min]); + + vty_out(vty, "Protocol name: %s\n", zlog_protoname); + vty_out(vty, "Record priority: %s\n", + (zt_file.record_priority ? "enabled" : "disabled")); + vty_out(vty, "Timestamp precision: %d\n", zt_file.ts_subsec); + return CMD_SUCCESS; +} + +DEFPY (config_log_stdout, + config_log_stdout_cmd, + "log stdout [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>$levelarg]", + "Logging control\n" + "Set stdout logging level\n" + LOG_LEVEL_DESC) +{ + int level; + + if (levelarg) { + level = log_level_match(levelarg); + if (level == ZLOG_DISABLED) + return CMD_ERR_NO_MATCH; + } else + level = log_default_lvl; + + log_config_stdout_lvl = level; + zt_stdout.prio_min = ZLOG_MAXLVL(log_config_stdout_lvl, + log_cmdline_stdout_lvl); + zlog_file_set_other(&zt_stdout); + return CMD_SUCCESS; +} + +DEFUN (no_config_log_stdout, + no_config_log_stdout_cmd, + "no log stdout [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", + NO_STR + "Logging control\n" + "Cancel logging to stdout\n" + LOG_LEVEL_DESC) +{ + log_config_stdout_lvl = ZLOG_DISABLED; + zt_stdout.prio_min = ZLOG_MAXLVL(log_config_stdout_lvl, + log_cmdline_stdout_lvl); + zlog_file_set_other(&zt_stdout); + return CMD_SUCCESS; +} + +DEFUN_HIDDEN (config_log_monitor, + config_log_monitor_cmd, + "log monitor [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", + "Logging control\n" + 
"Set terminal line (monitor) logging level\n" + LOG_LEVEL_DESC) +{ + vty_out(vty, "%% \"log monitor\" is deprecated and does nothing.\n"); + return CMD_SUCCESS; +} + +DEFUN_HIDDEN (no_config_log_monitor, + no_config_log_monitor_cmd, + "no log monitor [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", + NO_STR + "Logging control\n" + "Disable terminal line (monitor) logging\n" + LOG_LEVEL_DESC) +{ + return CMD_SUCCESS; +} + +static int set_log_file(struct zlog_cfg_file *target, struct vty *vty, + const char *fname, int loglevel) +{ + char *p = NULL; + const char *fullpath; + bool ok; + + /* Path detection. */ + if (!IS_DIRECTORY_SEP(*fname)) { + char cwd[MAXPATHLEN + 1]; + + cwd[MAXPATHLEN] = '\0'; + + if (getcwd(cwd, MAXPATHLEN) == NULL) { + flog_err_sys(EC_LIB_SYSTEM_CALL, + "config_log_file: Unable to alloc mem!"); + return CMD_WARNING_CONFIG_FAILED; + } + + p = XMALLOC(MTYPE_TMP, strlen(cwd) + strlen(fname) + 2); + sprintf(p, "%s/%s", cwd, fname); + fullpath = p; + } else + fullpath = fname; + + target->prio_min = loglevel; + ok = zlog_file_set_filename(target, fullpath); + + XFREE(MTYPE_TMP, p); + + if (!ok) { + if (vty) + vty_out(vty, "can't open logfile %s\n", fname); + return CMD_WARNING_CONFIG_FAILED; + } + return CMD_SUCCESS; +} + +void command_setup_early_logging(const char *dest, const char *level) +{ + int nlevel; + char *sep; + int len; + char type[8]; + + if (level) { + nlevel = log_level_match(level); + + if (nlevel == ZLOG_DISABLED) { + fprintf(stderr, "invalid log level \"%s\"\n", level); + exit(1); + } + } else + nlevel = log_default_lvl; + + if (!dest) + return; + + sep = strchr(dest, ':'); + len = sep ? 
(int)(sep - dest) : (int)strlen(dest); + + snprintfrr(type, sizeof(type), "%.*s", len, dest); + + if (strcmp(type, "stdout") == 0) { + log_cmdline_stdout_lvl = nlevel; + zt_stdout.prio_min = ZLOG_MAXLVL(log_config_stdout_lvl, + log_cmdline_stdout_lvl); + zlog_file_set_other(&zt_stdout); + return; + } + if (strcmp(type, "syslog") == 0) { + log_cmdline_syslog_lvl = nlevel; + zlog_syslog_set_prio_min(ZLOG_MAXLVL(log_config_syslog_lvl, + log_cmdline_syslog_lvl)); + return; + } + if (strcmp(type, "file") == 0 && sep) { + sep++; + set_log_file(&zt_file_cmdline, NULL, sep, nlevel); + return; + } + + fprintf(stderr, "invalid log target \"%s\" (\"%s\")\n", type, dest); + exit(1); +} + +DEFUN (clear_log_cmdline, + clear_log_cmdline_cmd, + "clear log cmdline-targets", + CLEAR_STR + "Logging control\n" + "Disable log targets specified at startup by --log option\n") +{ + zt_file_cmdline.prio_min = ZLOG_DISABLED; + zlog_file_set_other(&zt_file_cmdline); + + log_cmdline_syslog_lvl = ZLOG_DISABLED; + zlog_syslog_set_prio_min(ZLOG_MAXLVL(log_config_syslog_lvl, + log_cmdline_syslog_lvl)); + + log_cmdline_stdout_lvl = ZLOG_DISABLED; + zt_stdout.prio_min = ZLOG_MAXLVL(log_config_stdout_lvl, + log_cmdline_stdout_lvl); + zlog_file_set_other(&zt_stdout); + + return CMD_SUCCESS; +} + +DEFPY (config_log_file, + config_log_file_cmd, + "log file FILENAME [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>$levelarg]", + "Logging control\n" + "Logging to file\n" + "Logging filename\n" + LOG_LEVEL_DESC) +{ + int level = log_default_lvl; + + if (levelarg) { + level = log_level_match(levelarg); + if (level == ZLOG_DISABLED) + return CMD_ERR_NO_MATCH; + } + return set_log_file(&zt_file, vty, filename, level); +} + +DEFUN (no_config_log_file, + no_config_log_file_cmd, + "no log file [FILENAME [LEVEL]]", + NO_STR + "Logging control\n" + "Cancel logging to file\n" + "Logging file name\n" + "Logging level\n") +{ + zt_file.prio_min = ZLOG_DISABLED; + 
zlog_file_set_other(&zt_file); + return CMD_SUCCESS; +} + +DEFPY (config_log_syslog, + config_log_syslog_cmd, + "log syslog [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>$levelarg]", + "Logging control\n" + "Set syslog logging level\n" + LOG_LEVEL_DESC) +{ + int level; + + if (levelarg) { + level = log_level_match(levelarg); + + if (level == ZLOG_DISABLED) + return CMD_ERR_NO_MATCH; + } else + level = log_default_lvl; + + log_config_syslog_lvl = level; + zlog_syslog_set_prio_min(ZLOG_MAXLVL(log_config_syslog_lvl, + log_cmdline_syslog_lvl)); + return CMD_SUCCESS; +} + +DEFUN (no_config_log_syslog, + no_config_log_syslog_cmd, + "no log syslog [<kern|user|mail|daemon|auth|syslog|lpr|news|uucp|cron|local0|local1|local2|local3|local4|local5|local6|local7>] [<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>]", + NO_STR + "Logging control\n" + "Cancel logging to syslog\n" + LOG_FACILITY_DESC + LOG_LEVEL_DESC) +{ + log_config_syslog_lvl = ZLOG_DISABLED; + zlog_syslog_set_prio_min(ZLOG_MAXLVL(log_config_syslog_lvl, + log_cmdline_syslog_lvl)); + return CMD_SUCCESS; +} + +DEFPY (config_log_facility, + config_log_facility_cmd, + "log facility <kern|user|mail|daemon|auth|syslog|lpr|news|uucp|cron|local0|local1|local2|local3|local4|local5|local6|local7>$facilityarg", + "Logging control\n" + "Facility parameter for syslog messages\n" + LOG_FACILITY_DESC) +{ + int facility = facility_match(facilityarg); + + zlog_syslog_set_facility(facility); + return CMD_SUCCESS; +} + +DEFUN (no_config_log_facility, + no_config_log_facility_cmd, + "no log facility [<kern|user|mail|daemon|auth|syslog|lpr|news|uucp|cron|local0|local1|local2|local3|local4|local5|local6|local7>]", + NO_STR + "Logging control\n" + "Reset syslog facility to default (daemon)\n" + LOG_FACILITY_DESC) +{ + zlog_syslog_set_facility(LOG_DAEMON); + return CMD_SUCCESS; +} + +DEFUN (config_log_record_priority, + config_log_record_priority_cmd, + "log 
record-priority", + "Logging control\n" + "Log the priority of the message within the message\n") +{ + zt_file.record_priority = true; + zlog_file_set_other(&zt_file); + zt_stdout.record_priority = true; + zlog_file_set_other(&zt_stdout); + zt_filterfile.parent.record_priority = true; + zlog_file_set_other(&zt_filterfile.parent); + return CMD_SUCCESS; +} + +DEFUN (no_config_log_record_priority, + no_config_log_record_priority_cmd, + "no log record-priority", + NO_STR + "Logging control\n" + "Do not log the priority of the message within the message\n") +{ + zt_file.record_priority = false; + zlog_file_set_other(&zt_file); + zt_stdout.record_priority = false; + zlog_file_set_other(&zt_stdout); + zt_filterfile.parent.record_priority = false; + zlog_file_set_other(&zt_filterfile.parent); + return CMD_SUCCESS; +} + +DEFPY (config_log_timestamp_precision, + config_log_timestamp_precision_cmd, + "log timestamp precision (0-6)", + "Logging control\n" + "Timestamp configuration\n" + "Set the timestamp precision\n" + "Number of subsecond digits\n") +{ + zt_file.ts_subsec = precision; + zlog_file_set_other(&zt_file); + zt_stdout.ts_subsec = precision; + zlog_file_set_other(&zt_stdout); + zt_filterfile.parent.ts_subsec = precision; + zlog_file_set_other(&zt_filterfile.parent); + return CMD_SUCCESS; +} + +DEFUN (no_config_log_timestamp_precision, + no_config_log_timestamp_precision_cmd, + "no log timestamp precision [(0-6)]", + NO_STR + "Logging control\n" + "Timestamp configuration\n" + "Reset the timestamp precision to the default value of 0\n" + "Number of subsecond digits\n") +{ + zt_file.ts_subsec = 0; + zlog_file_set_other(&zt_file); + zt_stdout.ts_subsec = 0; + zlog_file_set_other(&zt_stdout); + zt_filterfile.parent.ts_subsec = 0; + zlog_file_set_other(&zt_filterfile.parent); + return CMD_SUCCESS; +} + +DEFPY (config_log_filterfile, + config_log_filterfile_cmd, + "log filtered-file FILENAME 
[<emergencies|alerts|critical|errors|warnings|notifications|informational|debugging>$levelarg]", + "Logging control\n" + "Logging to file with string filter\n" + "Logging filename\n" + LOG_LEVEL_DESC) +{ + int level = log_default_lvl; + + if (levelarg) { + level = log_level_match(levelarg); + if (level == ZLOG_DISABLED) + return CMD_ERR_NO_MATCH; + } + return set_log_file(&zt_filterfile.parent, vty, filename, level); +} + +DEFUN (no_config_log_filterfile, + no_config_log_filterfile_cmd, + "no log filtered-file [FILENAME [LEVEL]]", + NO_STR + "Logging control\n" + "Cancel logging to file with string filter\n" + "Logging file name\n" + "Logging level\n") +{ + zt_filterfile.parent.prio_min = ZLOG_DISABLED; + zlog_file_set_other(&zt_filterfile.parent); + return CMD_SUCCESS; +} + DEFPY (log_filter, log_filter_cmd, "[no] log-filter WORD$filter", @@ -89,9 +619,122 @@ DEFPY (show_log_filter, return CMD_SUCCESS; } -void log_filter_cmd_init(void) +void log_config_write(struct vty *vty) { + bool show_cmdline_hint = false; + + if (zt_file.prio_min != ZLOG_DISABLED && zt_file.filename) { + vty_out(vty, "log file %s", zt_file.filename); + + if (zt_file.prio_min != log_default_lvl) + vty_out(vty, " %s", zlog_priority[zt_file.prio_min]); + vty_out(vty, "\n"); + } + + if (zt_filterfile.parent.prio_min != ZLOG_DISABLED + && zt_filterfile.parent.filename) { + vty_out(vty, "log filtered-file %s", + zt_filterfile.parent.filename); + + if (zt_filterfile.parent.prio_min != log_default_lvl) + vty_out(vty, " %s", + zlog_priority[zt_filterfile.parent.prio_min]); + vty_out(vty, "\n"); + } + + if (log_config_stdout_lvl != ZLOG_DISABLED) { + vty_out(vty, "log stdout"); + + if (log_config_stdout_lvl != log_default_lvl) + vty_out(vty, " %s", + zlog_priority[log_config_stdout_lvl]); + vty_out(vty, "\n"); + } + + if (log_config_syslog_lvl != ZLOG_DISABLED) { + vty_out(vty, "log syslog"); + + if (log_config_syslog_lvl != log_default_lvl) + vty_out(vty, " %s", + 
zlog_priority[log_config_syslog_lvl]); + vty_out(vty, "\n"); + } + + if (log_cmdline_syslog_lvl != ZLOG_DISABLED) { + vty_out(vty, + "! \"log syslog %s\" enabled by \"--log\" startup option\n", + zlog_priority[log_cmdline_syslog_lvl]); + show_cmdline_hint = true; + } + if (log_cmdline_stdout_lvl != ZLOG_DISABLED) { + vty_out(vty, + "! \"log stdout %s\" enabled by \"--log\" startup option\n", + zlog_priority[log_cmdline_stdout_lvl]); + show_cmdline_hint = true; + } + if (zt_file_cmdline.prio_min != ZLOG_DISABLED) { + vty_out(vty, + "! \"log file %s %s\" enabled by \"--log\" startup option\n", + zt_file_cmdline.filename, + zlog_priority[zt_file_cmdline.prio_min]); + show_cmdline_hint = true; + } + if (show_cmdline_hint) + vty_out(vty, + "! use \"clear log cmdline-targets\" to remove this target\n"); + + if (zlog_syslog_get_facility() != LOG_DAEMON) + vty_out(vty, "log facility %s\n", + facility_name(zlog_syslog_get_facility())); + + if (zt_file.record_priority == 1) + vty_out(vty, "log record-priority\n"); + + if (zt_file.ts_subsec > 0) + vty_out(vty, "log timestamp precision %d\n", + zt_file.ts_subsec); +} + +static int log_vty_init(const char *progname, const char *protoname, + unsigned short instance, uid_t uid, gid_t gid) +{ + zlog_progname = progname; + zlog_protoname = protoname; + + zlog_filterfile_init(&zt_filterfile); + + zlog_file_set_fd(&zt_stdout, STDOUT_FILENO); + return 0; +} + +__attribute__((_CONSTRUCTOR(475))) static void log_vty_preinit(void) +{ + hook_register(zlog_init, log_vty_init); +} + +void log_cmd_init(void) +{ + install_element(VIEW_NODE, &show_logging_cmd); + install_element(ENABLE_NODE, &clear_log_cmdline_cmd); + + install_element(CONFIG_NODE, &config_log_stdout_cmd); + install_element(CONFIG_NODE, &no_config_log_stdout_cmd); + install_element(CONFIG_NODE, &config_log_monitor_cmd); + install_element(CONFIG_NODE, &no_config_log_monitor_cmd); + install_element(CONFIG_NODE, &config_log_file_cmd); + install_element(CONFIG_NODE, 
&no_config_log_file_cmd); + install_element(CONFIG_NODE, &config_log_syslog_cmd); + install_element(CONFIG_NODE, &no_config_log_syslog_cmd); + install_element(CONFIG_NODE, &config_log_facility_cmd); + install_element(CONFIG_NODE, &no_config_log_facility_cmd); + install_element(CONFIG_NODE, &config_log_record_priority_cmd); + install_element(CONFIG_NODE, &no_config_log_record_priority_cmd); + install_element(CONFIG_NODE, &config_log_timestamp_precision_cmd); + install_element(CONFIG_NODE, &no_config_log_timestamp_precision_cmd); + install_element(VIEW_NODE, &show_log_filter_cmd); install_element(CONFIG_NODE, &log_filter_cmd); install_element(CONFIG_NODE, &log_filter_clear_cmd); + install_element(CONFIG_NODE, &config_log_filterfile_cmd); + install_element(CONFIG_NODE, &no_config_log_filterfile_cmd); } diff --git a/lib/log_vty.h b/lib/log_vty.h index fa5627e4bd..0fd60e9b07 100644 --- a/lib/log_vty.h +++ b/lib/log_vty.h @@ -20,5 +20,17 @@ #ifndef __LOG_VTY_H__ #define __LOG_VTY_H__ -extern void log_filter_cmd_init(void); + +#include "lib/hook.h" + +struct vty; + +extern void log_cmd_init(void); +extern void log_config_write(struct vty *vty); +extern int log_level_match(const char *s); +extern void log_show_syslog(struct vty *vty); + +DECLARE_HOOK(zlog_rotate, (), ()) +extern void zlog_rotate(void); + #endif /* __LOG_VTY_H__ */ @@ -429,7 +429,7 @@ void hmac_md5(unsigned char *text, int text_len, unsigned char *key, * pass */ MD5Update(&context, k_ipad, 64); /* start with inner pad */ MD5Update(&context, text, text_len); /* then text of datagram */ - MD5Final((uint8_t *)digest, &context); /* finish up 1st pass */ + MD5Final(digest, &context); /* finish up 1st pass */ /* * perform outer MD5 */ @@ -438,5 +438,5 @@ void hmac_md5(unsigned char *text, int text_len, unsigned char *key, MD5Update(&context, k_opad, 64); /* start with outer pad */ MD5Update(&context, digest, 16); /* then results of 1st * hash */ - MD5Final((uint8_t *)digest, &context); /* finish up 2nd pass */ + 
MD5Final(digest, &context); /* finish up 2nd pass */ } diff --git a/lib/memory.c b/lib/memory.c index 149e294d50..3a29404827 100644 --- a/lib/memory.c +++ b/lib/memory.c @@ -163,7 +163,8 @@ static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt) } else if (mt->n_alloc) { char size[32]; - eda->error++; + if (!mg->active_at_exit) + eda->error++; snprintf(size, sizeof(size), "%10zu", mt->size); fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n", eda->prefix, mt->name, mt->n_alloc, diff --git a/lib/memory.h b/lib/memory.h index e4e05faa4f..13f2f9b11a 100644 --- a/lib/memory.h +++ b/lib/memory.h @@ -17,6 +17,7 @@ #ifndef _QUAGGA_MEMORY_H #define _QUAGGA_MEMORY_H +#include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <frratomic.h> @@ -48,6 +49,8 @@ struct memgroup { struct memgroup *next, **ref; struct memtype *types, **insert; const char *name; + /* ignore group on dumping memleaks at exit */ + bool active_at_exit; }; /* macro usage: @@ -76,7 +79,7 @@ struct memgroup { */ #define DECLARE_MGROUP(name) extern struct memgroup _mg_##name; -#define DEFINE_MGROUP(mname, desc) \ +#define _DEFINE_MGROUP(mname, desc, ...) 
\ struct memgroup _mg_##mname \ __attribute__((section(".data.mgroups"))) = { \ .name = desc, \ @@ -84,6 +87,7 @@ struct memgroup { .next = NULL, \ .insert = NULL, \ .ref = NULL, \ + __VA_ARGS__ \ }; \ static void _mginit_##mname(void) __attribute__((_CONSTRUCTOR(1000))); \ static void _mginit_##mname(void) \ @@ -99,7 +103,13 @@ struct memgroup { if (_mg_##mname.next) \ _mg_##mname.next->ref = _mg_##mname.ref; \ *_mg_##mname.ref = _mg_##mname.next; \ - } + } \ + /* end */ + +#define DEFINE_MGROUP(mname, desc) \ + _DEFINE_MGROUP(mname, desc, ) +#define DEFINE_MGROUP_ACTIVEATEXIT(mname, desc) \ + _DEFINE_MGROUP(mname, desc, .active_at_exit = true) #define DECLARE_MTYPE(name) \ extern struct memtype MTYPE_##name[1]; \ diff --git a/lib/mpls.c b/lib/mpls.c index 759fe1206d..ac5792a686 100644 --- a/lib/mpls.c +++ b/lib/mpls.c @@ -79,7 +79,7 @@ int mpls_str2label(const char *label_str, uint8_t *num_labels, /* * Label to string conversion, labels in string separated by '/'. */ -char *mpls_label2str(uint8_t num_labels, mpls_label_t *labels, char *buf, +char *mpls_label2str(uint8_t num_labels, const mpls_label_t *labels, char *buf, int len, int pretty) { char label_buf[BUFSIZ]; diff --git a/lib/mpls.h b/lib/mpls.h index 635ecc77a1..05cf2935e8 100644 --- a/lib/mpls.h +++ b/lib/mpls.h @@ -209,10 +209,13 @@ static inline char *label2str(mpls_label_t label, char *buf, size_t len) int mpls_str2label(const char *label_str, uint8_t *num_labels, mpls_label_t *labels); +/* Generic string buffer for label-stack-to-str */ +#define MPLS_LABEL_STRLEN 1024 + /* * Label to string conversion, labels in string separated by '/'. 
*/ -char *mpls_label2str(uint8_t num_labels, mpls_label_t *labels, char *buf, +char *mpls_label2str(uint8_t num_labels, const mpls_label_t *labels, char *buf, int len, int pretty); #ifdef __cplusplus diff --git a/lib/network.c b/lib/network.c index 411661a5e1..d2482bd55e 100644 --- a/lib/network.c +++ b/lib/network.c @@ -121,3 +121,21 @@ float ntohf(float net) { return htonf(net); } + +/** + * Helper function that returns a random long value. The main purpose of + * this function is to hide a `random()` call that gets flagged by coverity + * scan and put it into one place. + * + * The main usage of this function should be for generating jitter or weak + * random values for simple purposes. + * + * See 'man 3 random' for more information. + * + * \returns random long integer. + */ +long frr_weak_random(void) +{ + /* coverity[dont_call] */ + return random(); +} diff --git a/lib/network.h b/lib/network.h index a00c5a0a65..83c9e59e76 100644 --- a/lib/network.h +++ b/lib/network.h @@ -45,6 +45,8 @@ extern int set_cloexec(int fd); extern float htonf(float); extern float ntohf(float); +extern long frr_weak_random(void); + #ifdef __cplusplus } #endif diff --git a/lib/nexthop.c b/lib/nexthop.c index c3be0a71e6..0d239e091b 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -23,11 +23,9 @@ #include "table.h" #include "memory.h" #include "command.h" -#include "if.h" #include "log.h" #include "sockunion.h" #include "linklist.h" -#include "thread.h" #include "prefix.h" #include "nexthop.h" #include "mpls.h" @@ -155,7 +153,24 @@ static int _nexthop_cmp_no_labels(const struct nexthop *next1, } ret = _nexthop_source_cmp(next1, next2); + if (ret != 0) + goto done; + + if (!CHECK_FLAG(next1->flags, NEXTHOP_FLAG_HAS_BACKUP) && + CHECK_FLAG(next2->flags, NEXTHOP_FLAG_HAS_BACKUP)) + return -1; + + if (CHECK_FLAG(next1->flags, NEXTHOP_FLAG_HAS_BACKUP) && + !CHECK_FLAG(next2->flags, NEXTHOP_FLAG_HAS_BACKUP)) + return 1; + + if (next1->backup_idx < next2->backup_idx) + return -1; + + if 
(next1->backup_idx > next2->backup_idx) + return 1; +done: return ret; } @@ -240,7 +255,7 @@ struct nexthop *nexthop_new(void) * The linux kernel does some weird stuff with adding +1 to * all nexthop weights it gets over netlink. * To handle this, just default everything to 1 right from - * from the beggining so we don't have to special case + * from the beginning so we don't have to special case * default weights in the linux netlink code. * * 1 should be a valid on all platforms anyway. @@ -393,8 +408,8 @@ struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type) } /* Update nexthop with label information. */ -void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t type, - uint8_t num_labels, mpls_label_t *label) +void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype, + uint8_t num_labels, const mpls_label_t *labels) { struct mpls_label_stack *nh_label; int i; @@ -402,13 +417,18 @@ void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t type, if (num_labels == 0) return; - nexthop->nh_label_type = type; + /* Enforce limit on label stack size */ + if (num_labels > MPLS_MAX_LABELS) + num_labels = MPLS_MAX_LABELS; + + nexthop->nh_label_type = ltype; + nh_label = XCALLOC(MTYPE_NH_LABEL, sizeof(struct mpls_label_stack) + num_labels * sizeof(mpls_label_t)); nh_label->num_labels = num_labels; for (i = 0; i < num_labels; i++) - nh_label->label[i] = *(label + i); + nh_label->label[i] = *(labels + i); nexthop->nh_label = nh_label; } @@ -503,6 +523,7 @@ unsigned int nexthop_level(struct nexthop *nexthop) uint32_t nexthop_hash_quick(const struct nexthop *nexthop) { uint32_t key = 0x45afe398; + uint32_t val; key = jhash_3words(nexthop->type, nexthop->vrf_id, nexthop->nh_label_type, key); @@ -532,8 +553,12 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop) key = jhash_1word(nexthop->nh_label->label[i], key); } - key = jhash_2words(nexthop->ifindex, - CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK), + val = 0; + if 
(CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + val = (uint32_t)nexthop->backup_idx; + + key = jhash_3words(nexthop->ifindex, + CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK), val, + key); return key; @@ -573,6 +598,7 @@ void nexthop_copy_no_recurse(struct nexthop *copy, copy->type = nexthop->type; copy->flags = nexthop->flags; copy->weight = nexthop->weight; + copy->backup_idx = nexthop->backup_idx; memcpy(&copy->gate, &nexthop->gate, sizeof(nexthop->gate)); memcpy(&copy->src, &nexthop->src, sizeof(nexthop->src)); memcpy(&copy->rmap_src, &nexthop->rmap_src, sizeof(nexthop->rmap_src)); diff --git a/lib/nexthop.h b/lib/nexthop.h index 267f9f28ad..9b71262589 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -25,6 +25,7 @@ #include "prefix.h" #include "mpls.h" +#include "vxlan.h" #ifdef __cplusplus extern "C" { @@ -60,6 +61,10 @@ enum blackhole_type { ? (type) \ : ((type) | 1) +enum nh_encap_type { + NET_VXLAN = 100, /* value copied from FPM_NH_ENCAP_VXLAN. */ +}; + /* Nexthop structure. */ struct nexthop { struct nexthop *next; @@ -86,6 +91,8 @@ struct nexthop { * active one */ #define NEXTHOP_FLAG_RNH_FILTERED (1 << 5) /* rmap filtered, used by rnh */ +#define NEXTHOP_FLAG_HAS_BACKUP (1 << 6) /* Backup nexthop index is set */ + #define NEXTHOP_IS_ACTIVE(flags) \ (CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \ && !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE)) @@ -116,15 +123,37 @@ struct nexthop { /* Weight of the nexthop ( for unequal cost ECMP ) */ uint8_t weight; + + /* Index of a corresponding backup nexthop in a backup list; + * only meaningful if the HAS_BACKUP flag is set. + */ + uint8_t backup_idx; + + /* Encapsulation information. */ + enum nh_encap_type nh_encap_type; + union { + vni_t vni; + } nh_encap; }; + +/* Backup index value is limited */ +#define NEXTHOP_BACKUP_IDX_MAX 255 + +/* Utility to append one nexthop to another. 
*/ +#define NEXTHOP_APPEND(to, new) \ + do { \ + (to)->next = (new); \ + (new)->prev = (to); \ + (new)->next = NULL; \ + } while (0) + struct nexthop *nexthop_new(void); void nexthop_free(struct nexthop *nexthop); void nexthops_free(struct nexthop *nexthop); -void nexthop_add_labels(struct nexthop *, enum lsp_types_t, uint8_t, - mpls_label_t *); +void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype, + uint8_t num_labels, const mpls_label_t *labels); void nexthop_del_labels(struct nexthop *); /* diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c index d660428bcd..c23c57d2e1 100644 --- a/lib/nexthop_group.c +++ b/lib/nexthop_group.c @@ -43,8 +43,12 @@ struct nexthop_hold { char *intf; char *labels; uint32_t weight; + int backup_idx; /* Index of backup nexthop, if >= 0 */ }; +/* Invalid/unset value for nexthop_hold's backup_idx */ +#define NHH_BACKUP_IDX_INVALID -1 + struct nexthop_group_hooks { void (*new)(const char *name); void (*add_nexthop)(const struct nexthop_group_cmd *nhg, @@ -143,6 +147,59 @@ struct nexthop *nexthop_exists(const struct nexthop_group *nhg, return NULL; } +/* + * Helper that locates a nexthop in an nhg config list. Note that + * this uses a specific matching / equality rule that's different from + * the complete match performed by 'nexthop_same()'. 
+ */ +static struct nexthop *nhg_nh_find(const struct nexthop_group *nhg, + const struct nexthop *nh) +{ + struct nexthop *nexthop; + int ret; + + /* We compare: vrf, gateway, and interface */ + + for (nexthop = nhg->nexthop; nexthop; nexthop = nexthop->next) { + + /* Compare vrf and type */ + if (nexthop->vrf_id != nh->vrf_id) + continue; + if (nexthop->type != nh->type) + continue; + + /* Compare gateway */ + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV6: + ret = nexthop_g_addr_cmp(nexthop->type, + &nexthop->gate, &nh->gate); + if (ret != 0) + continue; + break; + case NEXTHOP_TYPE_IPV4_IFINDEX: + case NEXTHOP_TYPE_IPV6_IFINDEX: + ret = nexthop_g_addr_cmp(nexthop->type, + &nexthop->gate, &nh->gate); + if (ret != 0) + continue; + /* Intentional Fall-Through */ + case NEXTHOP_TYPE_IFINDEX: + if (nexthop->ifindex != nh->ifindex) + continue; + break; + case NEXTHOP_TYPE_BLACKHOLE: + if (nexthop->bh_type != nh->bh_type) + continue; + break; + } + + return nexthop; + } + + return NULL; +} + static bool nexthop_group_equal_common(const struct nexthop_group *nhg1, const struct nexthop_group *nhg2, @@ -225,6 +282,10 @@ void nexthop_group_copy(struct nexthop_group *to, void nexthop_group_delete(struct nexthop_group **nhg) { + /* OK to call with NULL group */ + if ((*nhg) == NULL) + return; + if ((*nhg)->nexthop) nexthops_free((*nhg)->nexthop); @@ -322,6 +383,25 @@ void _nexthop_del(struct nexthop_group *nhg, struct nexthop *nh) nh->next = NULL; } +/* Unlink a nexthop from the list it's on, unconditionally */ +static void nexthop_unlink(struct nexthop_group *nhg, struct nexthop *nexthop) +{ + + if (nexthop->prev) + nexthop->prev->next = nexthop->next; + else { + assert(nhg->nexthop == nexthop); + assert(nexthop->prev == NULL); + nhg->nexthop = nexthop->next; + } + + if (nexthop->next) + nexthop->next->prev = nexthop->prev; + + nexthop->prev = NULL; + nexthop->next = NULL; +} + /* * Copy a list of nexthops in 'nh' to an nhg, enforcing canonical 
sort order */ @@ -567,11 +647,36 @@ DEFUN_NOSH(no_nexthop_group, no_nexthop_group_cmd, "no nexthop-group NHGNAME", return CMD_SUCCESS; } +DEFPY(nexthop_group_backup, nexthop_group_backup_cmd, + "backup-group WORD$name", + "Specify a group name containing backup nexthops\n" + "The name of the backup group\n") +{ + VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc); + + strlcpy(nhgc->backup_list_name, name, sizeof(nhgc->backup_list_name)); + + return CMD_SUCCESS; +} + +DEFPY(no_nexthop_group_backup, no_nexthop_group_backup_cmd, + "no backup-group [WORD$name]", + NO_STR + "Clear group name containing backup nexthops\n" + "The name of the backup group\n") +{ + VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc); + + nhgc->backup_list_name[0] = 0; + + return CMD_SUCCESS; +} + static void nexthop_group_save_nhop(struct nexthop_group_cmd *nhgc, const char *nhvrf_name, const union sockunion *addr, const char *intf, const char *labels, - const uint32_t weight) + const uint32_t weight, int backup_idx) { struct nexthop_hold *nh; @@ -588,14 +693,22 @@ static void nexthop_group_save_nhop(struct nexthop_group_cmd *nhgc, nh->weight = weight; + nh->backup_idx = backup_idx; + listnode_add_sort(nhgc->nhg_list, nh); } +/* + * Remove config info about a nexthop from group 'nhgc'. Note that we + * use only a subset of the available attributes here to determine + * a 'match'. + * Note that this doesn't change the list of nexthops, only the config + * information. 
+ */ static void nexthop_group_unsave_nhop(struct nexthop_group_cmd *nhgc, const char *nhvrf_name, const union sockunion *addr, - const char *intf, const char *labels, - const uint32_t weight) + const char *intf) { struct nexthop_hold *nh; struct listnode *node; @@ -603,9 +716,7 @@ static void nexthop_group_unsave_nhop(struct nexthop_group_cmd *nhgc, for (ALL_LIST_ELEMENTS_RO(nhgc->nhg_list, node, nh)) { if (nhgc_cmp_helper(nhvrf_name, nh->nhvrf_name) == 0 && nhgc_addr_cmp_helper(addr, nh->addr) == 0 - && nhgc_cmp_helper(intf, nh->intf) == 0 - && nhgc_cmp_helper(labels, nh->labels) == 0 - && weight == nh->weight) + && nhgc_cmp_helper(intf, nh->intf) == 0) break; } @@ -629,7 +740,7 @@ static bool nexthop_group_parse_nexthop(struct nexthop *nhop, const union sockunion *addr, const char *intf, const char *name, const char *labels, int *lbl_ret, - uint32_t weight) + uint32_t weight, int backup_idx) { int ret = 0; struct vrf *vrf; @@ -688,6 +799,15 @@ static bool nexthop_group_parse_nexthop(struct nexthop *nhop, nhop->weight = weight; + if (backup_idx != NHH_BACKUP_IDX_INVALID) { + /* Validate index value */ + if (backup_idx > NEXTHOP_BACKUP_IDX_MAX) + return false; + + SET_FLAG(nhop->flags, NEXTHOP_FLAG_HAS_BACKUP); + nhop->backup_idx = backup_idx; + } + return true; } @@ -699,7 +819,7 @@ static bool nexthop_group_parse_nhh(struct nexthop *nhop, { return (nexthop_group_parse_nexthop(nhop, nhh->addr, nhh->intf, nhh->nhvrf_name, nhh->labels, NULL, - nhh->weight)); + nhh->weight, nhh->backup_idx)); } DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, @@ -712,6 +832,7 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, nexthop-vrf NAME$vrf_name \ |label WORD \ |weight (1-255) \ + |backup-idx$bi_str (0-254)$idx \ }]", NO_STR "Specify one of the nexthops in this ECMP group\n" @@ -724,16 +845,23 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, "Specify label(s) for this nexthop\n" "One or more labels in the range (16-1048575) separated by '/'\n" "Weight to be used by the nexthop for purposes of 
ECMP\n" - "Weight value to be used\n") + "Weight value to be used\n" + "Backup nexthop index in another group\n" + "Nexthop index value\n") { VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc); struct nexthop nhop; struct nexthop *nh; int lbl_ret = 0; bool legal; + int backup_idx = idx; + bool yes = !no; + + if (bi_str == NULL) + backup_idx = NHH_BACKUP_IDX_INVALID; legal = nexthop_group_parse_nexthop(&nhop, addr, intf, vrf_name, label, - &lbl_ret, weight); + &lbl_ret, weight, backup_idx); if (nhop.type == NEXTHOP_TYPE_IPV6 && IN6_IS_ADDR_LINKLOCAL(&nhop.gate.ipv6)) { @@ -763,21 +891,30 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, return CMD_WARNING_CONFIG_FAILED; } - nh = nexthop_exists(&nhgc->nhg, &nhop); + /* Look for an existing nexthop in the config. Note that the test + * here tests only some attributes - it's not a complete comparison. + * Note that we've got two kinds of objects to manage: 'nexthop_hold' + * that represent config that may or may not be valid (yet), and + * actual nexthops that have been validated and parsed. + */ + nh = nhg_nh_find(&nhgc->nhg, &nhop); - if (no) { - nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf, label, - weight); - if (nh) { - _nexthop_del(&nhgc->nhg, nh); + /* Always attempt to remove old config info. */ + nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf); - if (nhg_hooks.del_nexthop) - nhg_hooks.del_nexthop(nhgc, nh); + /* Remove any existing nexthop, for delete and replace cases. */ + if (nh) { + nexthop_unlink(&nhgc->nhg, nh); - nexthop_free(nh); - } - } else if (!nh) { - /* must be adding new nexthop since !no and !nexthop_exists */ + if (nhg_hooks.del_nexthop) + nhg_hooks.del_nexthop(nhgc, nh); + + nexthop_free(nh); + } + if (yes) { + /* Add/replace case: capture nexthop if valid, and capture + * config info always. 
+ */ if (legal) { nh = nexthop_new(); @@ -785,8 +922,9 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, _nexthop_add(&nhgc->nhg.nexthop, nh); } + /* Save config always */ nexthop_group_save_nhop(nhgc, vrf_name, addr, intf, label, - weight); + weight, backup_idx); if (legal && nhg_hooks.add_nexthop) nhg_hooks.add_nexthop(nhgc, nh); @@ -795,10 +933,13 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, return CMD_SUCCESS; } +static int nexthop_group_write(struct vty *vty); static struct cmd_node nexthop_group_node = { - NH_GROUP_NODE, - "%s(config-nh-group)# ", - 1 + .name = "nexthop-group", + .node = NH_GROUP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-nh-group)# ", + .config_write = nexthop_group_write, }; void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh) @@ -849,6 +990,9 @@ void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh) if (nh->weight) vty_out(vty, " weight %u", nh->weight); + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP)) + vty_out(vty, " backup-idx %d", nh->backup_idx); + vty_out(vty, "\n"); } @@ -874,6 +1018,9 @@ static void nexthop_group_write_nexthop_internal(struct vty *vty, if (nh->weight) vty_out(vty, " weight %u", nh->weight); + if (nh->backup_idx != NHH_BACKUP_IDX_INVALID) + vty_out(vty, " backup-idx %d", nh->backup_idx); + vty_out(vty, "\n"); } @@ -887,6 +1034,10 @@ static int nexthop_group_write(struct vty *vty) vty_out(vty, "nexthop-group %s\n", nhgc->name); + if (nhgc->backup_list_name[0]) + vty_out(vty, " backup-group %s\n", + nhgc->backup_list_name); + for (ALL_LIST_ELEMENTS_RO(nhgc->nhg_list, node, nh)) { vty_out(vty, " "); nexthop_group_write_nexthop_internal(vty, nh); @@ -1062,11 +1213,13 @@ void nexthop_group_init(void (*new)(const char *name), cmd_variable_handler_register(nhg_name_handlers); - install_node(&nexthop_group_node, nexthop_group_write); + install_node(&nexthop_group_node); install_element(CONFIG_NODE, &nexthop_group_cmd); install_element(CONFIG_NODE, &no_nexthop_group_cmd); 
install_default(NH_GROUP_NODE); + install_element(NH_GROUP_NODE, &nexthop_group_backup_cmd); + install_element(NH_GROUP_NODE, &no_nexthop_group_backup_cmd); install_element(NH_GROUP_NODE, &ecmp_nexthops_cmd); memset(&nhg_hooks, 0, sizeof(nhg_hooks)); diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h index f99a53f694..3a5a1299c1 100644 --- a/lib/nexthop_group.h +++ b/lib/nexthop_group.h @@ -57,6 +57,8 @@ void copy_nexthops(struct nexthop **tnh, const struct nexthop *nh, uint32_t nexthop_group_hash_no_recurse(const struct nexthop_group *nhg); uint32_t nexthop_group_hash(const struct nexthop_group *nhg); void nexthop_group_mark_duplicates(struct nexthop_group *nhg); + +/* Add a nexthop to a list, enforcing the canonical sort order. */ void nexthop_group_add_sorted(struct nexthop_group *nhg, struct nexthop *nexthop); @@ -79,11 +81,16 @@ void nexthop_group_add_sorted(struct nexthop_group *nhg, (nhop) = nexthop_next(nhop) +#define NHGC_NAME_SIZE 80 + struct nexthop_group_cmd { RB_ENTRY(nexthop_group_cmd) nhgc_entry; - char name[80]; + char name[NHGC_NAME_SIZE]; + + /* Name of group containing backup nexthops (if set) */ + char backup_list_name[NHGC_NAME_SIZE]; struct nexthop_group nhg; diff --git a/lib/northbound.c b/lib/northbound.c index cebedcff09..85e723d7cf 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -1866,6 +1866,13 @@ static void nb_load_callbacks(const struct frr_yang_module_info *module) struct nb_node *nb_node; uint32_t priority; + if (i > YANG_MODULE_MAX_NODES) { + zlog_err( + "%s: %s.yang has more than %u nodes. 
Please increase YANG_MODULE_MAX_NODES to fix this problem.", + __func__, module->name, YANG_MODULE_MAX_NODES); + exit(1); + } + nb_node = nb_node_find(module->nodes[i].xpath); if (!nb_node) { flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH, diff --git a/lib/northbound.h b/lib/northbound.h index 76a11e518c..19a2ba0865 100644 --- a/lib/northbound.h +++ b/lib/northbound.h @@ -403,6 +403,13 @@ struct nb_node { /* The YANG list doesn't contain key leafs. */ #define F_NB_NODE_KEYLESS_LIST 0x02 +/* + * HACK: old gcc versions (< 5.x) have a bug that prevents C99 flexible arrays + * from working properly on shared libraries. For those compilers, use a fixed + * size array to work around the problem. + */ +#define YANG_MODULE_MAX_NODES 1024 + struct frr_yang_module_info { /* YANG module name. */ const char *name; @@ -417,7 +424,11 @@ struct frr_yang_module_info { /* Priority - lower priorities are processed first. */ uint32_t priority; +#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__) + } nodes[YANG_MODULE_MAX_NODES + 1]; +#else } nodes[]; +#endif }; /* Northbound error codes. 
*/ diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c index 17dc256281..d4467facaf 100644 --- a/lib/northbound_cli.c +++ b/lib/northbound_cli.c @@ -1674,7 +1674,12 @@ static int nb_debug_config_write(struct vty *vty) } static struct debug_callbacks nb_dbg_cbs = {.debug_set_all = nb_debug_set_all}; -static struct cmd_node nb_debug_node = {NORTHBOUND_DEBUG_NODE, "", 1}; +static struct cmd_node nb_debug_node = { + .name = "northbound debug", + .node = NORTHBOUND_DEBUG_NODE, + .prompt = "", + .config_write = nb_debug_config_write, +}; void nb_cli_install_default(int node) { @@ -1738,7 +1743,7 @@ void nb_cli_init(struct thread_master *tm) debug_init(&nb_dbg_cbs); - install_node(&nb_debug_node, nb_debug_config_write); + install_node(&nb_debug_node); install_element(ENABLE_NODE, &debug_nb_cmd); install_element(CONFIG_NODE, &debug_nb_cmd); diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp index 089899368d..b195f1aeca 100644 --- a/lib/northbound_grpc.cpp +++ b/lib/northbound_grpc.cpp @@ -884,7 +884,14 @@ static int frr_grpc_finish(void) return 0; } -static int frr_grpc_module_late_init(struct thread_master *tm) +/* + * This is done this way because module_init and module_late_init are both + * called during daemon pre-fork initialization. Because the GRPC library + * spawns threads internally, we need to delay initializing it until after + * fork. This is done by scheduling this init function as an event task, since + * the event loop doesn't run until after fork. 
+ */ +static int frr_grpc_module_very_late_init(struct thread *thread) { static unsigned long port = GRPC_DEFAULT_PORT; const char *args = THIS_MODULE->load_args; @@ -910,15 +917,19 @@ static int frr_grpc_module_late_init(struct thread_master *tm) if (frr_grpc_init(&port) < 0) goto error; - hook_register(frr_fini, frr_grpc_finish); - - return 0; - error: flog_err(EC_LIB_GRPC_INIT, "failed to initialize the gRPC module"); return -1; } +static int frr_grpc_module_late_init(struct thread_master *tm) +{ + thread_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL); + hook_register(frr_fini, frr_grpc_finish); + + return 0; +} + static int frr_grpc_module_init(void) { hook_register(frr_late_init, frr_grpc_module_late_init); diff --git a/lib/plist.c b/lib/plist.c index b7a020c6f7..67a91e2a0d 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -348,14 +348,14 @@ static void prefix_list_delete(struct prefix_list *plist) static struct prefix_list_entry * prefix_list_entry_make(struct prefix *prefix, enum prefix_list_type type, - int64_t seq, int le, int ge, int any) + int64_t seq, int le, int ge, bool any) { struct prefix_list_entry *pentry; pentry = prefix_list_entry_new(); if (any) - pentry->any = 1; + pentry->any = true; prefix_copy(&pentry->prefix, prefix); pentry->type = type; @@ -851,7 +851,7 @@ static int vty_prefix_list_install(struct vty *vty, afi_t afi, const char *name, struct prefix_list_entry *pentry; struct prefix_list_entry *dup; struct prefix p, p_tmp; - int any = 0; + bool any = false; int64_t seqnum = -1; int lenum = 0; int genum = 0; @@ -889,7 +889,7 @@ static int vty_prefix_list_install(struct vty *vty, afi_t afi, const char *name, (struct prefix_ipv4 *)&p); genum = 0; lenum = IPV4_MAX_BITLEN; - any = 1; + any = true; } else ret = str2prefix_ipv4(prefix, (struct prefix_ipv4 *)&p); @@ -908,7 +908,7 @@ static int vty_prefix_list_install(struct vty *vty, afi_t afi, const char *name, ret = str2prefix_ipv6("::/0", (struct prefix_ipv6 *)&p); genum = 0; lenum 
= IPV6_MAX_BITLEN; - any = 1; + any = true; } else ret = str2prefix_ipv6(prefix, (struct prefix_ipv6 *)&p); @@ -1898,7 +1898,7 @@ int prefix_bgp_orf_set(char *name, afi_t afi, struct orf_prefix *orfp, if (set) { pentry = prefix_list_entry_make( &orfp->p, (permit ? PREFIX_PERMIT : PREFIX_DENY), - orfp->seq, orfp->le, orfp->ge, 0); + orfp->seq, orfp->le, orfp->ge, false); if (prefix_entry_dup_check(plist, pentry)) { prefix_list_entry_free(pentry); @@ -2044,10 +2044,14 @@ static void prefix_list_reset_afi(afi_t afi, int orf) } +static int config_write_prefix_ipv4(struct vty *vty); /* Prefix-list node. */ -static struct cmd_node prefix_node = {PREFIX_NODE, - "", /* Prefix list has no interface. */ - 1}; +static struct cmd_node prefix_node = { + .name = "ipv4 prefix list", + .node = PREFIX_NODE, + .prompt = "", + .config_write = config_write_prefix_ipv4, +}; static int config_write_prefix_ipv4(struct vty *vty) { @@ -2085,7 +2089,7 @@ static const struct cmd_variable_handler plist_var_handlers[] = { static void prefix_list_init_ipv4(void) { - install_node(&prefix_node, config_write_prefix_ipv4); + install_node(&prefix_node); install_element(CONFIG_NODE, &ip_prefix_list_cmd); install_element(CONFIG_NODE, &no_ip_prefix_list_cmd); @@ -2107,10 +2111,14 @@ static void prefix_list_init_ipv4(void) install_element(ENABLE_NODE, &clear_ip_prefix_list_cmd); } +static int config_write_prefix_ipv6(struct vty *vty); /* Prefix-list node. */ static struct cmd_node prefix_ipv6_node = { - PREFIX_IPV6_NODE, "", /* Prefix list has no interface. 
*/ - 1}; + .name = "ipv6 prefix list", + .node = PREFIX_IPV6_NODE, + .prompt = "", + .config_write = config_write_prefix_ipv6, +}; static int config_write_prefix_ipv6(struct vty *vty) { @@ -2119,7 +2127,7 @@ static int config_write_prefix_ipv6(struct vty *vty) static void prefix_list_init_ipv6(void) { - install_node(&prefix_ipv6_node, config_write_prefix_ipv6); + install_node(&prefix_ipv6_node); install_element(CONFIG_NODE, &ipv6_prefix_list_cmd); install_element(CONFIG_NODE, &no_ipv6_prefix_list_cmd); diff --git a/lib/plist_int.h b/lib/plist_int.h index 443b0c614d..ec8bbe1315 100644 --- a/lib/plist_int.h +++ b/lib/plist_int.h @@ -59,7 +59,7 @@ struct prefix_list_entry { enum prefix_list_type type; - int any; + bool any; struct prefix prefix; unsigned long refcnt; diff --git a/lib/qobj.c b/lib/qobj.c index 1e48b541dc..cb3254cbe9 100644 --- a/lib/qobj.c +++ b/lib/qobj.c @@ -26,6 +26,7 @@ #include "log.h" #include "qobj.h" #include "jhash.h" +#include "network.h" static uint32_t qobj_hash(const struct qobj_node *node) { @@ -53,8 +54,8 @@ void qobj_reg(struct qobj_node *node, const struct qobj_nodetype *type) node->type = type; pthread_rwlock_wrlock(&nodes_lock); do { - node->nid = (uint64_t)random(); - node->nid ^= (uint64_t)random() << 32; + node->nid = (uint64_t)frr_weak_random(); + node->nid ^= (uint64_t)frr_weak_random() << 32; } while (!node->nid || qobj_nodes_find(&nodes, node)); qobj_nodes_add(&nodes, node); pthread_rwlock_unlock(&nodes_lock); diff --git a/lib/resolver.c b/lib/resolver.c index 1be47bd6e1..e5caadb2d0 100644 --- a/lib/resolver.c +++ b/lib/resolver.c @@ -245,7 +245,13 @@ DEFUN(debug_resolver, return CMD_SUCCESS; } -static struct cmd_node resolver_debug_node = {RESOLVER_DEBUG_NODE, "", 1}; +static int resolver_config_write_debug(struct vty *vty); +static struct cmd_node resolver_debug_node = { + .name = "resolver debug", + .node = RESOLVER_DEBUG_NODE, + .prompt = "", + .config_write = resolver_config_write_debug, +}; static int 
resolver_config_write_debug(struct vty *vty) { @@ -274,7 +280,7 @@ void resolver_init(struct thread_master *tm) ARES_OPT_SOCK_STATE_CB | ARES_OPT_TIMEOUT | ARES_OPT_TRIES); - install_node(&resolver_debug_node, resolver_config_write_debug); + install_node(&resolver_debug_node); install_element(CONFIG_NODE, &debug_resolver_cmd); install_element(ENABLE_NODE, &debug_resolver_cmd); } diff --git a/lib/routemap.c b/lib/routemap.c index e2baa36f24..210512212d 100644 --- a/lib/routemap.c +++ b/lib/routemap.c @@ -3015,7 +3015,13 @@ DEFUN (no_debug_rmap, } /* Debug node. */ -static struct cmd_node rmap_debug_node = {RMAP_DEBUG_NODE, "", 1}; +static int rmap_config_write_debug(struct vty *vty); +static struct cmd_node rmap_debug_node = { + .name = "route-map debug", + .node = RMAP_DEBUG_NODE, + .prompt = "", + .config_write = rmap_config_write_debug, +}; /* Configuration write function. */ static int rmap_config_write_debug(struct vty *vty) @@ -3242,7 +3248,7 @@ void route_map_init(void) route_map_cli_init(); /* Install route map top node. */ - install_node(&rmap_debug_node, rmap_config_write_debug); + install_node(&rmap_debug_node); /* Install route map commands. */ install_element(CONFIG_NODE, &debug_rmap_cmd); diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c index 5b03b5266f..2c45f09751 100644 --- a/lib/routemap_cli.c +++ b/lib/routemap_cli.c @@ -148,6 +148,12 @@ void route_map_instance_show(struct vty *vty, struct lyd_node *dnode, SKIP_RULE("ipv6 next-hop type"); SKIP_RULE("metric"); SKIP_RULE("tag"); + /* Zebra specific match conditions. */ + SKIP_RULE("ip address prefix-len"); + SKIP_RULE("ipv6 address prefix-len"); + SKIP_RULE("ip next-hop prefix-len"); + SKIP_RULE("source-protocol"); + SKIP_RULE("source-instance"); vty_out(vty, " match %s %s\n", rmr->cmd->str, rmr->rule_str ? rmr->rule_str : ""); @@ -158,6 +164,8 @@ void route_map_instance_show(struct vty *vty, struct lyd_node *dnode, /* Skip all sets implemented by northbound. 
*/ SKIP_RULE("metric"); SKIP_RULE("tag"); + /* Zebra specific set actions. */ + SKIP_RULE("src"); vty_out(vty, " set %s %s\n", rmr->cmd->str, rmr->rule_str ? rmr->rule_str : ""); @@ -666,8 +674,25 @@ void route_map_condition_show(struct vty *vty, struct lyd_node *dnode, vty_out(vty, " match tag %s\n", yang_dnode_get_string(dnode, "./tag")); break; - case 100: - /* NOTHING: custom field, should be handled by daemon. */ + case 100: /* ipv4-prefix-length */ + vty_out(vty, " match ip address prefix-len %s\n", + yang_dnode_get_string(dnode,"./frr-zebra:ipv4-prefix-length")); + break; + case 101: /* ipv6-prefix-length */ + vty_out(vty, " match ipv6 address prefix-len %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:ipv6-prefix-length")); + break; + case 102: /* ipv4-next-hop-prefix-length */ + vty_out(vty, " match ip next-hop prefix-len %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:ipv4-prefix-length")); + break; + case 103: /* source-protocol */ + vty_out(vty, " match source-protocol %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-protocol")); + break; + case 104: /* source-instance */ + vty_out(vty, " match source-instance %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-instance")); break; } } @@ -868,8 +893,13 @@ void route_map_action_show(struct vty *vty, struct lyd_node *dnode, vty_out(vty, " set tag %s\n", yang_dnode_get_string(dnode, "./tag")); break; - case 100: - /* NOTHING: custom field, should be handled by daemon. */ + case 100: /* source */ + if (yang_dnode_exists(dnode, "./frr-zebra:source-v4")) + vty_out(vty, " set src %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-v4")); + else + vty_out(vty, " set src %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-v6")); break; } } @@ -1034,7 +1064,14 @@ static int route_map_config_write(struct vty *vty) } /* Route map node structure. 
*/ -static struct cmd_node rmap_node = {RMAP_NODE, "%s(config-route-map)# ", 1}; +static int route_map_config_write(struct vty *vty); +static struct cmd_node rmap_node = { + .name = "routemap", + .node = RMAP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-route-map)# ", + .config_write = route_map_config_write, +}; static void rmap_autocomplete(vector comps, struct cmd_token *token) { @@ -1057,7 +1094,7 @@ void route_map_cli_init(void) cmd_variable_handler_register(rmap_var_handlers); /* CLI commands. */ - install_node(&rmap_node, route_map_config_write); + install_node(&rmap_node); install_default(RMAP_NODE); install_element(CONFIG_NODE, &route_map_cmd); install_element(CONFIG_NODE, &no_route_map_cmd); diff --git a/lib/routemap_northbound.c b/lib/routemap_northbound.c index 69cebbd2a1..dd4cbd7d99 100644 --- a/lib/routemap_northbound.c +++ b/lib/routemap_northbound.c @@ -1221,32 +1221,7 @@ lib_route_map_entry_set_action_tag_destroy(enum nb_event event, } /* clang-format off */ -#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__) -/* - * gcc versions before 5.x miscalculate the size for structs with variable - * length arrays (they just count it as size 0) - */ -struct frr_yang_module_info_sizen { - /* YANG module name. */ - const char *name; - - /* Northbound callbacks. */ - const struct { - /* Data path of this YANG node. */ - const char *xpath; - - /* Callbacks implemented for this node. */ - struct nb_callbacks cbs; - - /* Priority - lower priorities are processed first. 
*/ - uint32_t priority; - } nodes[28]; -}; - -const struct frr_yang_module_info_sizen frr_route_map_info_sizen asm("frr_route_map_info") = { -#else const struct frr_yang_module_info frr_route_map_info = { -#endif .name = "frr-route-map", .nodes = { { diff --git a/lib/skiplist.c b/lib/skiplist.c index fa25770efa..2bef18f525 100644 --- a/lib/skiplist.c +++ b/lib/skiplist.c @@ -61,6 +61,7 @@ #include "vty.h" #include "skiplist.h" #include "lib_errors.h" +#include "network.h" DEFINE_MTYPE_STATIC(LIB, SKIP_LIST, "Skip List") DEFINE_MTYPE_STATIC(LIB, SKIP_LIST_NODE, "Skip Node") @@ -95,7 +96,7 @@ static int randomLevel(void) do { if (randomsLeft <= 0) { - randomBits = random(); + randomBits = frr_weak_random(); randomsLeft = BitsInRandom / 2; } b = randomBits & 3; @@ -624,7 +625,7 @@ void skiplist_test(struct vty *vty) zlog_debug("%s: (%d:%d)", __func__, i, k); } // keys[k] = (void *)random(); - keys[k] = (void *)scramble(k); + keys[k] = scramble(k); if (skiplist_insert(l, keys[k], keys[k])) zlog_debug("error in insert #%d,#%d", i, k); } @@ -649,7 +650,7 @@ void skiplist_test(struct vty *vty) zlog_debug("<%d:%d>", i, k); if (skiplist_delete(l, keys[k], keys[k])) zlog_debug("error in delete"); - keys[k] = (void *)scramble(k ^ 0xf0f0f0f0); + keys[k] = scramble(k ^ 0xf0f0f0f0); if (skiplist_insert(l, keys[k], keys[k])) zlog_debug("error in insert #%d,#%d", i, k); } diff --git a/lib/spf_backoff.c b/lib/spf_backoff.c index 41d4e2bb57..4e74714489 100644 --- a/lib/spf_backoff.c +++ b/lib/spf_backoff.c @@ -7,7 +7,7 @@ * Copyright (C) 2017 Orange Labs http://www.orange.com/ * Copyright (C) 2017 by Christian Franke, Open Source Routing / NetDEF Inc. 
* - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/spf_backoff.h b/lib/spf_backoff.h index 11b2701e3e..2617195d79 100644 --- a/lib/spf_backoff.h +++ b/lib/spf_backoff.h @@ -7,7 +7,7 @@ * Copyright (C) 2017 Orange Labs http://www.orange.com/ * Copyright (C) 2017 by Christian Franke, Open Source Routing / NetDEF Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/srcdest_table.c b/lib/srcdest_table.c index ee87d73077..66b735919b 100644 --- a/lib/srcdest_table.c +++ b/lib/srcdest_table.c @@ -4,7 +4,7 @@ * Copyright (C) 2017 by David Lamparter & Christian Franke, * Open Source Routing / NetDEF Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/srcdest_table.h b/lib/srcdest_table.h index 90418944c7..7982260777 100644 --- a/lib/srcdest_table.h +++ b/lib/srcdest_table.h @@ -4,7 +4,7 @@ * Copyright (C) 2017 by David Lamparter & Christian Franke, * Open Source Routing / NetDEF Inc. 
* - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/stream.c b/lib/stream.c index f046572f41..683a130e44 100644 --- a/lib/stream.c +++ b/lib/stream.c @@ -543,6 +543,27 @@ uint64_t stream_getq(struct stream *s) return q; } +bool stream_getq2(struct stream *s, uint64_t *q) +{ + STREAM_VERIFY_SANE(s); + + if (STREAM_READABLE(s) < sizeof(uint64_t)) { + STREAM_BOUND_WARN2(s, "get uint64"); + return false; + } + + *q = ((uint64_t)s->data[s->getp++]) << 56; + *q |= ((uint64_t)s->data[s->getp++]) << 48; + *q |= ((uint64_t)s->data[s->getp++]) << 40; + *q |= ((uint64_t)s->data[s->getp++]) << 32; + *q |= ((uint64_t)s->data[s->getp++]) << 24; + *q |= ((uint64_t)s->data[s->getp++]) << 16; + *q |= ((uint64_t)s->data[s->getp++]) << 8; + *q |= ((uint64_t)s->data[s->getp++]); + + return true; +} + /* Get next long word from the stream. 
*/ uint32_t stream_get_ipv4(struct stream *s) { diff --git a/lib/stream.h b/lib/stream.h index 6fcf9a53cf..5c7d94fab8 100644 --- a/lib/stream.h +++ b/lib/stream.h @@ -215,6 +215,7 @@ extern bool stream_getl2(struct stream *s, uint32_t *l); extern uint32_t stream_getl_from(struct stream *, size_t); extern uint64_t stream_getq(struct stream *); extern uint64_t stream_getq_from(struct stream *, size_t); +bool stream_getq2(struct stream *s, uint64_t *q); extern uint32_t stream_get_ipv4(struct stream *); /* IEEE-754 floats */ @@ -402,6 +403,25 @@ static inline const uint8_t *ptr_get_be32(const uint8_t *ptr, uint32_t *out) (P) = _pval; \ } while (0) +#define STREAM_GETF(S, P) \ + do { \ + union { \ + float r; \ + uint32_t d; \ + } _pval; \ + if (stream_getl2((S), &_pval.d)) \ + goto stream_failure; \ + (P) = _pval.r; \ + } while (0) + +#define STREAM_GETQ(S, P) \ + do { \ + uint64_t _pval; \ + if (!stream_getq2((S), &_pval)) \ + goto stream_failure; \ + (P) = _pval; \ + } while (0) + #define STREAM_GET(P, STR, SIZE) \ do { \ if (!stream_get2((P), (STR), (SIZE))) \ diff --git a/lib/subdir.am b/lib/subdir.am index 4f62eb2264..ed6cf31b34 100644 --- a/lib/subdir.am +++ b/lib/subdir.am @@ -45,6 +45,7 @@ lib_libfrr_la_SOURCES = \ lib/libfrr.c \ lib/linklist.c \ lib/log.c \ + lib/log_filter.c \ lib/log_vty.c \ lib/md5.c \ lib/memory.c \ @@ -100,6 +101,8 @@ lib_libfrr_la_SOURCES = \ lib/yang_translator.c \ lib/yang_wrappers.c \ lib/zclient.c \ + lib/zlog.c \ + lib/zlog_targets.c \ lib/printf/printf-pos.c \ lib/printf/vfprintf.c \ lib/printf/glue.c \ @@ -110,8 +113,13 @@ nodist_lib_libfrr_la_SOURCES = \ yang/frr-interface.yang.c \ yang/frr-route-map.yang.c \ yang/frr-route-types.yang.c \ + yang/frr-vrf.yang.c \ + yang/frr-routing.yang.c \ yang/ietf/ietf-routing-types.yang.c \ + yang/ietf/ietf-interfaces.yang.c \ yang/frr-module-translator.yang.c \ + yang/frr-nexthop.yang.c \ + yang/frr-igmp.yang.c \ # end vtysh_scan += \ @@ -254,6 +262,8 @@ pkginclude_HEADERS += \ lib/zassert.h \ 
lib/zclient.h \ lib/zebra.h \ + lib/zlog.h \ + lib/zlog_targets.h \ lib/pbr.h \ # end @@ -265,7 +275,6 @@ nodist_pkginclude_HEADERS += \ noinst_HEADERS += \ lib/clippy.h \ - lib/log_int.h \ lib/plist_int.h \ lib/printf/printfcommon.h \ lib/printf/printflocal.h \ diff --git a/lib/table.c b/lib/table.c index 1a89a95f4f..86347cbacd 100644 --- a/lib/table.c +++ b/lib/table.c @@ -160,7 +160,7 @@ static void route_common(const struct prefix *n, const struct prefix *p, np = (const uint8_t *)&n->u.prefix; pp = (const uint8_t *)&p->u.prefix; - newp = (uint8_t *)&new->u.prefix; + newp = &new->u.prefix; for (i = 0; i < p->prefixlen / 8; i++) { if (np[i] == pp[i]) diff --git a/lib/thread.c b/lib/thread.c index dbf668a699..4d689a9f88 100644 --- a/lib/thread.c +++ b/lib/thread.c @@ -724,6 +724,7 @@ static int fd_poll(struct thread_master *m, struct pollfd *pfds, nfds_t pfdsize, < 0) // effect a poll (return immediately) timeout = 0; + zlog_tls_buffer_flush(); rcu_read_unlock(); rcu_assert_read_unlocked(); diff --git a/lib/typesafe.c b/lib/typesafe.c index 6635cf7506..a52b55b734 100644 --- a/lib/typesafe.c +++ b/lib/typesafe.c @@ -23,6 +23,7 @@ #include "typesafe.h" #include "memory.h" +#include "network.h" DEFINE_MTYPE_STATIC(LIB, TYPEDHASH_BUCKET, "Typed-hash bucket") DEFINE_MTYPE_STATIC(LIB, SKIPLIST_OFLOW, "Skiplist overflow") @@ -196,7 +197,7 @@ struct sskip_item *typesafe_skiplist_add(struct sskip_head *head, int cmpval; /* level / newlevel are 1-counted here */ - newlevel = __builtin_ctz(random()) + 1; + newlevel = __builtin_ctz(frr_weak_random()) + 1; if (newlevel > SKIPLIST_MAXDEPTH) newlevel = SKIPLIST_MAXDEPTH; @@ -36,6 +36,8 @@ #include "privs.h" #include "nexthop_group.h" #include "lib_errors.h" +#include "northbound.h" +#include "northbound_cli.h" /* default VRF ID value used when VRF backend is not NETNS */ #define VRF_DEFAULT_INTERNAL 0 @@ -116,7 +118,7 @@ static void vrf_update_vrf_id(ns_id_t ns_id, void *opaqueptr) vrf->vrf_id = vrf_id; RB_INSERT(vrf_id_head, 
&vrfs_by_id, vrf); if (old_vrf_id == VRF_UNKNOWN) - vrf_enable((struct vrf *)vrf); + vrf_enable(vrf); } int vrf_switch_to_netns(vrf_id_t vrf_id) @@ -324,10 +326,7 @@ const char *vrf_id_to_name(vrf_id_t vrf_id) struct vrf *vrf; vrf = vrf_lookup_by_id(vrf_id); - if (vrf) - return vrf->name; - - return "n/a"; + return VRF_LOGNAME(vrf); } vrf_id_t vrf_name_to_id(const char *name) @@ -593,16 +592,30 @@ int vrf_get_backend(void) return vrf_backend; } -void vrf_configure_backend(int vrf_backend_netns) +int vrf_configure_backend(enum vrf_backend_type backend) { - vrf_backend = vrf_backend_netns; + /* Work around issue in old gcc */ + switch (backend) { + case VRF_BACKEND_UNKNOWN: + case VRF_BACKEND_NETNS: + case VRF_BACKEND_VRF_LITE: + break; + default: + return -1; + } + + vrf_backend = backend; vrf_backend_configured = 1; + + return 0; } int vrf_handler_create(struct vty *vty, const char *vrfname, struct vrf **vrf) { struct vrf *vrfp; + char xpath_list[XPATH_MAXLEN]; + int ret; if (strlen(vrfname) > VRF_NAMSIZ) { if (vty) @@ -617,13 +630,24 @@ int vrf_handler_create(struct vty *vty, const char *vrfname, return CMD_WARNING_CONFIG_FAILED; } - vrfp = vrf_get(VRF_UNKNOWN, vrfname); - - if (vty) - VTY_PUSH_CONTEXT(VRF_NODE, vrfp); + if (vty) { + snprintf(xpath_list, sizeof(xpath_list), + "/frr-vrf:lib/vrf[name='%s']", vrfname); + + nb_cli_enqueue_change(vty, xpath_list, NB_OP_CREATE, NULL); + ret = nb_cli_apply_changes(vty, xpath_list); + if (ret == CMD_SUCCESS) { + VTY_PUSH_XPATH(VRF_NODE, xpath_list); + vrfp = vrf_lookup_by_name(vrfname); + if (vrfp) + VTY_PUSH_CONTEXT(VRF_NODE, vrfp); + } + } else { + vrfp = vrf_get(VRF_UNKNOWN, vrfname); - if (vrf) - *vrf = vrfp; + if (vrf) + *vrf = vrfp; + } return CMD_SUCCESS; } @@ -726,6 +750,7 @@ DEFUN (no_vrf, "VRF's name\n") { const char *vrfname = argv[2]->arg; + char xpath_list[XPATH_MAXLEN]; struct vrf *vrfp; @@ -741,15 +766,20 @@ DEFUN (no_vrf, return CMD_WARNING_CONFIG_FAILED; } - /* Clear configured flag and invoke delete. 
*/ - UNSET_FLAG(vrfp->status, VRF_CONFIGURED); - vrf_delete(vrfp); + snprintf(xpath_list, sizeof(xpath_list), "/frr-vrf:lib/vrf[name='%s']", + vrfname); - return CMD_SUCCESS; + nb_cli_enqueue_change(vty, xpath_list, NB_OP_DESTROY, NULL); + return nb_cli_apply_changes(vty, xpath_list); } -static struct cmd_node vrf_node = {VRF_NODE, "%s(config-vrf)# ", 1}; +static struct cmd_node vrf_node = { + .name = "vrf", + .node = VRF_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-vrf)# ", +}; DEFUN_NOSH (vrf_netns, vrf_netns_cmd, @@ -839,11 +869,17 @@ static int vrf_write_host(struct vty *vty) return 1; } -static struct cmd_node vrf_debug_node = {VRF_DEBUG_NODE, "", 1}; +static int vrf_write_host(struct vty *vty); +static struct cmd_node vrf_debug_node = { + .name = "vrf debug", + .node = VRF_DEBUG_NODE, + .prompt = "", + .config_write = vrf_write_host, +}; void vrf_install_commands(void) { - install_node(&vrf_debug_node, vrf_write_host); + install_node(&vrf_debug_node); install_element(CONFIG_NODE, &vrf_debug_cmd); install_element(ENABLE_NODE, &vrf_debug_cmd); @@ -856,7 +892,8 @@ void vrf_cmd_init(int (*writefunc)(struct vty *vty), { install_element(CONFIG_NODE, &vrf_cmd); install_element(CONFIG_NODE, &no_vrf_cmd); - install_node(&vrf_node, writefunc); + vrf_node.config_write = writefunc; + install_node(&vrf_node); install_default(VRF_NODE); install_element(VRF_NODE, &vrf_exit_cmd); if (vrf_is_backend_netns() && ns_have_netns()) { @@ -1010,3 +1047,147 @@ vrf_id_t vrf_generate_id(void) return ++vrf_id_local; } + +/* ------- Northbound callbacks ------- */ + +/* + * XPath: /frr-vrf:lib/vrf + */ +static int lib_vrf_create(enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + const char *vrfname; + struct vrf *vrfp; + + vrfname = yang_dnode_get_string(dnode, "./name"); + + if (event != NB_EV_APPLY) + return NB_OK; + + vrfp = vrf_get(VRF_UNKNOWN, vrfname); + + nb_running_set_entry(dnode, vrfp); + + return NB_OK; +} + +static int 
lib_vrf_destroy(enum nb_event event, const struct lyd_node *dnode) +{ + struct vrf *vrfp; + + switch (event) { + case NB_EV_VALIDATE: + vrfp = nb_running_get_entry(dnode, NULL, true); + if (CHECK_FLAG(vrfp->status, VRF_ACTIVE)) { + zlog_debug("%s Only inactive VRFs can be deleted", + __func__); + return NB_ERR_VALIDATION; + } + break; + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrfp = nb_running_unset_entry(dnode); + + /* Clear configured flag and invoke delete. */ + UNSET_FLAG(vrfp->status, VRF_CONFIGURED); + vrf_delete(vrfp); + break; + } + + return NB_OK; +} + +static const void *lib_vrf_get_next(const void *parent_list_entry, + const void *list_entry) +{ + struct vrf *vrfp = (struct vrf *)list_entry; + + if (list_entry == NULL) { + vrfp = RB_MIN(vrf_name_head, &vrfs_by_name); + } else { + vrfp = RB_NEXT(vrf_name_head, vrfp); + } + + return vrfp; +} + +static int lib_vrf_get_keys(const void *list_entry, struct yang_list_keys *keys) +{ + struct vrf *vrfp = (struct vrf *)list_entry; + + keys->num = 1; + strlcpy(keys->key[0], vrfp->name, sizeof(keys->key[0])); + + return NB_OK; +} + +static const void *lib_vrf_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys) +{ + const char *vrfname = keys->key[0]; + + struct vrf *vrf = vrf_lookup_by_name(vrfname); + + return vrf; +} + +/* + * XPath: /frr-vrf:lib/vrf/id + */ +static struct yang_data *lib_vrf_state_id_get_elem(const char *xpath, + const void *list_entry) +{ + struct vrf *vrfp = (struct vrf *)list_entry; + + return yang_data_new_uint32(xpath, vrfp->vrf_id); +} + +/* + * XPath: /frr-vrf:lib/vrf/active + */ +static struct yang_data *lib_vrf_state_active_get_elem(const char *xpath, + const void *list_entry) +{ + struct vrf *vrfp = (struct vrf *)list_entry; + + if (vrfp->status == VRF_ACTIVE) + return yang_data_new_bool( + xpath, vrfp->status == VRF_ACTIVE ? 
true : false); + + return NULL; +} + +/* clang-format off */ +const struct frr_yang_module_info frr_vrf_info = { + .name = "frr-vrf", + .nodes = { + { + .xpath = "/frr-vrf:lib/vrf", + .cbs = { + .create = lib_vrf_create, + .destroy = lib_vrf_destroy, + .get_next = lib_vrf_get_next, + .get_keys = lib_vrf_get_keys, + .lookup_entry = lib_vrf_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/state/id", + .cbs = { + .get_elem = lib_vrf_state_id_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/state/active", + .cbs = { + .get_elem = lib_vrf_state_active_get_elem, + } + }, + { + .xpath = NULL, + }, + } +}; + @@ -101,9 +101,12 @@ RB_PROTOTYPE(vrf_name_head, vrf, name_entry, vrf_name_compare) DECLARE_QOBJ_TYPE(vrf) /* Allow VRF with netns as backend */ -#define VRF_BACKEND_VRF_LITE 0 -#define VRF_BACKEND_NETNS 1 -#define VRF_BACKEND_UNKNOWN 2 +enum vrf_backend_type { + VRF_BACKEND_VRF_LITE, + VRF_BACKEND_NETNS, + VRF_BACKEND_UNKNOWN, + VRF_BACKEND_MAX, +}; extern struct vrf_id_head vrfs_by_id; extern struct vrf_name_head vrfs_by_name; @@ -292,10 +295,10 @@ extern void vrf_install_commands(void); * VRF utilities */ -/* API for configuring VRF backend - * should be called from zebra only +/* + * API for configuring VRF backend */ -extern void vrf_configure_backend(int vrf_backend_netns); +extern int vrf_configure_backend(enum vrf_backend_type backend); extern int vrf_get_backend(void); extern int vrf_is_backend_netns(void); @@ -322,6 +325,8 @@ extern int vrf_enable(struct vrf *vrf); extern void vrf_delete(struct vrf *vrf); extern vrf_id_t vrf_generate_id(void); +extern const struct frr_yang_module_info frr_vrf_info; + #ifdef __cplusplus } #endif @@ -2199,6 +2199,9 @@ void vty_close(struct vty *vty) int i; bool was_stdio = false; + /* Drop out of configure / transaction if needed. 
*/ + vty_config_exit(vty); + /* Cancel threads.*/ THREAD_OFF(vty->t_read); THREAD_OFF(vty->t_write); @@ -2242,9 +2245,6 @@ void vty_close(struct vty *vty) list_delete(&vty->error); } - /* Check configure. */ - vty_config_exit(vty); - /* OK free vty. */ XFREE(MTYPE_VTY, vty); @@ -2605,6 +2605,28 @@ int vty_config_enter(struct vty *vty, bool private_config, bool exclusive) void vty_config_exit(struct vty *vty) { + enum node_type node = vty->node; + struct cmd_node *cnode; + + /* unlock and jump up to ENABLE_NODE if -and only if- we're + * somewhere below CONFIG_NODE */ + while (node && node != CONFIG_NODE) { + cnode = vector_lookup(cmdvec, node); + node = cnode->parent_node; + } + if (node != CONFIG_NODE) + /* called outside config, e.g. vty_close() in ENABLE_NODE */ + return; + + while (vty->node != ENABLE_NODE) + /* will call vty_config_node_exit() below */ + cmd_exit(vty); +} + +int vty_config_node_exit(struct vty *vty) +{ + vty->xpath_index = 0; + /* Check if there's a pending confirmed commit. */ if (vty->t_confirmed_commit_timeout) { vty_out(vty, @@ -2626,6 +2648,7 @@ void vty_config_exit(struct vty *vty) } vty->config = false; + return 1; } /* Master of the threads. */ @@ -2989,8 +3012,13 @@ static int vty_config_write(struct vty *vty) return CMD_SUCCESS; } +static int vty_config_write(struct vty *vty); struct cmd_node vty_node = { - VTY_NODE, "%s(config-line)# ", 1, + .name = "vty", + .node = VTY_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-line)# ", + .config_write = vty_config_write, }; /* Reset all VTY status. */ @@ -3084,7 +3112,7 @@ void vty_init(struct thread_master *master_thread, bool do_command_logging) Vvty_serv_thread = vector_init(VECTOR_MIN_SIZE); /* Install bgp top node. 
*/ - install_node(&vty_node, vty_config_write); + install_node(&vty_node); install_element(VIEW_NODE, &config_who_cmd); install_element(VIEW_NODE, &show_history_cmd); @@ -323,6 +323,7 @@ extern void vty_log(const char *level, const char *proto, const char *msg, extern int vty_config_enter(struct vty *vty, bool private_config, bool exclusive); extern void vty_config_exit(struct vty *); +extern int vty_config_node_exit(struct vty *); extern int vty_shell(struct vty *); extern int vty_shell_serv(struct vty *); extern void vty_hello(struct vty *); diff --git a/lib/yang.c b/lib/yang.c index 93e6db3055..c80bf20306 100644 --- a/lib/yang.c +++ b/lib/yang.c @@ -72,13 +72,17 @@ static const char *yang_module_imp_clb(const char *mod_name, return NULL; } +/* clang-format off */ static const char *const frr_native_modules[] = { "frr-interface", + "frr-vrf", "frr-ripd", "frr-ripngd", "frr-isisd", "frr-vrrpd", + "frr-zebra", }; +/* clang-format on */ /* Generate the yang_modules tree. */ static inline int yang_module_compare(const struct yang_module *a, @@ -628,7 +632,7 @@ void yang_debugging_set(bool enable) } } -struct ly_ctx *yang_ctx_new_setup(void) +struct ly_ctx *yang_ctx_new_setup(bool embedded_modules) { struct ly_ctx *ctx; const char *yang_models_path = YANG_MODELS_PATH; @@ -647,18 +651,21 @@ struct ly_ctx *yang_ctx_new_setup(void) ctx = ly_ctx_new(yang_models_path, LY_CTX_DISABLE_SEARCHDIR_CWD); if (!ctx) return NULL; - ly_ctx_set_module_imp_clb(ctx, yang_module_imp_clb, NULL); + + if (embedded_modules) + ly_ctx_set_module_imp_clb(ctx, yang_module_imp_clb, NULL); + return ctx; } -void yang_init(void) +void yang_init(bool embedded_modules) { /* Initialize libyang global parameters that affect all containers. */ ly_set_log_clb(ly_log_cb, 1); ly_log_options(LY_LOLOG | LY_LOSTORE); /* Initialize libyang container for native models. 
*/ - ly_native_ctx = yang_ctx_new_setup(); + ly_native_ctx = yang_ctx_new_setup(embedded_modules); if (!ly_native_ctx) { flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); exit(1); diff --git a/lib/yang.h b/lib/yang.h index 6892e36019..126521707b 100644 --- a/lib/yang.h +++ b/lib/yang.h @@ -34,7 +34,7 @@ extern "C" { #endif /* Maximum XPath length. */ -#define XPATH_MAXLEN 256 +#define XPATH_MAXLEN 512 /* Maximum list key length. */ #define LIST_MAXKEYS 8 @@ -482,8 +482,11 @@ extern struct yang_data *yang_data_list_find(const struct list *list, /* * Create and set up a libyang context (for use by the translator) + * + * embedded_modules + * Specify whether libyang should attempt to look for embedded YANG modules. */ -extern struct ly_ctx *yang_ctx_new_setup(void); +extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules); /* * Enable or disable libyang verbose debugging. @@ -496,8 +499,11 @@ extern void yang_debugging_set(bool enable); /* * Initialize the YANG subsystem. Should be called only once during the * daemon initialization process. + * + * embedded_modules + * Specify whether libyang should attempt to look for embedded YANG modules. */ -extern void yang_init(void); +extern void yang_init(bool embedded_modules); /* * Finish the YANG subsystem gracefully. Should be called only when the daemon diff --git a/lib/yang_translator.c b/lib/yang_translator.c index 341420eeda..7dbb1f3f1a 100644 --- a/lib/yang_translator.c +++ b/lib/yang_translator.c @@ -171,7 +171,7 @@ struct yang_translator *yang_translator_load(const char *path) RB_INSERT(yang_translators, &yang_translators, translator); /* Initialize the translator libyang context. 
*/ - translator->ly_ctx = yang_ctx_new_setup(); + translator->ly_ctx = yang_ctx_new_setup(false); if (!translator->ly_ctx) { flog_warn(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); goto error; @@ -511,7 +511,7 @@ static unsigned int yang_module_nodes_count(const struct lys_module *module) void yang_translator_init(void) { - ly_translator_ctx = yang_ctx_new_setup(); + ly_translator_ctx = yang_ctx_new_setup(true); if (!ly_translator_ctx) { flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); exit(1); diff --git a/lib/yang_wrappers.c b/lib/yang_wrappers.c index 2b502d635b..7d94a2f744 100644 --- a/lib/yang_wrappers.c +++ b/lib/yang_wrappers.c @@ -23,6 +23,7 @@ #include "lib_errors.h" #include "northbound.h" #include "printfrr.h" +#include "nexthop.h" static const char *yang_get_default_value(const char *xpath) { @@ -783,6 +784,14 @@ void yang_get_default_string_buf(char *buf, size_t size, const char *xpath_fmt, } /* + * Primitive type: empty. + */ +struct yang_data *yang_data_new_empty(const char *xpath) +{ + return yang_data_new(xpath, NULL); +} + +/* * Derived type: IP prefix. */ void yang_str2prefix(const char *value, union prefixptr prefix) @@ -1114,3 +1123,44 @@ void yang_get_default_ip(struct ipaddr *var, const char *xpath_fmt, ...) 
value = yang_get_default_value(xpath); yang_str2ip(value, var); } + +struct yang_data *yang_data_new_mac(const char *xpath, + const struct ethaddr *mac) +{ + char value_str[ETHER_ADDR_STRLEN]; + + prefix_mac2str(mac, value_str, sizeof(value_str)); + return yang_data_new(xpath, value_str); +} + +void yang_str2mac(const char *value, struct ethaddr *mac) +{ + (void)prefix_str2mac(value, mac); +} + +const char *yang_nexthop_type2str(uint32_t ntype) +{ + switch (ntype) { + case NEXTHOP_TYPE_IFINDEX: + return "ifindex"; + break; + case NEXTHOP_TYPE_IPV4: + return "ip4"; + break; + case NEXTHOP_TYPE_IPV4_IFINDEX: + return "ip4-ifindex"; + break; + case NEXTHOP_TYPE_IPV6: + return "ip6"; + break; + case NEXTHOP_TYPE_IPV6_IFINDEX: + return "ip6-ifindex"; + break; + case NEXTHOP_TYPE_BLACKHOLE: + return "blackhole"; + break; + default: + return "unknown"; + break; + } +} diff --git a/lib/yang_wrappers.h b/lib/yang_wrappers.h index 10d1ea314f..d853b61ae1 100644 --- a/lib/yang_wrappers.h +++ b/lib/yang_wrappers.h @@ -114,6 +114,9 @@ extern const char *yang_get_default_string(const char *xpath_fmt, ...); extern void yang_get_default_string_buf(char *buf, size_t size, const char *xpath_fmt, ...); +/* empty */ +extern struct yang_data *yang_data_new_empty(const char *xpath); + /* ip prefix */ extern void yang_str2prefix(const char *value, union prefixptr prefix); extern struct yang_data *yang_data_new_prefix(const char *xpath, @@ -172,4 +175,11 @@ extern void yang_dnode_get_ip(struct ipaddr *addr, const struct lyd_node *dnode, const char *xpath_fmt, ...); extern void yang_get_default_ip(struct ipaddr *var, const char *xpath_fmt, ...); +/* mac */ +extern struct yang_data *yang_data_new_mac(const char *xpath, + const struct ethaddr *mac); +extern void yang_str2mac(const char *value, struct ethaddr *mac); + +extern const char *yang_nexthop_type2str(uint32_t ntype); + #endif /* _FRR_NORTHBOUND_WRAPPERS_H_ */ diff --git a/lib/zclient.c b/lib/zclient.c index 1ac0e49e13..be2c4e54a0 
100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -388,6 +388,7 @@ int zclient_send_hello(struct zclient *zclient) zclient_create_header(s, ZEBRA_HELLO, VRF_DEFAULT); stream_putc(s, zclient->redist_default); stream_putw(s, zclient->instance); + stream_putl(s, zclient->session_id); if (zclient->receive_notify) stream_putc(s, 1); else @@ -904,6 +905,7 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh, } } + /* If present, set 'weight' flag before encoding flags */ if (api_nh->weight) SET_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_WEIGHT); @@ -948,6 +950,10 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh, stream_put(s, &(api_nh->rmac), sizeof(struct ethaddr)); + /* Index of backup nexthop */ + if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) + stream_putc(s, api_nh->backup_idx); + done: return ret; } @@ -985,7 +991,7 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) stream_putc(s, api->prefix.family); psize = PSIZE(api->prefix.prefixlen); stream_putc(s, api->prefix.prefixlen); - stream_write(s, (uint8_t *)&api->prefix.u.prefix, psize); + stream_write(s, &api->prefix.u.prefix, psize); if (CHECK_FLAG(api->message, ZAPI_MESSAGE_SRCPFX)) { psize = PSIZE(api->src_prefix.prefixlen); @@ -1007,6 +1013,10 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) return -1; } + /* We canonicalize the nexthops by sorting them; this allows + * zebra to resolve the list of nexthops to a nexthop-group + * more efficiently. 
+ */ zapi_nexthop_group_sort(api->nexthops, api->nexthop_num); stream_putw(s, api->nexthop_num); @@ -1033,6 +1043,50 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) } } + /* Backup nexthops */ + if (CHECK_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS)) { + /* limit the number of nexthops if necessary */ + if (api->backup_nexthop_num > MULTIPATH_NUM) { + char buf[PREFIX2STR_BUFFER]; + + prefix2str(&api->prefix, buf, sizeof(buf)); + flog_err( + EC_LIB_ZAPI_ENCODE, + "%s: prefix %s: can't encode %u backup nexthops (maximum is %u)", + __func__, buf, api->backup_nexthop_num, + MULTIPATH_NUM); + return -1; + } + + /* Note that we do not sort the list of backup nexthops - + * this list is treated as an array and indexed by each + * primary nexthop that is associated with a backup. + */ + + stream_putw(s, api->backup_nexthop_num); + + for (i = 0; i < api->backup_nexthop_num; i++) { + api_nh = &api->backup_nexthops[i]; + + /* MPLS labels for BGP-LU or Segment Routing */ + if (api_nh->label_num > MPLS_MAX_LABELS) { + char buf[PREFIX2STR_BUFFER]; + + prefix2str(&api->prefix, buf, sizeof(buf)); + + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: prefix %s: backup: can't encode %u labels (maximum is %u)", + __func__, buf, + api_nh->label_num, + MPLS_MAX_LABELS); + return -1; + } + + if (zapi_nexthop_encode(s, api_nh, api->flags) != 0) + return -1; + } + } + /* Attributes. */ if (CHECK_FLAG(api->message, ZAPI_MESSAGE_DISTANCE)) stream_putc(s, api->distance); @@ -1108,6 +1162,10 @@ static int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh, STREAM_GET(&(api_nh->rmac), s, sizeof(struct ethaddr)); + /* Backup nexthop index */ + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) + STREAM_GETC(s, api_nh->backup_idx); + /* Success */ ret = 0; @@ -1214,6 +1272,24 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api) } } + /* Backup nexthops. 
*/ + if (CHECK_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS)) { + STREAM_GETW(s, api->backup_nexthop_num); + if (api->backup_nexthop_num > MULTIPATH_NUM) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: invalid number of backup nexthops (%u)", + __func__, api->backup_nexthop_num); + return -1; + } + + for (i = 0; i < api->backup_nexthop_num; i++) { + api_nh = &api->backup_nexthops[i]; + + if (zapi_nexthop_decode(s, api_nh, api->flags) != 0) + return -1; + } + } + /* Attributes. */ if (CHECK_FLAG(api->message, ZAPI_MESSAGE_DISTANCE)) STREAM_GETC(s, api->distance); @@ -1388,7 +1464,7 @@ stream_failure: return false; } -struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh) +struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh) { struct nexthop *n = nexthop_new(); @@ -1405,6 +1481,11 @@ struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh) znh->labels); } + if (CHECK_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) { + SET_FLAG(n->flags, NEXTHOP_FLAG_HAS_BACKUP); + n->backup_idx = znh->backup_idx; + } + return n; } @@ -1420,10 +1501,16 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, znh->type = nh->type; znh->vrf_id = nh->vrf_id; + znh->weight = nh->weight; znh->ifindex = nh->ifindex; znh->gate = nh->gate; if (nh->nh_label && (nh->nh_label->num_labels > 0)) { + + /* Validate */ + if (nh->nh_label->num_labels > MPLS_MAX_LABELS) + return -1; + for (i = 0; i < nh->nh_label->num_labels; i++) znh->labels[i] = nh->nh_label->label[i]; @@ -1431,10 +1518,31 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_LABEL); } + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP); + znh->backup_idx = nh->backup_idx; + } + return 0; } /* + * Wrapper that converts backup nexthop + */ +int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh, + const struct nexthop *nh) +{ + int ret; + + /* Ensure that zapi flags are correct: backups 
don't have backups */ + ret = zapi_nexthop_from_nexthop(znh, nh); + if (ret == 0) + UNSET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP); + + return ret; +} + +/* * Decode the nexthop-tracking update message */ bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr) @@ -1528,33 +1636,34 @@ int zebra_redistribute_default_send(int command, struct zclient *zclient, } /* Get prefix in ZServ format; family should be filled in on prefix */ -static void zclient_stream_get_prefix(struct stream *s, struct prefix *p) +static int zclient_stream_get_prefix(struct stream *s, struct prefix *p) { size_t plen = prefix_blen(p); uint8_t c; p->prefixlen = 0; if (plen == 0) - return; + return -1; - stream_get(&p->u.prefix, s, plen); + STREAM_GET(&p->u.prefix, s, plen); STREAM_GETC(s, c); p->prefixlen = MIN(plen * 8, c); + return 0; stream_failure: - return; + return -1; } /* Router-id update from zebra daemon. */ -void zebra_router_id_update_read(struct stream *s, struct prefix *rid) +int zebra_router_id_update_read(struct stream *s, struct prefix *rid) { /* Fetch interface address. */ STREAM_GETC(s, rid->family); - zclient_stream_get_prefix(s, rid); + return zclient_stream_get_prefix(s, rid); stream_failure: - return; + return -1; } /* Interface addition from zebra daemon. */ @@ -1603,24 +1712,36 @@ stream_failure: * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -static void zclient_vrf_add(struct zclient *zclient, vrf_id_t vrf_id) +static int zclient_vrf_add(struct zclient *zclient, vrf_id_t vrf_id) { struct vrf *vrf; - char vrfname_tmp[VRF_NAMSIZ]; + char vrfname_tmp[VRF_NAMSIZ + 1] = {}; struct vrf_data data; - stream_get(&data, zclient->ibuf, sizeof(struct vrf_data)); + STREAM_GET(&data, zclient->ibuf, sizeof(struct vrf_data)); /* Read interface name. */ - stream_get(vrfname_tmp, zclient->ibuf, VRF_NAMSIZ); + STREAM_GET(vrfname_tmp, zclient->ibuf, VRF_NAMSIZ); - /* Lookup/create vrf by vrf_id. 
*/ + if (strlen(vrfname_tmp) == 0) + goto stream_failure; + + /* Lookup/create vrf by name, then vrf_id. */ vrf = vrf_get(vrf_id, vrfname_tmp); + + /* If there's already a VRF with this name, don't create vrf */ + if (!vrf) + return 0; + vrf->data.l.table_id = data.l.table_id; memcpy(vrf->data.l.netns_name, data.l.netns_name, NS_NAMSIZ); /* overwrite default vrf */ if (vrf_id == VRF_DEFAULT) vrf_set_default_name(vrfname_tmp, false); vrf_enable(vrf); + + return 0; +stream_failure: + return -1; } static void zclient_vrf_delete(struct zclient *zclient, vrf_id_t vrf_id) @@ -1641,21 +1762,32 @@ static void zclient_vrf_delete(struct zclient *zclient, vrf_id_t vrf_id) vrf_delete(vrf); } -static void zclient_interface_add(struct zclient *zclient, vrf_id_t vrf_id) +static int zclient_interface_add(struct zclient *zclient, vrf_id_t vrf_id) { struct interface *ifp; - char ifname_tmp[INTERFACE_NAMSIZ]; + char ifname_tmp[INTERFACE_NAMSIZ + 1] = {}; struct stream *s = zclient->ibuf; /* Read interface name. */ - stream_get(ifname_tmp, s, INTERFACE_NAMSIZ); + STREAM_GET(ifname_tmp, s, INTERFACE_NAMSIZ); /* Lookup/create interface by name. */ + if (!vrf_get(vrf_id, NULL)) { + zlog_debug( + "Rx'd interface add from Zebra, but VRF %u does not exist", + vrf_id); + return -1; + } + ifp = if_get_by_name(ifname_tmp, vrf_id); zebra_interface_if_set_value(s, ifp); if_new_via_zapi(ifp); + + return 0; +stream_failure: + return -1; } /* @@ -1667,10 +1799,10 @@ static void zclient_interface_add(struct zclient *zclient, vrf_id_t vrf_id) struct interface *zebra_interface_state_read(struct stream *s, vrf_id_t vrf_id) { struct interface *ifp; - char ifname_tmp[INTERFACE_NAMSIZ]; + char ifname_tmp[INTERFACE_NAMSIZ + 1] = {}; /* Read interface name. */ - stream_get(ifname_tmp, s, INTERFACE_NAMSIZ); + STREAM_GET(ifname_tmp, s, INTERFACE_NAMSIZ); /* Lookup this by interface index. 
*/ ifp = if_lookup_by_name(ifname_tmp, vrf_id); @@ -1684,6 +1816,8 @@ struct interface *zebra_interface_state_read(struct stream *s, vrf_id_t vrf_id) zebra_interface_if_set_value(s, ifp); return ifp; +stream_failure: + return NULL; } static void zclient_interface_delete(struct zclient *zclient, vrf_id_t vrf_id) @@ -1737,21 +1871,23 @@ static void zclient_handle_error(ZAPI_CALLBACK_ARGS) (*zclient->handle_error)(error); } -static void link_params_set_value(struct stream *s, struct if_link_params *iflp) +static int link_params_set_value(struct stream *s, struct if_link_params *iflp) { if (iflp == NULL) - return; + return -1; + + uint32_t bwclassnum; - iflp->lp_status = stream_getl(s); - iflp->te_metric = stream_getl(s); - iflp->max_bw = stream_getf(s); - iflp->max_rsv_bw = stream_getf(s); - uint32_t bwclassnum = stream_getl(s); + STREAM_GETL(s, iflp->lp_status); + STREAM_GETL(s, iflp->te_metric); + STREAM_GETF(s, iflp->max_bw); + STREAM_GETF(s, iflp->max_rsv_bw); + STREAM_GETL(s, bwclassnum); { unsigned int i; for (i = 0; i < bwclassnum && i < MAX_CLASS_TYPE; i++) - iflp->unrsv_bw[i] = stream_getf(s); + STREAM_GETF(s, iflp->unrsv_bw[i]); if (i < bwclassnum) flog_err( EC_LIB_ZAPI_MISSMATCH, @@ -1759,19 +1895,23 @@ static void link_params_set_value(struct stream *s, struct if_link_params *iflp) " - outdated library?", __func__, bwclassnum, MAX_CLASS_TYPE); } - iflp->admin_grp = stream_getl(s); - iflp->rmt_as = stream_getl(s); + STREAM_GETL(s, iflp->admin_grp); + STREAM_GETL(s, iflp->rmt_as); iflp->rmt_ip.s_addr = stream_get_ipv4(s); - iflp->av_delay = stream_getl(s); - iflp->min_delay = stream_getl(s); - iflp->max_delay = stream_getl(s); - iflp->delay_var = stream_getl(s); + STREAM_GETL(s, iflp->av_delay); + STREAM_GETL(s, iflp->min_delay); + STREAM_GETL(s, iflp->max_delay); + STREAM_GETL(s, iflp->delay_var); - iflp->pkt_loss = stream_getf(s); - iflp->res_bw = stream_getf(s); - iflp->ava_bw = stream_getf(s); - iflp->use_bw = stream_getf(s); + STREAM_GETF(s, 
iflp->pkt_loss); + STREAM_GETF(s, iflp->res_bw); + STREAM_GETF(s, iflp->ava_bw); + STREAM_GETF(s, iflp->use_bw); + + return 0; +stream_failure: + return -1; } struct interface *zebra_interface_link_params_read(struct stream *s, @@ -1780,9 +1920,7 @@ struct interface *zebra_interface_link_params_read(struct stream *s, struct if_link_params *iflp; ifindex_t ifindex; - assert(s); - - ifindex = stream_getl(s); + STREAM_GETL(s, ifindex); struct interface *ifp = if_lookup_by_index(ifindex, vrf_id); @@ -1796,36 +1934,41 @@ struct interface *zebra_interface_link_params_read(struct stream *s, if ((iflp = if_link_params_get(ifp)) == NULL) return NULL; - link_params_set_value(s, iflp); + if (link_params_set_value(s, iflp) != 0) + goto stream_failure; return ifp; + +stream_failure: + return NULL; } static void zebra_interface_if_set_value(struct stream *s, struct interface *ifp) { uint8_t link_params_status = 0; - ifindex_t old_ifindex; + ifindex_t old_ifindex, new_ifindex; old_ifindex = ifp->ifindex; /* Read interface's index. */ - if_set_index(ifp, stream_getl(s)); - ifp->status = stream_getc(s); + STREAM_GETL(s, new_ifindex); + if_set_index(ifp, new_ifindex); + STREAM_GETC(s, ifp->status); /* Read interface's value. 
*/ - ifp->flags = stream_getq(s); - ifp->ptm_enable = stream_getc(s); - ifp->ptm_status = stream_getc(s); - ifp->metric = stream_getl(s); - ifp->speed = stream_getl(s); - ifp->mtu = stream_getl(s); - ifp->mtu6 = stream_getl(s); - ifp->bandwidth = stream_getl(s); - ifp->link_ifindex = stream_getl(s); - ifp->ll_type = stream_getl(s); - ifp->hw_addr_len = stream_getl(s); + STREAM_GETQ(s, ifp->flags); + STREAM_GETC(s, ifp->ptm_enable); + STREAM_GETC(s, ifp->ptm_status); + STREAM_GETL(s, ifp->metric); + STREAM_GETL(s, ifp->speed); + STREAM_GETL(s, ifp->mtu); + STREAM_GETL(s, ifp->mtu6); + STREAM_GETL(s, ifp->bandwidth); + STREAM_GETL(s, ifp->link_ifindex); + STREAM_GETL(s, ifp->ll_type); + STREAM_GETL(s, ifp->hw_addr_len); if (ifp->hw_addr_len) - stream_get(ifp->hw_addr, s, + STREAM_GET(ifp->hw_addr, s, MIN(ifp->hw_addr_len, INTERFACE_HWADDR_MAX)); /* Read Traffic Engineering status */ @@ -1837,6 +1980,11 @@ static void zebra_interface_if_set_value(struct stream *s, } nexthop_group_interface_state_change(ifp, old_ifindex); + + return; +stream_failure: + zlog_err("Could not parse interface values; aborting"); + assert(!"Failed to parse interface values"); } size_t zebra_interface_link_params_write(struct stream *s, @@ -1935,7 +2083,7 @@ struct connected *zebra_interface_address_read(int type, struct stream *s, memset(&d, 0, sizeof(d)); /* Get interface index. */ - ifindex = stream_getl(s); + STREAM_GETL(s, ifindex); /* Lookup index. */ ifp = if_lookup_by_index(ifindex, vrf_id); @@ -1948,16 +2096,18 @@ struct connected *zebra_interface_address_read(int type, struct stream *s, } /* Fetch flag. */ - ifc_flags = stream_getc(s); + STREAM_GETC(s, ifc_flags); /* Fetch interface address. */ - d.family = p.family = stream_getc(s); + STREAM_GETC(s, d.family); + p.family = d.family; plen = prefix_blen(&d); - zclient_stream_get_prefix(s, &p); + if (zclient_stream_get_prefix(s, &p) != 0) + goto stream_failure; /* Fetch destination address. 
*/ - stream_get(&d.u.prefix, s, plen); + STREAM_GET(&d.u.prefix, s, plen); /* N.B. NULL destination pointers are encoded as all zeroes */ dp = memconstant(&d.u.prefix, 0, plen) ? NULL : &d; @@ -1993,6 +2143,9 @@ struct connected *zebra_interface_address_read(int type, struct stream *s, } return ifc; + +stream_failure: + return NULL; } /* @@ -2028,7 +2181,7 @@ zebra_interface_nbr_address_read(int type, struct stream *s, vrf_id_t vrf_id) struct nbr_connected *ifc; /* Get interface index. */ - ifindex = stream_getl(s); + STREAM_GETL(s, ifindex); /* Lookup index. */ ifp = if_lookup_by_index(ifindex, vrf_id); @@ -2041,9 +2194,9 @@ zebra_interface_nbr_address_read(int type, struct stream *s, vrf_id_t vrf_id) return NULL; } - p.family = stream_getc(s); - stream_get(&p.u.prefix, s, prefix_blen(&p)); - p.prefixlen = stream_getc(s); + STREAM_GETC(s, p.family); + STREAM_GET(&p.u.prefix, s, prefix_blen(&p)); + STREAM_GETC(s, p.prefixlen); if (type == ZEBRA_INTERFACE_NBR_ADDRESS_ADD) { /* Currently only supporting P2P links, so any new RA source @@ -2067,18 +2220,21 @@ zebra_interface_nbr_address_read(int type, struct stream *s, vrf_id_t vrf_id) } return ifc; + +stream_failure: + return NULL; } struct interface *zebra_interface_vrf_update_read(struct stream *s, vrf_id_t vrf_id, vrf_id_t *new_vrf_id) { - char ifname[INTERFACE_NAMSIZ]; + char ifname[INTERFACE_NAMSIZ + 1] = {}; struct interface *ifp; vrf_id_t new_id; /* Read interface name. */ - stream_get(ifname, s, INTERFACE_NAMSIZ); + STREAM_GET(ifname, s, INTERFACE_NAMSIZ); /* Lookup interface. */ ifp = if_lookup_by_name(ifname, vrf_id); @@ -2090,10 +2246,13 @@ struct interface *zebra_interface_vrf_update_read(struct stream *s, } /* Fetch new VRF Id. 
*/ - new_id = stream_getl(s); + STREAM_GETL(s, new_id); *new_vrf_id = new_id; return ifp; + +stream_failure: + return NULL; } /* filter unwanted messages until the expected one arrives */ @@ -2202,8 +2361,11 @@ int lm_label_manager_connect(struct zclient *zclient, int async) s = zclient->ibuf; /* read instance and proto */ - uint8_t proto = stream_getc(s); - uint16_t instance = stream_getw(s); + uint8_t proto; + uint16_t instance; + + STREAM_GETC(s, proto); + STREAM_GETW(s, instance); /* sanity */ if (proto != zclient->redist_default) @@ -2218,11 +2380,14 @@ int lm_label_manager_connect(struct zclient *zclient, int async) instance, zclient->instance); /* result code */ - result = stream_getc(s); + STREAM_GETC(s, result); if (zclient_debug) zlog_debug("LM connect-response received, result %u", result); return (int)result; + +stream_failure: + return -1; } /* @@ -2330,8 +2495,11 @@ int lm_get_label_chunk(struct zclient *zclient, uint8_t keep, uint32_t base, s = zclient->ibuf; /* read proto and instance */ - uint8_t proto = stream_getc(s); - uint16_t instance = stream_getw(s); + uint8_t proto; + uint8_t instance; + + STREAM_GETC(s, proto); + STREAM_GETW(s, instance); /* sanities */ if (proto != zclient->redist_default) @@ -2353,10 +2521,10 @@ int lm_get_label_chunk(struct zclient *zclient, uint8_t keep, uint32_t base, } /* keep */ - response_keep = stream_getc(s); + STREAM_GETC(s, response_keep); /* start and end labels */ - *start = stream_getl(s); - *end = stream_getl(s); + STREAM_GETL(s, *start); + STREAM_GETL(s, *end); /* not owning this response */ if (keep != response_keep) { @@ -2378,6 +2546,9 @@ int lm_get_label_chunk(struct zclient *zclient, uint8_t keep, uint32_t base, response_keep); return 0; + +stream_failure: + return -1; } /** @@ -2767,7 +2938,7 @@ int zebra_send_pw(struct zclient *zclient, int command, struct zapi_pw *pw) /* * Receive PW status update from Zebra and send it to LDE process. 
*/ -void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw) +int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw) { struct stream *s; @@ -2776,8 +2947,12 @@ void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw) /* Get data. */ stream_get(pw->ifname, s, IF_NAMESIZE); - pw->ifindex = stream_getl(s); - pw->status = stream_getl(s); + STREAM_GETL(s, pw->ifindex); + STREAM_GETL(s, pw->status); + + return 0; +stream_failure: + return -1; } static void zclient_capability_decode(ZAPI_CALLBACK_ARGS) @@ -2788,7 +2963,14 @@ static void zclient_capability_decode(ZAPI_CALLBACK_ARGS) uint8_t mpls_enabled; STREAM_GETL(s, vrf_backend); - vrf_configure_backend(vrf_backend); + + if (vrf_backend < 0 || vrf_configure_backend(vrf_backend)) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: Garbage VRF backend type: %d\n", __func__, + vrf_backend); + goto stream_failure; + } + memset(&cap, 0, sizeof(cap)); STREAM_GETC(s, mpls_enabled); diff --git a/lib/zclient.h b/lib/zclient.h index 4de42a35bb..6e8066381f 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -258,6 +258,9 @@ struct zclient { /* Is this a synchronous client? */ bool synchronous; + /* Session id (optional) to support clients with multiple sessions */ + uint32_t session_id; + /* Socket to zebra daemon. */ int sock; @@ -341,6 +344,9 @@ struct zclient { #define ZAPI_MESSAGE_TAG 0x08 #define ZAPI_MESSAGE_MTU 0x10 #define ZAPI_MESSAGE_SRCPFX 0x20 +/* Backup nexthops are present */ +#define ZAPI_MESSAGE_BACKUP_NEXTHOPS 0x40 + /* * This should only be used by a DAEMON that needs to communicate * the table being used is not in the VRF. 
You must pass the @@ -377,14 +383,21 @@ struct zapi_nexthop { struct ethaddr rmac; uint32_t weight; + + /* Index of backup nexthop */ + uint8_t backup_idx; }; /* - * ZAPI nexthop flags values + * ZAPI nexthop flags values - we're encoding a single octet + * initially, so ensure that the on-the-wire encoding continues + * to match the number of valid flags. */ + #define ZAPI_NEXTHOP_FLAG_ONLINK 0x01 #define ZAPI_NEXTHOP_FLAG_LABEL 0x02 #define ZAPI_NEXTHOP_FLAG_WEIGHT 0x04 +#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */ /* * Some of these data structures do not map easily to @@ -448,6 +461,10 @@ struct zapi_route { uint16_t nexthop_num; struct zapi_nexthop nexthops[MULTIPATH_NUM]; + /* Support backup routes for IP FRR, TI-LFA, traffic engineering */ + uint16_t backup_nexthop_num; + struct zapi_nexthop backup_nexthops[MULTIPATH_NUM]; + uint8_t distance; uint32_t metric; @@ -709,7 +726,7 @@ zebra_interface_nbr_address_read(int, struct stream *, vrf_id_t); extern struct interface *zebra_interface_vrf_update_read(struct stream *s, vrf_id_t vrf_id, vrf_id_t *new_vrf_id); -extern void zebra_router_id_update_read(struct stream *s, struct prefix *rid); +extern int zebra_router_id_update_read(struct stream *s, struct prefix *rid); extern struct interface *zebra_interface_link_params_read(struct stream *s, vrf_id_t vrf_id); @@ -738,7 +755,8 @@ extern int zapi_labels_decode(struct stream *s, struct zapi_labels *zl); extern int zebra_send_pw(struct zclient *zclient, int command, struct zapi_pw *pw); -extern void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw); +extern int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, + struct zapi_pw_status *pw); extern int zclient_route_send(uint8_t, struct zclient *, struct zapi_route *); extern int zclient_send_rnh(struct zclient *zclient, int command, @@ -769,9 +787,12 @@ bool zapi_iptable_notify_decode(struct stream *s, uint32_t *unique, enum zapi_iptable_notify_owner *note); -extern 
struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh); +extern struct nexthop * +nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh); int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, const struct nexthop *nh); +int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh, + const struct nexthop *nh); extern bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr); diff --git a/lib/zlog.c b/lib/zlog.c new file mode 100644 index 0000000000..45726755f8 --- /dev/null +++ b/lib/zlog.c @@ -0,0 +1,701 @@ +/* + * Copyright (c) 2015-19 David Lamparter, for NetDEF, Inc. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "zebra.h" + +#include <unistd.h> +#include <sys/time.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <time.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <stdarg.h> +#include <pthread.h> + +/* gettid() & co. 
*/ +#ifdef HAVE_PTHREAD_NP_H +#include <pthread_np.h> +#endif +#ifdef linux +#include <sys/syscall.h> +#endif +#ifdef __FreeBSD__ +#include <sys/thr.h> +#endif +#ifdef __NetBSD__ +#include <lwp.h> +#endif +#ifdef __DragonFly__ +#include <sys/lwp.h> +#endif +#ifdef __APPLE__ +#include <mach/mach_traps.h> +#endif + +#include "memory.h" +#include "atomlist.h" +#include "printfrr.h" +#include "frrcu.h" +#include "zlog.h" + +DEFINE_MTYPE_STATIC(LIB, LOG_MESSAGE, "log message") +DEFINE_MTYPE_STATIC(LIB, LOG_TLSBUF, "log thread-local buffer") + +DEFINE_HOOK(zlog_init, (const char *progname, const char *protoname, + unsigned short instance, uid_t uid, gid_t gid), + (progname, protoname, instance, uid, gid)) +DEFINE_KOOH(zlog_fini, (), ()) +DEFINE_HOOK(zlog_aux_init, (const char *prefix, int prio_min), + (prefix, prio_min)) + +char zlog_prefix[128]; +size_t zlog_prefixsz; +int zlog_tmpdirfd = -1; + +/* these are kept around because logging is initialized (and directories + * & files created) before zprivs code switches to the FRR user; therefore + * we need to chown() things so we don't get permission errors later when + * trying to delete things on shutdown + */ +static uid_t zlog_uid = -1; +static gid_t zlog_gid = -1; + +DECLARE_ATOMLIST(zlog_targets, struct zlog_target, head); +static struct zlog_targets_head zlog_targets; + +/* cf. zlog.h for additional comments on this struct. + * + * Note: you MUST NOT pass the format string + va_list to non-FRR format + * string functions (e.g. vsyslog, sd_journal_printv, ...) since FRR uses an + * extended prinf() with additional formats (%pI4 and the like). + * + * Also remember to use va_copy() on args. + */ + +struct zlog_msg { + struct timespec ts; + int prio; + + const char *fmt; + va_list args; + + char *stackbuf; + size_t stackbufsz; + char *text; + size_t textlen; + + /* This is always ISO8601 with sub-second precision 9 here, it's + * converted for callers as needed. ts_dot points to the "." + * separating sub-seconds. 
ts_zonetail is "Z" or "+00:00" for the + * local time offset. + * + * Valid if ZLOG_TS_ISO8601 is set. + * (0 if timestamp has not been formatted yet) + */ + uint32_t ts_flags; + char ts_str[32], *ts_dot, ts_zonetail[8]; +}; + +/* thread-local log message buffering + * + * This is strictly optional and set up by calling zlog_tls_buffer_init() + * on a particular thread. + * + * If in use, this will create a temporary file in /var/tmp which is used as + * memory-mapped MAP_SHARED log message buffer. The idea there is that buffer + * access doesn't require any syscalls, but in case of a crash the kernel + * knows to sync the memory back to disk. This way the user can still get the + * last log messages if there were any left unwritten in the buffer. + * + * Sizing this dynamically isn't particularly useful, so here's an 8k buffer + * with a message limit of 64 messages. Message metadata (e.g. priority, + * timestamp) aren't in the mmap region, so they're lost on crash, but we can + * live with that. 
+ */ + +#if defined(HAVE_OPENAT) && defined(HAVE_UNLINKAT) +#define CAN_DO_TLS 1 +#endif + +#define TLS_LOG_BUF_SIZE 8192 +#define TLS_LOG_MAXMSG 64 + +struct zlog_tls { + char *mmbuf; + size_t bufpos; + + size_t nmsgs; + struct zlog_msg msgs[TLS_LOG_MAXMSG]; + struct zlog_msg *msgp[TLS_LOG_MAXMSG]; +}; + +static inline void zlog_tls_free(void *arg); + +/* proper ELF TLS is a bit faster than pthread_[gs]etspecific, so if it's + * available we'll use it here + */ + +#ifdef __OpenBSD__ +static pthread_key_t zlog_tls_key; + +static void zlog_tls_key_init(void) __attribute__((_CONSTRUCTOR(500))); +static void zlog_tls_key_init(void) +{ + pthread_key_create(&zlog_tls_key, zlog_tls_free); +} + +static void zlog_tls_key_fini(void) __attribute__((_DESTRUCTOR(500))); +static void zlog_tls_key_fini(void) +{ + pthread_key_delete(zlog_tls_key); +} + +static inline struct zlog_tls *zlog_tls_get(void) +{ + return pthread_getspecific(zlog_tls_key); +} + +static inline void zlog_tls_set(struct zlog_tls *val) +{ + pthread_setspecific(zlog_tls_key, val); +} +#else +# ifndef thread_local +# define thread_local __thread +# endif + +static thread_local struct zlog_tls *zlog_tls_var + __attribute__((tls_model("initial-exec"))); + +static inline struct zlog_tls *zlog_tls_get(void) +{ + return zlog_tls_var; +} + +static inline void zlog_tls_set(struct zlog_tls *val) +{ + zlog_tls_var = val; +} +#endif + +#ifdef CAN_DO_TLS +static long zlog_gettid(void) +{ + long rv = -1; +#ifdef HAVE_PTHREAD_GETTHREADID_NP + rv = pthread_getthreadid_np(); +#elif defined(linux) + rv = syscall(__NR_gettid); +#elif defined(__NetBSD__) + rv = _lwp_self(); +#elif defined(__FreeBSD__) + thr_self(&rv); +#elif defined(__DragonFly__) + rv = lwp_gettid(); +#elif defined(__OpenBSD__) + rv = getthrid(); +#elif defined(__sun) + rv = pthread_self(); +#elif defined(__APPLE__) + rv = mach_thread_self(); + mach_port_deallocate(mach_task_self(), rv); +#endif + return rv; +} + +void zlog_tls_buffer_init(void) +{ + struct 
zlog_tls *zlog_tls; + char mmpath[MAXPATHLEN]; + int mmfd; + size_t i; + + zlog_tls = zlog_tls_get(); + + if (zlog_tls || zlog_tmpdirfd < 0) + return; + + zlog_tls = XCALLOC(MTYPE_LOG_TLSBUF, sizeof(*zlog_tls)); + for (i = 0; i < array_size(zlog_tls->msgp); i++) + zlog_tls->msgp[i] = &zlog_tls->msgs[i]; + + snprintfrr(mmpath, sizeof(mmpath), "logbuf.%ld", zlog_gettid()); + + mmfd = openat(zlog_tmpdirfd, mmpath, + O_RDWR | O_CREAT | O_EXCL | O_CLOEXEC, 0600); + if (mmfd < 0) { + zlog_err("failed to open thread log buffer \"%s\": %s", + mmpath, strerror(errno)); + goto out_anon; + } + fchown(mmfd, zlog_uid, zlog_gid); + +#ifdef HAVE_POSIX_FALLOCATE + if (posix_fallocate(mmfd, 0, TLS_LOG_BUF_SIZE) < 0) { +#else + if (ftruncate(mmfd, TLS_LOG_BUF_SIZE) < 0) { +#endif + zlog_err("failed to allocate thread log buffer \"%s\": %s", + mmpath, strerror(errno)); + goto out_anon_unlink; + } + + zlog_tls->mmbuf = mmap(NULL, TLS_LOG_BUF_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED, mmfd, 0); + if (zlog_tls->mmbuf == MAP_FAILED) { + zlog_err("failed to mmap thread log buffer \"%s\": %s", + mmpath, strerror(errno)); + goto out_anon_unlink; + } + + close(mmfd); + zlog_tls_set(zlog_tls); + return; + +out_anon_unlink: + unlink(mmpath); + close(mmfd); +out_anon: + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + zlog_tls->mmbuf = mmap(NULL, TLS_LOG_BUF_SIZE, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + + if (!zlog_tls->mmbuf) { + zlog_err("failed to anonymous-mmap thread log buffer: %s", + strerror(errno)); + XFREE(MTYPE_LOG_TLSBUF, zlog_tls); + zlog_tls_set(NULL); + return; + } + + zlog_tls_set(zlog_tls); +} + +void zlog_tls_buffer_fini(void) +{ + char mmpath[MAXPATHLEN]; + + zlog_tls_buffer_flush(); + + zlog_tls_free(zlog_tls_get()); + zlog_tls_set(NULL); + + snprintfrr(mmpath, sizeof(mmpath), "logbuf.%ld", zlog_gettid()); + if (unlinkat(zlog_tmpdirfd, mmpath, 0)) + zlog_err("unlink logbuf: %s (%d)", strerror(errno), errno); +} + +#else /* 
!CAN_DO_TLS */ +void zlog_tls_buffer_init(void) +{ +} + +void zlog_tls_buffer_fini(void) +{ +} +#endif + +static inline void zlog_tls_free(void *arg) +{ + struct zlog_tls *zlog_tls = arg; + + if (!zlog_tls) + return; + + munmap(zlog_tls->mmbuf, TLS_LOG_BUF_SIZE); + XFREE(MTYPE_LOG_TLSBUF, zlog_tls); +} + +void zlog_tls_buffer_flush(void) +{ + struct zlog_target *zt; + struct zlog_tls *zlog_tls = zlog_tls_get(); + + if (!zlog_tls) + return; + if (!zlog_tls->nmsgs) + return; + + rcu_read_lock(); + frr_each (zlog_targets, &zlog_targets, zt) { + if (!zt->logfn) + continue; + + zt->logfn(zt, zlog_tls->msgp, zlog_tls->nmsgs); + } + rcu_read_unlock(); + + zlog_tls->bufpos = 0; + zlog_tls->nmsgs = 0; +} + + +static void vzlog_notls(int prio, const char *fmt, va_list ap) +{ + struct zlog_target *zt; + struct zlog_msg stackmsg = { + .prio = prio & LOG_PRIMASK, + .fmt = fmt, + }, *msg = &stackmsg; + char stackbuf[512]; + + clock_gettime(CLOCK_REALTIME, &msg->ts); + va_copy(msg->args, ap); + msg->stackbuf = stackbuf; + msg->stackbufsz = sizeof(stackbuf); + + rcu_read_lock(); + frr_each (zlog_targets, &zlog_targets, zt) { + if (prio > zt->prio_min) + continue; + if (!zt->logfn) + continue; + + zt->logfn(zt, &msg, 1); + } + rcu_read_unlock(); + + va_end(msg->args); + if (msg->text && msg->text != stackbuf) + XFREE(MTYPE_LOG_MESSAGE, msg->text); +} + +static void vzlog_tls(struct zlog_tls *zlog_tls, int prio, + const char *fmt, va_list ap) +{ + struct zlog_target *zt; + struct zlog_msg *msg; + char *buf; + bool ignoremsg = true; + bool immediate = false; + + /* avoid further processing cost if no target wants this message */ + rcu_read_lock(); + frr_each (zlog_targets, &zlog_targets, zt) { + if (prio > zt->prio_min) + continue; + ignoremsg = false; + break; + } + rcu_read_unlock(); + + if (ignoremsg) + return; + + msg = &zlog_tls->msgs[zlog_tls->nmsgs]; + zlog_tls->nmsgs++; + if (zlog_tls->nmsgs == array_size(zlog_tls->msgs)) + immediate = true; + + memset(msg, 0, sizeof(*msg)); 
+ clock_gettime(CLOCK_REALTIME, &msg->ts); + va_copy(msg->args, ap); + msg->stackbuf = buf = zlog_tls->mmbuf + zlog_tls->bufpos; + msg->stackbufsz = TLS_LOG_BUF_SIZE - zlog_tls->bufpos - 1; + msg->fmt = fmt; + msg->prio = prio & LOG_PRIMASK; + if (msg->prio < LOG_INFO) + immediate = true; + + if (!immediate) { + /* messages written later need to take the formatting cost + * immediately since we can't hold a reference on varargs + */ + zlog_msg_text(msg, NULL); + + if (msg->text != buf) + /* zlog_msg_text called malloc() on us :( */ + immediate = true; + else { + zlog_tls->bufpos += msg->textlen + 1; + /* write a second \0 to mark current end position + * (in case of crash this signals end of unwritten log + * messages in mmap'd logbuf file) + */ + zlog_tls->mmbuf[zlog_tls->bufpos] = '\0'; + + /* avoid malloc() for next message */ + if (TLS_LOG_BUF_SIZE - zlog_tls->bufpos < 256) + immediate = true; + } + } + + if (immediate) + zlog_tls_buffer_flush(); + + va_end(msg->args); + if (msg->text && msg->text != buf) + XFREE(MTYPE_LOG_MESSAGE, msg->text); +} + +void vzlog(int prio, const char *fmt, va_list ap) +{ + struct zlog_tls *zlog_tls = zlog_tls_get(); + + if (zlog_tls) + vzlog_tls(zlog_tls, prio, fmt, ap); + else + vzlog_notls(prio, fmt, ap); +} + +void zlog_sigsafe(const char *text, size_t len) +{ + struct zlog_target *zt; + const char *end = text + len, *nlpos; + + while (text < end) { + nlpos = memchr(text, '\n', end - text); + if (!nlpos) + nlpos = end; + + frr_each (zlog_targets, &zlog_targets, zt) { + if (LOG_CRIT > zt->prio_min) + continue; + if (!zt->logfn_sigsafe) + continue; + + zt->logfn_sigsafe(zt, text, nlpos - text); + } + + if (nlpos == end) + break; + text = nlpos + 1; + } +} + + +int zlog_msg_prio(struct zlog_msg *msg) +{ + return msg->prio; +} + +const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen) +{ + if (!msg->text) { + va_list args; + + va_copy(args, msg->args); + msg->text = vasnprintfrr(MTYPE_LOG_MESSAGE, msg->stackbuf, + 
msg->stackbufsz, msg->fmt, args); + msg->textlen = strlen(msg->text); + va_end(args); + } + if (textlen) + *textlen = msg->textlen; + return msg->text; +} + +#define ZLOG_TS_FORMAT (ZLOG_TS_ISO8601 | ZLOG_TS_LEGACY) +#define ZLOG_TS_FLAGS ~ZLOG_TS_PREC + +size_t zlog_msg_ts(struct zlog_msg *msg, char *out, size_t outsz, + uint32_t flags) +{ + size_t len1; + + if (!(flags & ZLOG_TS_FORMAT)) + return 0; + + if (!(msg->ts_flags & ZLOG_TS_FORMAT) || + ((msg->ts_flags ^ flags) & ZLOG_TS_UTC)) { + struct tm tm; + + if (flags & ZLOG_TS_UTC) + gmtime_r(&msg->ts.tv_sec, &tm); + else + localtime_r(&msg->ts.tv_sec, &tm); + + strftime(msg->ts_str, sizeof(msg->ts_str), + "%Y-%m-%dT%H:%M:%S", &tm); + + if (flags & ZLOG_TS_UTC) { + msg->ts_zonetail[0] = 'Z'; + msg->ts_zonetail[1] = '\0'; + } else + snprintfrr(msg->ts_zonetail, sizeof(msg->ts_zonetail), + "%+03d:%02d", + (int)(tm.tm_gmtoff / 3600), + (int)(labs(tm.tm_gmtoff) / 60) % 60); + + msg->ts_dot = msg->ts_str + strlen(msg->ts_str); + snprintfrr(msg->ts_dot, + msg->ts_str + sizeof(msg->ts_str) - msg->ts_dot, + ".%09lu", (unsigned long)msg->ts.tv_nsec); + + msg->ts_flags = ZLOG_TS_ISO8601 | (flags & ZLOG_TS_UTC); + } + + len1 = flags & ZLOG_TS_PREC; + len1 = (msg->ts_dot - msg->ts_str) + (len1 ? 
len1 + 1 : 0); + + if (len1 > strlen(msg->ts_str)) + len1 = strlen(msg->ts_str); + + if (flags & ZLOG_TS_LEGACY) { + if (len1 + 1 > outsz) + return 0; + + /* just swap out the formatting, faster than redoing it */ + for (char *p = msg->ts_str; p < msg->ts_str + len1; p++) { + switch (*p) { + case '-': + *out++ = '/'; + break; + case 'T': + *out++ = ' '; + break; + default: + *out++ = *p; + } + } + *out = '\0'; + return len1; + } else { + size_t len2 = strlen(msg->ts_zonetail); + + if (len1 + len2 + 1 > outsz) + return 0; + memcpy(out, msg->ts_str, len1); + memcpy(out + len1, msg->ts_zonetail, len2); + out[len1 + len2] = '\0'; + return len1 + len2; + } +} + +/* setup functions */ + +struct zlog_target *zlog_target_clone(struct memtype *mt, + struct zlog_target *oldzt, size_t size) +{ + struct zlog_target *newzt; + + newzt = XCALLOC(mt, size); + if (oldzt) { + newzt->prio_min = oldzt->prio_min; + newzt->logfn = oldzt->logfn; + newzt->logfn_sigsafe = oldzt->logfn_sigsafe; + } + + return newzt; +} + +struct zlog_target *zlog_target_replace(struct zlog_target *oldzt, + struct zlog_target *newzt) +{ + if (newzt) + zlog_targets_add_tail(&zlog_targets, newzt); + if (oldzt) + zlog_targets_del(&zlog_targets, oldzt); + return oldzt; +} + + +/* common init */ + +#define TMPBASEDIR "/var/tmp/frr" + +static char zlog_tmpdir[MAXPATHLEN]; + +void zlog_aux_init(const char *prefix, int prio_min) +{ + if (prefix) + strlcpy(zlog_prefix, prefix, sizeof(zlog_prefix)); + + hook_call(zlog_aux_init, prefix, prio_min); +} + +void zlog_init(const char *progname, const char *protoname, + unsigned short instance, uid_t uid, gid_t gid) +{ + zlog_uid = uid; + zlog_gid = gid; + + if (instance) { + snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), + "/var/tmp/frr/%s-%d.%ld", + progname, instance, (long)getpid()); + + zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix), + "%s[%d]: ", protoname, instance); + } else { + snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), + "/var/tmp/frr/%s.%ld", + 
progname, (long)getpid()); + + zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix), + "%s: ", protoname); + } + + if (mkdir(TMPBASEDIR, 0700) != 0) { + if (errno != EEXIST) { + zlog_err("failed to mkdir \"%s\": %s", + TMPBASEDIR, strerror(errno)); + goto out_warn; + } + } + chown(TMPBASEDIR, zlog_uid, zlog_gid); + + if (mkdir(zlog_tmpdir, 0700) != 0) { + zlog_err("failed to mkdir \"%s\": %s", + zlog_tmpdir, strerror(errno)); + goto out_warn; + } + +#ifdef O_PATH + zlog_tmpdirfd = open(zlog_tmpdir, + O_PATH | O_RDONLY | O_CLOEXEC); +#else + zlog_tmpdirfd = open(zlog_tmpdir, + O_DIRECTORY | O_RDONLY | O_CLOEXEC); +#endif + if (zlog_tmpdirfd < 0) { + zlog_err("failed to open \"%s\": %s", + zlog_tmpdir, strerror(errno)); + goto out_warn; + } + +#ifdef AT_EMPTY_PATH + fchownat(zlog_tmpdirfd, "", zlog_uid, zlog_gid, AT_EMPTY_PATH); +#else + chown(zlog_tmpdir, zlog_uid, zlog_gid); +#endif + + hook_call(zlog_init, progname, protoname, instance, uid, gid); + return; + +out_warn: + zlog_err("crashlog and per-thread log buffering unavailable!"); + hook_call(zlog_init, progname, protoname, instance, uid, gid); +} + +void zlog_fini(void) +{ + hook_call(zlog_fini); + + if (zlog_tmpdirfd >= 0) { + close(zlog_tmpdirfd); + zlog_tmpdirfd = -1; + + if (rmdir(zlog_tmpdir)) + zlog_err("failed to rmdir \"%s\": %s", + zlog_tmpdir, strerror(errno)); + } +} diff --git a/lib/zlog.h b/lib/zlog.h new file mode 100644 index 0000000000..fd42ad50e4 --- /dev/null +++ b/lib/zlog.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2015-19 David Lamparter, for NetDEF, Inc. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _FRR_ZLOG_H +#define _FRR_ZLOG_H + +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <string.h> +#include <syslog.h> +#include <unistd.h> +#include <sys/uio.h> + +#include "atomlist.h" +#include "frrcu.h" +#include "memory.h" +#include "hook.h" + +extern char zlog_prefix[]; +extern size_t zlog_prefixsz; +extern int zlog_tmpdirfd; + +/* These functions are set up to write to stdout/stderr without explicit + * initialization and/or before config load. There is no need to call e.g. + * fprintf(stderr, ...) just because it's "too early" at startup. Depending + * on context, it may still be the right thing to use fprintf though -- try to + * determine wether something is a log message or something else. + */ + +extern void vzlog(int prio, const char *fmt, va_list ap); + +__attribute__ ((format (printf, 2, 3))) +static inline void zlog(int prio, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vzlog(prio, fmt, ap); + va_end(ap); +} + +#define zlog_err(...) zlog(LOG_ERR, __VA_ARGS__) +#define zlog_warn(...) zlog(LOG_WARNING, __VA_ARGS__) +#define zlog_info(...) zlog(LOG_INFO, __VA_ARGS__) +#define zlog_notice(...) zlog(LOG_NOTICE, __VA_ARGS__) +#define zlog_debug(...) zlog(LOG_DEBUG, __VA_ARGS__) + +extern void zlog_sigsafe(const char *text, size_t len); + +/* extra priority value to disable a target without deleting it */ +#define ZLOG_DISABLED (LOG_EMERG-1) + +/* zlog_msg encapsulates a particular logging call from somewhere in the code. + * The same struct is passed around to all zlog_targets. 
+ * + * This is used to defer formatting the log message until it is actually + * requested by one of the targets. If none of the targets needs the message + * formatted, the formatting call is avoided entirely. + * + * This struct is opaque / private to the core zlog code. Logging targets + * should use zlog_msg_* functions to get text / timestamps / ... for a + * message. + */ + +struct zlog_msg; + +extern int zlog_msg_prio(struct zlog_msg *msg); + +/* pass NULL as textlen if you don't need it. */ +extern const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen); + +/* timestamp formatting control flags */ + +/* sub-second digit count */ +#define ZLOG_TS_PREC 0xfU + +/* 8601: 0000-00-00T00:00:00Z (if used with ZLOG_TS_UTC) + * 0000-00-00T00:00:00+00:00 (otherwise) + * Legacy: 0000/00/00 00:00:00 (no TZ indicated!) + */ +#define ZLOG_TS_ISO8601 (1 << 8) +#define ZLOG_TS_LEGACY (1 << 9) + +/* default is local time zone */ +#define ZLOG_TS_UTC (1 << 10) + +extern size_t zlog_msg_ts(struct zlog_msg *msg, char *out, size_t outsz, + uint32_t flags); + +/* This list & struct implements the actual logging targets. It is accessed + * lock-free from all threads, and thus MUST only be changed atomically, i.e. + * RCU. + * + * Since there's no atomic replace, the replacement action is an add followed + * by a delete. This means that during logging config changes, log messages + * may be duplicated in the log target that is being changed. The old entry + * being changed MUST also at the very least not crash or do other stupid + * things. + * + * This list and struct are NOT related to config. Logging config is kept + * separately, and results in creating appropriate zlog_target(s) to realize + * the config. Log targets may also be created from varying sources, e.g. + * command line options, or VTY commands ("log monitor"). 
+ * + * struct zlog_target is intended to be embedded into a larger structure that + * contains additional field for the specific logging target, e.g. an fd or + * additional options. It MUST be the first field in that larger struct. + */ + +PREDECL_ATOMLIST(zlog_targets) +struct zlog_target { + struct zlog_targets_item head; + + int prio_min; + + void (*logfn)(struct zlog_target *zt, struct zlog_msg *msg[], + size_t nmsgs); + + /* for crash handlers, set to NULL if log target can't write crash logs + * without possibly deadlocking (AS-Safe) + * + * text is not \0 terminated & split up into lines (e.g. no \n) + */ + void (*logfn_sigsafe)(struct zlog_target *zt, const char *text, + size_t len); + + struct rcu_head rcu_head; +}; + +/* make a copy for RCUpdating. oldzt may be NULL to allocate a fresh one. */ +extern struct zlog_target *zlog_target_clone(struct memtype *mt, + struct zlog_target *oldzt, + size_t size); + +/* update the zlog_targets list; both oldzt and newzt may be NULL. You + * still need to zlog_target_free() the old target afterwards if it wasn't + * NULL. + * + * Returns oldzt so you can zlog_target_free(zlog_target_replace(old, new)); + * (Some log targets may need extra cleanup inbetween, but remember the old + * target MUST remain functional until the end of the current RCU cycle.) + */ +extern struct zlog_target *zlog_target_replace(struct zlog_target *oldzt, + struct zlog_target *newzt); + +/* Mostly for symmetry for zlog_target_clone(), just rcu_free() internally. */ +#define zlog_target_free(mt, zt) \ + rcu_free(mt, zt, rcu_head) + +extern void zlog_init(const char *progname, const char *protoname, + unsigned short instance, uid_t uid, gid_t gid); +DECLARE_HOOK(zlog_init, (const char *progname, const char *protoname, + unsigned short instance, uid_t uid, gid_t gid), + (progname, protoname, instance, uid, gid)) + +extern void zlog_fini(void); +DECLARE_KOOH(zlog_fini, (), ()) + +/* for tools & test programs, i.e. anything not a daemon. 
+ * (no cleanup needed at exit) + */ +extern void zlog_aux_init(const char *prefix, int prio_min); +DECLARE_HOOK(zlog_aux_init, (const char *prefix, int prio_min), + (prefix, prio_min)) + +extern void zlog_startup_end(void); + +extern void zlog_tls_buffer_init(void); +extern void zlog_tls_buffer_flush(void); +extern void zlog_tls_buffer_fini(void); + +#endif /* _FRR_ZLOG_H */ diff --git a/lib/zlog_targets.c b/lib/zlog_targets.c new file mode 100644 index 0000000000..b23ab073b4 --- /dev/null +++ b/lib/zlog_targets.c @@ -0,0 +1,574 @@ +/* + * Copyright (c) 2015-19 David Lamparter, for NetDEF, Inc. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "zebra.h" + +#include <sys/un.h> +#include <syslog.h> + +#include "memory.h" +#include "frrcu.h" +#include "frr_pthread.h" +#include "printfrr.h" +#include "zlog.h" +#include "zlog_targets.h" + +/* these allocations are intentionally left active even when doing full exit + * cleanup, in order to keep the logging subsystem fully functional until the + * absolute end. 
+ */ + +DECLARE_MGROUP(LOG) +DEFINE_MGROUP_ACTIVEATEXIT(LOG, "logging subsystem") + +DEFINE_MTYPE_STATIC(LOG, LOG_FD, "log file target") +DEFINE_MTYPE_STATIC(LOG, LOG_FD_NAME, "log file name") +DEFINE_MTYPE_STATIC(LOG, LOG_FD_ROTATE, "log file rotate helper") +DEFINE_MTYPE_STATIC(LOG, LOG_SYSL, "syslog target") + +struct zlt_fd { + struct zlog_target zt; + + atomic_uint_fast32_t fd; + + char ts_subsec; + bool record_priority; + + struct rcu_head_close head_close; +}; + +static const char * const prionames[] = { + [LOG_EMERG] = "emergencies: ", + [LOG_ALERT] = "alerts: ", + [LOG_CRIT] = "critical: ", + [LOG_ERR] = "errors: ", + [LOG_WARNING] = "warnings: ", + [LOG_NOTICE] = "notifications: ", + [LOG_INFO] = "informational: ", + [LOG_DEBUG] = "debugging: ", +}; + +void zlog_fd(struct zlog_target *zt, struct zlog_msg *msgs[], size_t nmsgs) +{ + struct zlt_fd *zte = container_of(zt, struct zlt_fd, zt); + int fd; + size_t i, textlen, iovpos = 0; + size_t niov = MIN(4 * nmsgs + 1, IOV_MAX); + struct iovec iov[niov]; + /* "\nYYYY-MM-DD HH:MM:SS.NNNNNNNNN+ZZ:ZZ " = 37 chars */ +#define TS_LEN 40 + char ts_buf[TS_LEN * nmsgs], *ts_pos = ts_buf; + + fd = atomic_load_explicit(&zte->fd, memory_order_relaxed); + + for (i = 0; i < nmsgs; i++) { + struct zlog_msg *msg = msgs[i]; + int prio = zlog_msg_prio(msg); + + if (prio > zt->prio_min) + continue; + + iov[iovpos].iov_base = ts_pos; + if (iovpos > 0) + *ts_pos++ = '\n'; + ts_pos += zlog_msg_ts(msg, ts_pos, sizeof(ts_buf) - 1 + - (ts_pos - ts_buf), + ZLOG_TS_LEGACY | zte->ts_subsec); + *ts_pos++ = ' '; + iov[iovpos].iov_len = ts_pos - (char *)iov[iovpos].iov_base; + + iovpos++; + + if (zte->record_priority) { + iov[iovpos].iov_base = (char *)prionames[prio]; + iov[iovpos].iov_len = strlen(iov[iovpos].iov_base); + + iovpos++; + } + + iov[iovpos].iov_base = zlog_prefix; + iov[iovpos].iov_len = zlog_prefixsz; + + iovpos++; + + iov[iovpos].iov_base = (char *)zlog_msg_text(msg, &textlen); + iov[iovpos].iov_len = textlen; + + 
iovpos++; + + if (ts_buf + sizeof(ts_buf) - ts_pos < TS_LEN + || i + 1 == nmsgs + || array_size(iov) - iovpos < 5) { + iov[iovpos].iov_base = (char *)"\n"; + iov[iovpos].iov_len = 1; + + iovpos++; + + writev(fd, iov, iovpos); + + iovpos = 0; + ts_pos = ts_buf; + } + } + + assert(iovpos == 0); +} + +static void zlog_fd_sigsafe(struct zlog_target *zt, const char *text, + size_t len) +{ + struct zlt_fd *zte = container_of(zt, struct zlt_fd, zt); + struct iovec iov[4]; + int fd; + + iov[0].iov_base = (char *)prionames[LOG_CRIT]; + iov[0].iov_len = zte->record_priority ? strlen(iov[0].iov_base) : 0; + + iov[1].iov_base = zlog_prefix; + iov[1].iov_len = zlog_prefixsz; + + iov[2].iov_base = (char *)text; + iov[2].iov_len = len; + + iov[3].iov_base = (char *)"\n"; + iov[3].iov_len = 1; + + fd = atomic_load_explicit(&zte->fd, memory_order_relaxed); + + writev(fd, iov, array_size(iov)); +} + +/* + * (re-)configuration + */ + +void zlog_file_init(struct zlog_cfg_file *zcf) +{ + memset(zcf, 0, sizeof(*zcf)); + zcf->prio_min = ZLOG_DISABLED; + zcf->fd = -1; + pthread_mutex_init(&zcf->cfg_mtx, NULL); +} + +static void zlog_file_target_free(struct zlt_fd *zlt) +{ + if (!zlt) + return; + + rcu_close(&zlt->head_close, zlt->fd); + rcu_free(MTYPE_LOG_FD, zlt, zt.rcu_head); +} + +void zlog_file_fini(struct zlog_cfg_file *zcf) +{ + if (zcf->active) { + struct zlt_fd *ztf; + struct zlog_target *zt; + + zt = zlog_target_replace(&zcf->active->zt, NULL); + ztf = container_of(zt, struct zlt_fd, zt); + zlog_file_target_free(ztf); + } + XFREE(MTYPE_LOG_FD_NAME, zcf->filename); + pthread_mutex_destroy(&zcf->cfg_mtx); +} + +static bool zlog_file_cycle(struct zlog_cfg_file *zcf) +{ + struct zlog_target *zt, *old; + struct zlt_fd *zlt = NULL; + int fd; + bool rv = true; + + do { + if (zcf->prio_min == ZLOG_DISABLED) + break; + + if (zcf->fd != -1) + fd = dup(zcf->fd); + else if (zcf->filename) + fd = open(zcf->filename, + O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC + | O_NOCTTY, + LOGFILE_MASK); + 
else + fd = -1; + + if (fd < 0) { + rv = false; + break; + } + + zt = zlog_target_clone(MTYPE_LOG_FD, &zcf->active->zt, + sizeof(*zlt)); + zlt = container_of(zt, struct zlt_fd, zt); + + zlt->fd = fd; + zlt->record_priority = zcf->record_priority; + zlt->ts_subsec = zcf->ts_subsec; + + zlt->zt.prio_min = zcf->prio_min; + zlt->zt.logfn = zcf->zlog_wrap ? zcf->zlog_wrap : zlog_fd; + zlt->zt.logfn_sigsafe = zlog_fd_sigsafe; + } while (0); + + old = zlog_target_replace(&zcf->active->zt, &zlt->zt); + zcf->active = zlt; + + zlog_file_target_free(container_of(old, struct zlt_fd, zt)); + + return rv; +} + +void zlog_file_set_other(struct zlog_cfg_file *zcf) +{ + frr_with_mutex(&zcf->cfg_mtx) { + zlog_file_cycle(zcf); + } +} + +bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename) +{ + frr_with_mutex(&zcf->cfg_mtx) { + XFREE(MTYPE_LOG_FD_NAME, zcf->filename); + zcf->filename = XSTRDUP(MTYPE_LOG_FD_NAME, filename); + zcf->fd = -1; + + return zlog_file_cycle(zcf); + } + assert(0); +} + +bool zlog_file_set_fd(struct zlog_cfg_file *zcf, int fd) +{ + frr_with_mutex(&zcf->cfg_mtx) { + if (zcf->fd == fd) + return true; + + XFREE(MTYPE_LOG_FD_NAME, zcf->filename); + zcf->fd = fd; + + return zlog_file_cycle(zcf); + } + assert(0); +} + +struct rcu_close_rotate { + struct rcu_head_close head_close; + struct rcu_head head_self; +}; + +bool zlog_file_rotate(struct zlog_cfg_file *zcf) +{ + struct rcu_close_rotate *rcr; + int fd; + + frr_with_mutex(&zcf->cfg_mtx) { + if (!zcf->active || !zcf->filename) + return true; + + fd = open(zcf->filename, + O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC | O_NOCTTY, + LOGFILE_MASK); + if (fd < 0) + return false; + + fd = atomic_exchange_explicit(&zcf->active->fd, + (uint_fast32_t)fd, + memory_order_relaxed); + } + + rcr = XCALLOC(MTYPE_LOG_FD_ROTATE, sizeof(*rcr)); + rcu_close(&rcr->head_close, fd); + rcu_free(MTYPE_LOG_FD_ROTATE, rcr, head_self); + + return true; +} + +/* fixed crash logging */ + +static struct zlt_fd 
zlog_crashlog; + +static void zlog_crashlog_sigsafe(struct zlog_target *zt, const char *text, + size_t len) +{ + static int crashlog_fd = -1; + + if (crashlog_fd == -1) { +#ifdef HAVE_OPENAT + crashlog_fd = openat(zlog_tmpdirfd, "crashlog", + O_WRONLY | O_APPEND | O_CREAT, + LOGFILE_MASK); +#endif + if (crashlog_fd < 0) + crashlog_fd = -2; + } + + if (crashlog_fd == -2) + return; + + zlog_crashlog.fd = crashlog_fd; + zlog_fd_sigsafe(&zlog_crashlog.zt, text, len); +} + +/* this is used for assert failures (they don't need AS-Safe logging) */ +static void zlog_crashlog_plain(struct zlog_target *zt, struct zlog_msg *msgs[], + size_t nmsgs) +{ + size_t i, len; + const char *text; + + for (i = 0; i < nmsgs; i++) { + if (zlog_msg_prio(msgs[i]) > zt->prio_min) + continue; + + text = zlog_msg_text(msgs[i], &len); + zlog_crashlog_sigsafe(zt, text, len); + } +} + +static void zlog_crashlog_init(void) +{ + zlog_crashlog.zt.prio_min = LOG_CRIT; + zlog_crashlog.zt.logfn = zlog_crashlog_plain; + zlog_crashlog.zt.logfn_sigsafe = zlog_crashlog_sigsafe; + zlog_crashlog.fd = -1; + + zlog_target_replace(NULL, &zlog_crashlog.zt); +} + +/* fixed logging for test/auxiliary programs */ + +static struct zlt_fd zlog_aux_stdout; +static bool zlog_is_aux; + +static int zlt_aux_init(const char *prefix, int prio_min) +{ + zlog_is_aux = true; + + zlog_aux_stdout.zt.prio_min = prio_min; + zlog_aux_stdout.zt.logfn = zlog_fd; + zlog_aux_stdout.zt.logfn_sigsafe = zlog_fd_sigsafe; + zlog_aux_stdout.fd = STDOUT_FILENO; + + zlog_target_replace(NULL, &zlog_aux_stdout.zt); + zlog_startup_end(); + return 0; +} + +static int zlt_init(const char *progname, const char *protoname, + unsigned short instance, uid_t uid, gid_t gid) +{ + openlog(progname, LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON); + return 0; +} + +static int zlt_fini(void) +{ + closelog(); + return 0; +} + +/* fixed startup logging to stderr */ + +static struct zlt_fd zlog_startup_stderr; + +__attribute__((_CONSTRUCTOR(450))) static void 
zlog_startup_init(void) +{ + zlog_startup_stderr.zt.prio_min = LOG_WARNING; + zlog_startup_stderr.zt.logfn = zlog_fd; + zlog_startup_stderr.zt.logfn_sigsafe = zlog_fd_sigsafe; + zlog_startup_stderr.fd = STDERR_FILENO; + + zlog_target_replace(NULL, &zlog_startup_stderr.zt); + + hook_register(zlog_aux_init, zlt_aux_init); + hook_register(zlog_init, zlt_init); + hook_register(zlog_fini, zlt_fini); +} + +void zlog_startup_end(void) +{ + static bool startup_ended = false; + + if (startup_ended) + return; + startup_ended = true; + + zlog_target_replace(&zlog_startup_stderr.zt, NULL); + + if (zlog_is_aux) + return; + + /* until here, crashlogs go to stderr */ + zlog_crashlog_init(); +} + +/* syslog */ + +struct zlt_syslog { + struct zlog_target zt; + + int syslog_facility; +}; + +static void zlog_syslog(struct zlog_target *zt, struct zlog_msg *msgs[], + size_t nmsgs) +{ + size_t i; + struct zlt_syslog *zte = container_of(zt, struct zlt_syslog, zt); + + for (i = 0; i < nmsgs; i++) { + if (zlog_msg_prio(msgs[i]) > zt->prio_min) + continue; + + syslog(zlog_msg_prio(msgs[i]) | zte->syslog_facility, "%s", + zlog_msg_text(msgs[i], NULL)); + } +} + +#ifndef _PATH_LOG +#define _PATH_LOG "/dev/log" +#endif + +static void zlog_syslog_sigsafe(struct zlog_target *zt, const char *text, + size_t len) +{ + static int syslog_fd = -1; + + char hdr[192]; + size_t hdrlen; + struct iovec iov[2]; + + if (syslog_fd == -1) { + syslog_fd = socket(AF_UNIX, SOCK_DGRAM, 0); + if (syslog_fd >= 0) { + struct sockaddr_un sa; + socklen_t salen = sizeof(sa); + + sa.sun_family = AF_UNIX; + strlcpy(sa.sun_path, _PATH_LOG, sizeof(sa.sun_path)); +#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN + salen = sa.sun_len = SUN_LEN(&sa); +#endif + if (connect(syslog_fd, (struct sockaddr *)&sa, salen)) { + close(syslog_fd); + syslog_fd = -1; + } + } + + /* /dev/log could be a fifo instead of a socket */ + if (syslog_fd == -1) { + syslog_fd = open(_PATH_LOG, O_WRONLY | O_NOCTTY); + if (syslog_fd < 0) + /* give up ... 
*/ + syslog_fd = -2; + } + } + + if (syslog_fd == -2) + return; + + /* note zlog_prefix includes trailing ": ", need to cut off 2 chars */ + hdrlen = snprintfrr(hdr, sizeof(hdr), "<%d>%.*s[%ld]: ", LOG_CRIT, + zlog_prefixsz > 2 ? (int)(zlog_prefixsz - 2) : 0, + zlog_prefix, (long)getpid()); + + iov[0].iov_base = hdr; + iov[0].iov_len = hdrlen; + + iov[1].iov_base = (char *)text; + iov[1].iov_len = len; + + writev(syslog_fd, iov, array_size(iov)); +} + + +static pthread_mutex_t syslog_cfg_mutex = PTHREAD_MUTEX_INITIALIZER; +static struct zlt_syslog *zlt_syslog; +static int syslog_facility = LOG_DAEMON; +static int syslog_prio_min = ZLOG_DISABLED; + +void zlog_syslog_set_facility(int facility) +{ + struct zlog_target *newztc; + struct zlt_syslog *newzt; + + frr_with_mutex(&syslog_cfg_mutex) { + if (facility == syslog_facility) + return; + syslog_facility = facility; + + if (syslog_prio_min == ZLOG_DISABLED) + return; + + newztc = zlog_target_clone(MTYPE_LOG_SYSL, &zlt_syslog->zt, + sizeof(*newzt)); + newzt = container_of(newztc, struct zlt_syslog, zt); + newzt->syslog_facility = syslog_facility; + + zlog_target_free(MTYPE_LOG_SYSL, + zlog_target_replace(&zlt_syslog->zt, + &newzt->zt)); + + zlt_syslog = newzt; + } +} + +int zlog_syslog_get_facility(void) +{ + frr_with_mutex(&syslog_cfg_mutex) { + return syslog_facility; + } + assert(0); +} + +void zlog_syslog_set_prio_min(int prio_min) +{ + struct zlog_target *newztc; + struct zlt_syslog *newzt = NULL; + + frr_with_mutex(&syslog_cfg_mutex) { + if (prio_min == syslog_prio_min) + return; + syslog_prio_min = prio_min; + + if (syslog_prio_min != ZLOG_DISABLED) { + newztc = zlog_target_clone(MTYPE_LOG_SYSL, + &zlt_syslog->zt, + sizeof(*newzt)); + newzt = container_of(newztc, struct zlt_syslog, zt); + newzt->zt.prio_min = prio_min; + newzt->zt.logfn = zlog_syslog; + newzt->zt.logfn_sigsafe = zlog_syslog_sigsafe; + newzt->syslog_facility = syslog_facility; + } + + zlog_target_free(MTYPE_LOG_SYSL, + 
zlog_target_replace(&zlt_syslog->zt, + &newzt->zt)); + + zlt_syslog = newzt; + } +} + +int zlog_syslog_get_prio_min(void) +{ + frr_with_mutex(&syslog_cfg_mutex) { + return syslog_prio_min; + } + assert(0); +} diff --git a/lib/zlog_targets.h b/lib/zlog_targets.h new file mode 100644 index 0000000000..f95d349a57 --- /dev/null +++ b/lib/zlog_targets.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2015-19 David Lamparter, for NetDEF, Inc. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _FRR_ZLOG_TARGETS_H +#define _FRR_ZLOG_TARGETS_H + +#include <pthread.h> + +#include "zlog.h" + +/* multiple file log targets can be active */ + +struct zlt_fd; + +struct zlog_cfg_file { + struct zlt_fd *active; + + pthread_mutex_t cfg_mtx; + + /* call zlog_file_set_other() to apply these */ + int prio_min; + char ts_subsec; + bool record_priority; + + /* call zlog_file_set_filename/fd() to change this */ + char *filename; + int fd; + + void (*zlog_wrap)(struct zlog_target *zt, struct zlog_msg *msgs[], + size_t nmsgs); +}; + +extern void zlog_file_init(struct zlog_cfg_file *zcf); +extern void zlog_file_fini(struct zlog_cfg_file *zcf); + +extern void zlog_file_set_other(struct zlog_cfg_file *zcf); +extern bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *name); +extern bool zlog_file_set_fd(struct zlog_cfg_file *zcf, int fd); +extern bool zlog_file_rotate(struct zlog_cfg_file *zcf); + +extern void zlog_fd(struct zlog_target *zt, struct zlog_msg *msgs[], + size_t nmsgs); + +/* syslog is always limited to one target */ + +extern void zlog_syslog_set_facility(int facility); +extern int zlog_syslog_get_facility(void); + +/* use ZLOG_DISABLED to disable */ +extern void zlog_syslog_set_prio_min(int prio_min); +extern int zlog_syslog_get_prio_min(void); + +#endif /* _FRR_ZLOG_TARGETS_H */ diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c index a3066f917e..fe681b4052 100644 --- a/nhrpd/nhrp_vty.c +++ b/nhrpd/nhrp_vty.c @@ -17,16 +17,22 @@ #include "nhrpd.h" #include "netlink.h" +static int nhrp_config_write(struct vty *vty); static struct cmd_node zebra_node = { + .name = "zebra", .node = ZEBRA_NODE, + .parent_node = CONFIG_NODE, .prompt = "%s(config-router)# ", - .vtysh = 1, + .config_write = nhrp_config_write, }; +static int interface_config_write(struct vty *vty); static struct cmd_node nhrp_interface_node = { + .name = "interface", .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, .prompt = "%s(config-if)# ", - .vtysh = 1, + 
.config_write = interface_config_write, }; #define NHRP_DEBUG_FLAGS_CMD "<all|common|event|interface|kernel|route|vici>" @@ -622,7 +628,7 @@ static void show_ip_nhrp_cache(struct nhrp_cache *c, void *pctx) else snprintf(buf[1], sizeof(buf[1]), "-"); - if (json) { + if (ctx->json) { json = json_object_new_object(); json_object_string_add(json, "interface", c->ifp->name); json_object_string_add(json, "type", @@ -1096,7 +1102,7 @@ static int interface_config_write(struct vty *vty) void nhrp_config_init(void) { - install_node(&zebra_node, nhrp_config_write); + install_node(&zebra_node); install_default(ZEBRA_NODE); /* access-list commands */ @@ -1120,7 +1126,7 @@ void nhrp_config_init(void) install_element(CONFIG_NODE, &no_nhrp_nflog_group_cmd); /* interface specific commands */ - install_node(&nhrp_interface_node, interface_config_write); + install_node(&nhrp_interface_node); if_cmd_init(); install_element(INTERFACE_NODE, &tunnel_protection_cmd); diff --git a/nhrpd/zbuf.h b/nhrpd/zbuf.h index d03f4ca3a2..e6f7101d63 100644 --- a/nhrpd/zbuf.h +++ b/nhrpd/zbuf.h @@ -86,9 +86,9 @@ static inline void *__zbuf_pull(struct zbuf *zb, size_t size, int error) } #define zbuf_pull(zb, type) ((type *)__zbuf_pull(zb, sizeof(type), 1)) -#define zbuf_pulln(zb, sz) ((void *)__zbuf_pull(zb, sz, 1)) +#define zbuf_pulln(zb, sz) (__zbuf_pull(zb, sz, 1)) #define zbuf_may_pull(zb, type) ((type *)__zbuf_pull(zb, sizeof(type), 0)) -#define zbuf_may_pulln(zb, sz) ((void *)__zbuf_pull(zb, sz, 0)) +#define zbuf_may_pulln(zb, sz) (__zbuf_pull(zb, sz, 0)) void *zbuf_may_pull_until(struct zbuf *zb, const char *sep, struct zbuf *msg); @@ -149,9 +149,9 @@ static inline void *__zbuf_push(struct zbuf *zb, size_t size, int error) } #define zbuf_push(zb, type) ((type *)__zbuf_push(zb, sizeof(type), 1)) -#define zbuf_pushn(zb, sz) ((void *)__zbuf_push(zb, sz, 1)) +#define zbuf_pushn(zb, sz) (__zbuf_push(zb, sz, 1)) #define zbuf_may_push(zb, type) ((type *)__zbuf_may_push(zb, sizeof(type), 0)) -#define 
zbuf_may_pushn(zb, sz) ((void *)__zbuf_push(zb, sz, 0)) +#define zbuf_may_pushn(zb, sz) (__zbuf_push(zb, sz, 0)) static inline void zbuf_put(struct zbuf *zb, const void *src, size_t len) { diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c index ead186b6fc..1f6cc9d527 100644 --- a/ospf6d/ospf6_abr.c +++ b/ospf6d/ospf6_abr.c @@ -1200,9 +1200,23 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) listcount(old_route->nh_list)); } } else { - /* adv. router exists in the list, update the nhs */ - list_delete_all_node(o_path->nh_list); - ospf6_copy_nexthops(o_path->nh_list, route->nh_list); + struct ospf6_route *tmp_route = ospf6_route_create(); + + ospf6_copy_nexthops(tmp_route->nh_list, + o_path->nh_list); + + if (ospf6_route_cmp_nexthops(tmp_route, route) != 0) { + /* adv. router exists in the list, update nhs */ + list_delete_all_node(o_path->nh_list); + ospf6_copy_nexthops(o_path->nh_list, + route->nh_list); + ospf6_route_delete(tmp_route); + } else { + /* adv. 
router has no change in nhs */ + old_entry_updated = false; + ospf6_route_delete(tmp_route); + continue; + } } if (is_debug) @@ -1427,7 +1441,7 @@ void install_element_ospf6_debug_abr(void) install_element(CONFIG_NODE, &no_debug_ospf6_abr_cmd); } -static const struct ospf6_lsa_handler inter_prefix_handler = { +static struct ospf6_lsa_handler inter_prefix_handler = { .lh_type = OSPF6_LSTYPE_INTER_PREFIX, .lh_name = "Inter-Prefix", .lh_short_name = "IAP", @@ -1435,7 +1449,7 @@ static const struct ospf6_lsa_handler inter_prefix_handler = { .lh_get_prefix_str = ospf6_inter_area_prefix_lsa_get_prefix_str, .lh_debug = 0}; -static const struct ospf6_lsa_handler inter_router_handler = { +static struct ospf6_lsa_handler inter_router_handler = { .lh_type = OSPF6_LSTYPE_INTER_ROUTER, .lh_name = "Inter-Router", .lh_short_name = "IAR", diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c index 805e411c7b..6e71a21bd5 100644 --- a/ospf6d/ospf6_asbr.c +++ b/ospf6d/ospf6_asbr.c @@ -918,8 +918,8 @@ void ospf6_asbr_distribute_list_update(int type) ZROUTE_NAME(type)); ospf6->t_distribute_update = NULL; - thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, - (void **)args, OSPF_MIN_LS_INTERVAL, + thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, args, + OSPF_MIN_LS_INTERVAL, &ospf6->t_distribute_update); } @@ -1853,7 +1853,7 @@ DEFUN (show_ipv6_ospf6_redistribute, return CMD_SUCCESS; } -static const struct ospf6_lsa_handler as_external_handler = { +static struct ospf6_lsa_handler as_external_handler = { .lh_type = OSPF6_LSTYPE_AS_EXTERNAL, .lh_name = "AS-External", .lh_short_name = "ASE", diff --git a/ospf6d/ospf6_flood.c b/ospf6d/ospf6_flood.c index 85d02c186b..b144c6804e 100644 --- a/ospf6d/ospf6_flood.c +++ b/ospf6d/ospf6_flood.c @@ -332,11 +332,12 @@ void ospf6_flood_interface(struct ospf6_neighbor *from, struct ospf6_lsa *lsa, if (req == on->last_ls_req) { /* sanity check refcount */ assert(req->lock >= 2); - ospf6_lsa_unlock(req); + req = 
ospf6_lsa_unlock(req); on->last_ls_req = NULL; } - ospf6_lsdb_remove(req, - on->request_list); + if (req) + ospf6_lsdb_remove( + req, on->request_list); ospf6_check_nbr_loading(on); continue; } @@ -348,7 +349,7 @@ void ospf6_flood_interface(struct ospf6_neighbor *from, struct ospf6_lsa *lsa, zlog_debug( "Received is newer, remove requesting"); if (req == on->last_ls_req) { - ospf6_lsa_unlock(req); + req = ospf6_lsa_unlock(req); on->last_ls_req = NULL; } if (req) diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c index 93265afc43..1209997514 100644 --- a/ospf6d/ospf6_interface.c +++ b/ospf6d/ospf6_interface.c @@ -1943,8 +1943,13 @@ static int config_write_ospf6_interface(struct vty *vty) return 0; } +static int config_write_ospf6_interface(struct vty *vty); static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", 1 /* VTYSH */ + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = config_write_ospf6_interface, }; static int ospf6_ifp_create(struct interface *ifp) @@ -2001,7 +2006,7 @@ static int ospf6_ifp_destroy(struct interface *ifp) void ospf6_interface_init(void) { /* Install interface node. 
*/ - install_node(&interface_node, config_write_ospf6_interface); + install_node(&interface_node); if_cmd_init(); if_zapi_callbacks(ospf6_ifp_create, ospf6_ifp_up, ospf6_ifp_down, ospf6_ifp_destroy); diff --git a/ospf6d/ospf6_intra.c b/ospf6d/ospf6_intra.c index 9c239b75ff..b700899ccf 100644 --- a/ospf6d/ospf6_intra.c +++ b/ospf6d/ospf6_intra.c @@ -2235,7 +2235,7 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa) __func__, oa->name); } -static const struct ospf6_lsa_handler router_handler = { +static struct ospf6_lsa_handler router_handler = { .lh_type = OSPF6_LSTYPE_ROUTER, .lh_name = "Router", .lh_short_name = "Rtr", @@ -2243,7 +2243,7 @@ static const struct ospf6_lsa_handler router_handler = { .lh_get_prefix_str = ospf6_router_lsa_get_nbr_id, .lh_debug = 0}; -static const struct ospf6_lsa_handler network_handler = { +static struct ospf6_lsa_handler network_handler = { .lh_type = OSPF6_LSTYPE_NETWORK, .lh_name = "Network", .lh_short_name = "Net", @@ -2251,7 +2251,7 @@ static const struct ospf6_lsa_handler network_handler = { .lh_get_prefix_str = ospf6_network_lsa_get_ar_id, .lh_debug = 0}; -static const struct ospf6_lsa_handler link_handler = { +static struct ospf6_lsa_handler link_handler = { .lh_type = OSPF6_LSTYPE_LINK, .lh_name = "Link", .lh_short_name = "Lnk", @@ -2259,7 +2259,7 @@ static const struct ospf6_lsa_handler link_handler = { .lh_get_prefix_str = ospf6_link_lsa_get_prefix_str, .lh_debug = 0}; -static const struct ospf6_lsa_handler intra_prefix_handler = { +static struct ospf6_lsa_handler intra_prefix_handler = { .lh_type = OSPF6_LSTYPE_INTRA_PREFIX, .lh_name = "Intra-Prefix", .lh_short_name = "INP", diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c index 9acbd09b1a..aa32fae6ad 100644 --- a/ospf6d/ospf6_lsa.c +++ b/ospf6d/ospf6_lsa.c @@ -77,16 +77,16 @@ static struct ospf6_lsa_handler unknown_handler = { .lh_debug = 0 /* No default debug */ }; -void ospf6_install_lsa_handler(const struct ospf6_lsa_handler *handler) +void 
ospf6_install_lsa_handler(struct ospf6_lsa_handler *handler) { /* type in handler is host byte order */ int index = handler->lh_type & OSPF6_LSTYPE_FCODE_MASK; vector_set_index(ospf6_lsa_handler_vector, index, (void *)handler); } -const struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type) +struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type) { - const struct ospf6_lsa_handler *handler = NULL; + struct ospf6_lsa_handler *handler = NULL; unsigned int index = ntohs(type) & OSPF6_LSTYPE_FCODE_MASK; if (index >= vector_active(ospf6_lsa_handler_vector)) @@ -527,7 +527,7 @@ struct ospf6_lsa *ospf6_lsa_create(struct ospf6_lsa_header *header) /* allocate memory */ lsa = XCALLOC(MTYPE_OSPF6_LSA, sizeof(struct ospf6_lsa)); - lsa->header = (struct ospf6_lsa_header *)new_header; + lsa->header = new_header; /* dump string */ ospf6_lsa_printbuf(lsa, lsa->name, sizeof(lsa->name)); @@ -554,7 +554,7 @@ struct ospf6_lsa *ospf6_lsa_create_headeronly(struct ospf6_lsa_header *header) /* allocate memory */ lsa = XCALLOC(MTYPE_OSPF6_LSA, sizeof(struct ospf6_lsa)); - lsa->header = (struct ospf6_lsa_header *)new_header; + lsa->header = new_header; SET_FLAG(lsa->flag, OSPF6_LSA_HEADERONLY); /* dump string */ @@ -608,16 +608,17 @@ void ospf6_lsa_lock(struct ospf6_lsa *lsa) } /* decrement reference counter of struct ospf6_lsa */ -void ospf6_lsa_unlock(struct ospf6_lsa *lsa) +struct ospf6_lsa *ospf6_lsa_unlock(struct ospf6_lsa *lsa) { /* decrement reference counter */ assert(lsa->lock > 0); lsa->lock--; if (lsa->lock != 0) - return; + return lsa; ospf6_lsa_delete(lsa); + return NULL; } diff --git a/ospf6d/ospf6_lsa.h b/ospf6d/ospf6_lsa.h index d871a8842e..5519dd1b80 100644 --- a/ospf6d/ospf6_lsa.h +++ b/ospf6d/ospf6_lsa.h @@ -227,7 +227,7 @@ extern void ospf6_lsa_delete(struct ospf6_lsa *lsa); extern struct ospf6_lsa *ospf6_lsa_copy(struct ospf6_lsa *); extern void ospf6_lsa_lock(struct ospf6_lsa *); -extern void ospf6_lsa_unlock(struct ospf6_lsa *); +extern struct ospf6_lsa 
*ospf6_lsa_unlock(struct ospf6_lsa *); extern int ospf6_lsa_expire(struct thread *); extern int ospf6_lsa_refresh(struct thread *); @@ -237,8 +237,8 @@ extern int ospf6_lsa_checksum_valid(struct ospf6_lsa_header *); extern int ospf6_lsa_prohibited_duration(uint16_t type, uint32_t id, uint32_t adv_router, void *scope); -extern void ospf6_install_lsa_handler(const struct ospf6_lsa_handler *handler); -extern const struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type); +extern void ospf6_install_lsa_handler(struct ospf6_lsa_handler *handler); +extern struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type); extern void ospf6_lsa_init(void); extern void ospf6_lsa_terminate(void); diff --git a/ospf6d/ospf6_main.c b/ospf6d/ospf6_main.c index e4bed7a79d..4dbe5ca321 100644 --- a/ospf6d/ospf6_main.c +++ b/ospf6d/ospf6_main.c @@ -168,6 +168,7 @@ struct quagga_signal_t ospf6_signals[] = { static const struct frr_yang_module_info *const ospf6d_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(ospf6d, OSPF6, .vty_port = OSPF6_VTY_PORT, diff --git a/ospf6d/ospf6_message.c b/ospf6d/ospf6_message.c index 21f9b0722c..31862a2298 100644 --- a/ospf6d/ospf6_message.c +++ b/ospf6d/ospf6_message.c @@ -1948,9 +1948,9 @@ int ospf6_lsreq_send(struct thread *thread) } if (last_req != NULL) { - if (on->last_ls_req != NULL) { - ospf6_lsa_unlock(on->last_ls_req); - } + if (on->last_ls_req != NULL) + on->last_ls_req = ospf6_lsa_unlock(on->last_ls_req); + ospf6_lsa_lock(last_req); on->last_ls_req = last_req; } diff --git a/ospf6d/ospf6_spf.c b/ospf6d/ospf6_spf.c index b0fe890d33..80bff5795f 100644 --- a/ospf6d/ospf6_spf.c +++ b/ospf6d/ospf6_spf.c @@ -989,7 +989,7 @@ struct ospf6_lsa *ospf6_create_single_router_lsa(struct ospf6_area *area, rtr_lsa = ospf6_lsdb_next(end, rtr_lsa); continue; } - lsa_header = (struct ospf6_lsa_header *)rtr_lsa->header; + lsa_header = rtr_lsa->header; total_lsa_length += (ntohs(lsa_header->length) - 
lsa_length); num_lsa++; rtr_lsa = ospf6_lsdb_next(end, rtr_lsa); @@ -1027,7 +1027,7 @@ struct ospf6_lsa *ospf6_create_single_router_lsa(struct ospf6_area *area, assert(rtr_lsa); if (!OSPF6_LSA_IS_MAXAGE(rtr_lsa)) { /* Append first Link State ID LSA */ - lsa_header = (struct ospf6_lsa_header *)rtr_lsa->header; + lsa_header = rtr_lsa->header; memcpy(new_header, lsa_header, ntohs(lsa_header->length)); /* Assign new lsa length as aggregated length. */ ((struct ospf6_lsa_header *)new_header)->length = @@ -1057,7 +1057,7 @@ struct ospf6_lsa *ospf6_create_single_router_lsa(struct ospf6_area *area, } /* Append Next Link State ID LSA */ - lsa_header = (struct ospf6_lsa_header *)rtr_lsa->header; + lsa_header = rtr_lsa->header; memcpy(new_header, (OSPF6_LSA_HEADER_END(rtr_lsa->header) + 4), (ntohs(lsa_header->length) - lsa_length)); new_header += (ntohs(lsa_header->length) - lsa_length); diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c index 96eee51929..dd672dd1c5 100644 --- a/ospf6d/ospf6_top.c +++ b/ospf6d/ospf6_top.c @@ -52,8 +52,8 @@ DEFINE_QOBJ_TYPE(ospf6) FRR_CFG_DEFAULT_BOOL(OSPF6_LOG_ADJACENCY_CHANGES, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) /* global ospf6d variable */ @@ -1112,16 +1112,21 @@ static int config_write_ospf6(struct vty *vty) return 0; } +static int config_write_ospf6(struct vty *vty); /* OSPF6 node structure. */ static struct cmd_node ospf6_node = { - OSPF6_NODE, "%s(config-ospf6)# ", 1 /* VTYSH */ + .name = "ospf6", + .node = OSPF6_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-ospf6)# ", + .config_write = config_write_ospf6, }; /* Install ospf related commands. */ void ospf6_top_init(void) { /* Install ospf6 top node. 
*/ - install_node(&ospf6_node, config_write_ospf6); + install_node(&ospf6_node); install_element(VIEW_NODE, &show_ipv6_ospf6_cmd); install_element(CONFIG_NODE, &router_ospf6_cmd); diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c index f8df37094f..2773a666a3 100644 --- a/ospf6d/ospf6_zebra.c +++ b/ospf6d/ospf6_zebra.c @@ -171,8 +171,8 @@ static int ospf6_zebra_read_route(ZAPI_CALLBACK_ARGS) if (IS_OSPF6_DEBUG_ZEBRA(RECV)) { char prefixstr[PREFIX2STR_BUFFER], nexthopstr[128]; - prefix2str((struct prefix *)&api.prefix, prefixstr, - sizeof(prefixstr)); + + prefix2str(&api.prefix, prefixstr, sizeof(prefixstr)); inet_ntop(AF_INET6, nexthop, nexthopstr, sizeof(nexthopstr)); zlog_debug( diff --git a/ospf6d/ospf6d.c b/ospf6d/ospf6d.c index db61fe087b..17e33902d9 100644 --- a/ospf6d/ospf6d.c +++ b/ospf6d/ospf6d.c @@ -69,8 +69,12 @@ struct route_node *route_prev(struct route_node *node) return prev; } +static int config_write_ospf6_debug(struct vty *vty); static struct cmd_node debug_node = { - DEBUG_NODE, "", 1 /* VTYSH */ + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_ospf6_debug, }; static int config_write_ospf6_debug(struct vty *vty) @@ -1216,7 +1220,7 @@ void ospf6_init(void) prefix_list_delete_hook(ospf6_plist_del); ospf6_bfd_init(); - install_node(&debug_node, config_write_ospf6_debug); + install_node(&debug_node); install_element_ospf6_debug_message(); install_element_ospf6_debug_lsa(); diff --git a/ospfd/ospf_abr.c b/ospfd/ospf_abr.c index a8dfcbb36b..eb3323997f 100644 --- a/ospfd/ospf_abr.c +++ b/ospfd/ospf_abr.c @@ -708,8 +708,7 @@ void ospf_abr_announce_network_to_area(struct prefix_ipv4 *p, uint32_t cost, else full_cost = cost; - old = ospf_lsa_lookup_by_prefix(area->lsdb, OSPF_SUMMARY_LSA, - (struct prefix_ipv4 *)p, + old = ospf_lsa_lookup_by_prefix(area->lsdb, OSPF_SUMMARY_LSA, p, area->ospf->router_id); if (old) { if (IS_DEBUG_OSPF_EVENT) @@ -761,8 +760,7 @@ void ospf_abr_announce_network_to_area(struct 
prefix_ipv4 *p, uint32_t cost, zlog_debug( "ospf_abr_announce_network_to_area(): " "creating new summary"); - lsa = ospf_summary_lsa_originate((struct prefix_ipv4 *)p, - full_cost, area); + lsa = ospf_summary_lsa_originate(p, full_cost, area); /* This will flood through area. */ if (!lsa) { diff --git a/ospfd/ospf_dump.c b/ospfd/ospf_dump.c index a712ecde95..f0740349a0 100644 --- a/ospfd/ospf_dump.c +++ b/ospfd/ospf_dump.c @@ -1640,9 +1640,13 @@ DEFUN_NOSH (show_debugging_ospf_instance, return show_debugging_ospf_common(vty, ospf); } +static int config_write_debug(struct vty *vty); /* Debug node. */ static struct cmd_node debug_node = { - DEBUG_NODE, "", 1 /* VTYSH */ + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_debug, }; static int config_write_debug(struct vty *vty) @@ -1783,7 +1787,7 @@ static int config_write_debug(struct vty *vty) /* Initialize debug commands. */ void ospf_debug_init(void) { - install_node(&debug_node, config_write_debug); + install_node(&debug_node); install_element(ENABLE_NODE, &show_debugging_ospf_cmd); install_element(ENABLE_NODE, &debug_ospf_ism_cmd); diff --git a/ospfd/ospf_ext.c b/ospfd/ospf_ext.c index df64fca883..47883d5f39 100644 --- a/ospfd/ospf_ext.c +++ b/ospfd/ospf_ext.c @@ -1684,7 +1684,7 @@ static uint16_t show_vty_link_info(struct vty *vty, struct tlv_header *ext) /* Extended Link TLVs */ static void ospf_ext_link_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh; uint16_t length = 0, sum = 0; @@ -1758,7 +1758,7 @@ static uint16_t show_vty_pref_info(struct vty *vty, struct tlv_header *ext) /* Extended Prefix TLVs */ static void ospf_ext_pref_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh; uint16_t length = 0, sum = 0; diff --git 
a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index d50f390e30..8cf2fad92e 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -33,6 +33,7 @@ #include "hash.h" #include "sockunion.h" /* for inet_aton() */ #include "checksum.h" +#include "network.h" #include "ospfd/ospfd.h" #include "ospfd/ospf_interface.h" @@ -123,7 +124,7 @@ int get_age(struct ospf_lsa *lsa) one-based. */ uint16_t ospf_lsa_checksum(struct lsa_header *lsa) { - uint8_t *buffer = (uint8_t *)&lsa->options; + uint8_t *buffer = &lsa->options; int options_offset = buffer - (uint8_t *)&lsa->ls_age; /* should be 2 */ /* Skip the AGE field */ @@ -138,7 +139,7 @@ uint16_t ospf_lsa_checksum(struct lsa_header *lsa) int ospf_lsa_checksum_valid(struct lsa_header *lsa) { - uint8_t *buffer = (uint8_t *)&lsa->options; + uint8_t *buffer = &lsa->options; int options_offset = buffer - (uint8_t *)&lsa->ls_age; /* should be 2 */ /* Skip the AGE field */ @@ -2845,8 +2846,7 @@ void ospf_lsa_maxage_delete(struct ospf *ospf, struct ospf_lsa *lsa) lsa_prefix.prefixlen = sizeof(lsa_prefix.u.ptr) * CHAR_BIT; lsa_prefix.u.ptr = (uintptr_t)lsa; - if ((rn = route_node_lookup(ospf->maxage_lsa, - (struct prefix *)&lsa_prefix))) { + if ((rn = route_node_lookup(ospf->maxage_lsa, &lsa_prefix))) { if (rn->info == lsa) { UNSET_FLAG(lsa->flags, OSPF_LSA_IN_MAXAGE); ospf_lsa_unlock(&lsa); /* maxage_lsa */ @@ -2888,7 +2888,7 @@ void ospf_lsa_maxage(struct ospf *ospf, struct ospf_lsa *lsa) lsa_prefix.prefixlen = sizeof(lsa_prefix.u.ptr) * CHAR_BIT; lsa_prefix.u.ptr = (uintptr_t)lsa; - rn = route_node_get(ospf->maxage_lsa, (struct prefix *)&lsa_prefix); + rn = route_node_get(ospf->maxage_lsa, &lsa_prefix); if (rn->info != NULL) { if (IS_DEBUG_OSPF(lsa, LSA_FLOODING)) zlog_debug( @@ -3524,7 +3524,8 @@ void ospf_refresher_register_lsa(struct ospf *ospf, struct ospf_lsa *lsa) * 1680s * and 1740s. 
*/ - delay = (random() % (max_delay - min_delay)) + min_delay; + delay = (frr_weak_random() % (max_delay - min_delay)) + + min_delay; current_index = ospf->lsa_refresh_queue.index + (monotime(NULL) - ospf->lsa_refresher_started) diff --git a/ospfd/ospf_main.c b/ospfd/ospf_main.c index 4d6ebb40eb..6a3ba9902d 100644 --- a/ospfd/ospf_main.c +++ b/ospfd/ospf_main.c @@ -128,6 +128,7 @@ struct quagga_signal_t ospf_signals[] = { static const struct frr_yang_module_info *const ospfd_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(ospfd, OSPF, .vty_port = OSPF_VTY_PORT, diff --git a/ospfd/ospf_nsm.c b/ospfd/ospf_nsm.c index 9cd83c245c..47688babbf 100644 --- a/ospfd/ospf_nsm.c +++ b/ospfd/ospf_nsm.c @@ -33,6 +33,7 @@ #include "table.h" #include "log.h" #include "command.h" +#include "network.h" #include "ospfd/ospfd.h" #include "ospfd/ospf_interface.h" @@ -723,7 +724,7 @@ static void nsm_change_state(struct ospf_neighbor *nbr, int state) /* Start DD exchange protocol */ if (state == NSM_ExStart) { if (nbr->dd_seqnum == 0) - nbr->dd_seqnum = (uint32_t)random(); + nbr->dd_seqnum = (uint32_t)frr_weak_random(); else nbr->dd_seqnum++; diff --git a/ospfd/ospf_opaque.c b/ospfd/ospf_opaque.c index b042a06372..35fa5da74b 100644 --- a/ospfd/ospf_opaque.c +++ b/ospfd/ospf_opaque.c @@ -1161,7 +1161,7 @@ void ospf_opaque_config_write_debug(struct vty *vty) void show_opaque_info_detail(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; uint32_t lsid = ntohl(lsah->id.s_addr); uint8_t opaque_type = GET_OPAQUE_TYPE(lsid); uint32_t opaque_id = GET_OPAQUE_ID(lsid); diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c index aa50aeacbc..a39d19cc5a 100644 --- a/ospfd/ospf_packet.c +++ b/ospfd/ospf_packet.c @@ -2038,10 +2038,10 @@ static void ospf_ls_upd(struct ospf *ospf, struct ip *iph, SET_FLAG(lsa->flags, OSPF_LSA_SELF); - 
ospf_opaque_self_originated_lsa_received(nbr, - lsa); ospf_ls_ack_send(nbr, lsa); + ospf_opaque_self_originated_lsa_received(nbr, + lsa); continue; } } @@ -2604,7 +2604,7 @@ static unsigned ospf_router_lsa_links_examin(struct router_lsa_link *link, { unsigned counted_links = 0, thislinklen; - while (linkbytes) { + while (linkbytes >= OSPF_ROUTER_LSA_LINK_SIZE) { thislinklen = OSPF_ROUTER_LSA_LINK_SIZE + 4 * link->m[0].tos_count; if (thislinklen > linkbytes) { @@ -2642,26 +2642,32 @@ static unsigned ospf_lsa_examin(struct lsa_header *lsah, const uint16_t lsalen, return MSG_NG; } switch (lsah->type) { - case OSPF_ROUTER_LSA: - /* RFC2328 A.4.2, LSA header + 4 bytes followed by N>=1 - * (12+)-byte link blocks */ - if (headeronly) { - ret = (lsalen - OSPF_LSA_HEADER_SIZE - - OSPF_ROUTER_LSA_MIN_SIZE) - % 4 - ? MSG_NG - : MSG_OK; - break; - } + case OSPF_ROUTER_LSA: { + /* + * RFC2328 A.4.2, LSA header + 4 bytes followed by N>=0 + * (12+)-byte link blocks + */ + size_t linkbytes_len = lsalen - OSPF_LSA_HEADER_SIZE + - OSPF_ROUTER_LSA_MIN_SIZE; + + /* + * LSA link blocks are variable length but always multiples of + * 4; basic sanity check + */ + if (linkbytes_len % 4 != 0) + return MSG_NG; + + if (headeronly) + return MSG_OK; + rlsa = (struct router_lsa *)lsah; + ret = ospf_router_lsa_links_examin( (struct router_lsa_link *)rlsa->link, - lsalen - OSPF_LSA_HEADER_SIZE - 4, /* skip: basic - header, "flags", - 0, "# links" */ - ntohs(rlsa->links) /* 16 bits */ - ); + linkbytes_len, + ntohs(rlsa->links)); break; + } case OSPF_AS_EXTERNAL_LSA: /* RFC2328 A.4.5, LSA header + 4 bytes followed by N>=1 12-bytes long * blocks */ diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c index fbe513cea0..c3d53ad5ed 100644 --- a/ospfd/ospf_ri.c +++ b/ospfd/ospf_ri.c @@ -1438,7 +1438,7 @@ static uint16_t show_vty_sr_msd(struct vty *vty, struct tlv_header *tlvh) static void ospf_router_info_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header 
*)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh; uint16_t length = 0, sum = 0; diff --git a/ospfd/ospf_snmp.c b/ospfd/ospf_snmp.c index 550e5ee9ee..63191d5cb5 100644 --- a/ospfd/ospf_snmp.c +++ b/ospfd/ospf_snmp.c @@ -784,7 +784,7 @@ static struct ospf_area *ospfStubAreaLookup(struct variable *v, oid name[], area = ospf_area_lookup_by_area_id(ospf, *addr); - if (area->external_routing == OSPF_AREA_STUB) + if (area && area->external_routing == OSPF_AREA_STUB) return area; else return NULL; diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c index b5a54a0bc4..7a786ba7ab 100644 --- a/ospfd/ospf_sr.c +++ b/ospfd/ospf_sr.c @@ -1035,7 +1035,7 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) { struct sr_node *srn; struct tlv_header *tlvh; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct ri_sr_tlv_sid_label_range *ri_srgb; struct ri_sr_tlv_sr_algorithm *algo; struct sr_srgb srgb; @@ -1156,7 +1156,7 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) void ospf_sr_ri_lsa_delete(struct ospf_lsa *lsa) { struct sr_node *srn; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; if (IS_DEBUG_OSPF_SR) zlog_debug("SR (%s): Remove SR node %s from lsa_id 4.0.0.%u", @@ -1198,7 +1198,7 @@ void ospf_sr_ext_link_lsa_update(struct ospf_lsa *lsa) { struct sr_node *srn; struct tlv_header *tlvh; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct sr_link *srl; uint16_t length, sum; @@ -1308,7 +1308,7 @@ void ospf_sr_ext_prefix_lsa_update(struct ospf_lsa *lsa) { struct sr_node *srn; struct tlv_header *tlvh; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct sr_prefix *srp; uint16_t length, sum; diff --git a/ospfd/ospf_te.c b/ospfd/ospf_te.c index a2084e3214..1009c7577e 100644 --- a/ospfd/ospf_te.c +++ b/ospfd/ospf_te.c @@ -2119,7 +2119,7 @@ static uint16_t 
ospf_mpls_te_show_link_subtlv(struct vty *vty, static void ospf_mpls_te_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh, *next; uint16_t sum, total; uint16_t (*subfunc)(struct vty * vty, struct tlv_header * tlvh, diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 75f556e39f..a3a02a0f95 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -54,8 +54,8 @@ #include "ospfd/ospf_bfd.h" FRR_CFG_DEFAULT_BOOL(OSPF_LOG_ADJACENCY_CHANGES, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) static const char *const ospf_network_type_str[] = { @@ -10555,14 +10555,21 @@ void ospf_vty_show_init(void) } +static int config_write_interface(struct vty *vty); /* ospfd's interface node. */ -static struct cmd_node interface_node = {INTERFACE_NODE, "%s(config-if)# ", 1}; +static struct cmd_node interface_node = { + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = config_write_interface, +}; /* Initialization of OSPF interface. */ static void ospf_vty_if_init(void) { /* Install interface node. */ - install_node(&interface_node, config_write_interface); + install_node(&interface_node); if_cmd_init(); /* "ip ospf authentication" commands. 
*/ @@ -10668,7 +10675,14 @@ static void ospf_vty_zebra_init(void) #endif /* 0 */ } -static struct cmd_node ospf_node = {OSPF_NODE, "%s(config-router)# ", 1}; +static int ospf_config_write(struct vty *vty); +static struct cmd_node ospf_node = { + .name = "ospf", + .node = OSPF_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = ospf_config_write, +}; static void ospf_interface_clear(struct interface *ifp) { @@ -10741,7 +10755,7 @@ void ospf_vty_clear_init(void) void ospf_vty_init(void) { /* Install ospf top node. */ - install_node(&ospf_node, ospf_config_write); + install_node(&ospf_node); /* "router ospf" commands. */ install_element(CONFIG_NODE, &router_ospf_cmd); diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index 385a7ece7b..c7e6bd9cbf 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -1023,8 +1023,8 @@ void ospf_distribute_list_update(struct ospf *ospf, int type, /* Set timer. */ ospf->t_distribute_update = NULL; - thread_add_timer_msec(master, ospf_distribute_list_update_timer, - (void **)args, ospf->min_ls_interval, + thread_add_timer_msec(master, ospf_distribute_list_update_timer, args, + ospf->min_ls_interval, &ospf->t_distribute_update); } diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c index f3fe9e17b2..e9f622d217 100644 --- a/ospfd/ospfd.c +++ b/ospfd/ospfd.c @@ -1787,7 +1787,7 @@ static void ospf_nbr_nbma_add(struct ospf_nbr_nbma *nbr_nbma, p.prefixlen = IPV4_MAX_BITLEN; p.u.prefix4 = nbr_nbma->addr; - rn = route_node_get(oi->nbrs, (struct prefix *)&p); + rn = route_node_get(oi->nbrs, &p); if (rn->info) { nbr = rn->info; nbr->nbr_nbma = nbr_nbma; diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c index e45e629649..7928b8e2e7 100644 --- a/pbrd/pbr_map.c +++ b/pbrd/pbr_map.c @@ -145,7 +145,7 @@ static bool pbr_map_interface_is_valid(const struct pbr_map_interface *pmi) } static void pbr_map_pbrms_update_common(struct pbr_map_sequence *pbrms, - bool install) + bool install, bool changed) { struct pbr_map *pbrm; 
struct listnode *node; @@ -161,19 +161,19 @@ static void pbr_map_pbrms_update_common(struct pbr_map_sequence *pbrms, if (install && !pbr_map_interface_is_valid(pmi)) continue; - pbr_send_pbr_map(pbrms, pmi, install); + pbr_send_pbr_map(pbrms, pmi, install, changed); } } } -static void pbr_map_pbrms_install(struct pbr_map_sequence *pbrms) +static void pbr_map_pbrms_install(struct pbr_map_sequence *pbrms, bool changed) { - pbr_map_pbrms_update_common(pbrms, true); + pbr_map_pbrms_update_common(pbrms, true, changed); } static void pbr_map_pbrms_uninstall(struct pbr_map_sequence *pbrms) { - pbr_map_pbrms_update_common(pbrms, false); + pbr_map_pbrms_update_common(pbrms, false, false); } static const char *const pbr_map_reason_str[] = { @@ -292,7 +292,7 @@ void pbr_map_policy_interface_update(const struct interface *ifp, bool state_up) for (ALL_LIST_ELEMENTS_RO(pbrm->seqnumbers, node, pbrms)) for (ALL_LIST_ELEMENTS_RO(pbrm->incoming, inode, pmi)) if (pmi->ifp == ifp && pbr_map_interface_is_valid(pmi)) - pbr_send_pbr_map(pbrms, pmi, state_up); + pbr_send_pbr_map(pbrms, pmi, state_up, false); } static void pbrms_vrf_update(struct pbr_map_sequence *pbrms, @@ -306,7 +306,7 @@ static void pbrms_vrf_update(struct pbr_map_sequence *pbrms, DEBUGD(&pbr_dbg_map, "\tSeq %u uses vrf %s (%u), updating map", pbrms->seqno, vrf_name, pbr_vrf_id(pbr_vrf)); - pbr_map_check(pbrms); + pbr_map_check(pbrms, false); } } @@ -360,7 +360,7 @@ extern void pbr_map_delete(struct pbr_map_sequence *pbrms) pbrm = pbrms->parent; for (ALL_LIST_ELEMENTS_RO(pbrm->incoming, inode, pmi)) - pbr_send_pbr_map(pbrms, pmi, false); + pbr_send_pbr_map(pbrms, pmi, false, false); if (pbrms->nhg) pbr_nht_delete_individual_nexthop(pbrms); @@ -384,7 +384,7 @@ static void pbr_map_delete_common(struct pbr_map_sequence *pbrms) pbrm->valid = false; pbrms->nhs_installed = false; pbrms->reason |= PBR_MAP_INVALID_NO_NEXTHOPS; - pbrms->nhgrp_name = NULL; + XFREE(MTYPE_TMP, pbrms->nhgrp_name); } void 
pbr_map_delete_nexthops(struct pbr_map_sequence *pbrms) @@ -619,7 +619,7 @@ void pbr_map_schedule_policy_from_nhg(const char *nh_group) && (strcmp(nh_group, pbrms->nhgrp_name) == 0)) { pbrms->nhs_installed = true; - pbr_map_check(pbrms); + pbr_map_check(pbrms, false); } if (pbrms->nhg @@ -627,7 +627,7 @@ void pbr_map_schedule_policy_from_nhg(const char *nh_group) == 0)) { pbrms->nhs_installed = true; - pbr_map_check(pbrms); + pbr_map_check(pbrms, false); } } } @@ -656,7 +656,8 @@ void pbr_map_policy_install(const char *name) pbrms->seqno); for (ALL_LIST_ELEMENTS_RO(pbrm->incoming, inode, pmi)) if (pbr_map_interface_is_valid(pmi)) - pbr_send_pbr_map(pbrms, pmi, true); + pbr_send_pbr_map(pbrms, pmi, true, + false); } } } @@ -668,7 +669,7 @@ void pbr_map_policy_delete(struct pbr_map *pbrm, struct pbr_map_interface *pmi) for (ALL_LIST_ELEMENTS_RO(pbrm->seqnumbers, node, pbrms)) - pbr_send_pbr_map(pbrms, pmi, false); + pbr_send_pbr_map(pbrms, pmi, false, false); pmi->delete = true; } @@ -710,13 +711,13 @@ void pbr_map_check_nh_group_change(const char *nh_group) pbrm->incoming, inode, pmi)) pbr_send_pbr_map(pbrms, pmi, - false); + false, false); } } } } -void pbr_map_check(struct pbr_map_sequence *pbrms) +void pbr_map_check(struct pbr_map_sequence *pbrms, bool changed) { struct pbr_map *pbrm; bool install; @@ -741,7 +742,7 @@ void pbr_map_check(struct pbr_map_sequence *pbrms) } if (install) - pbr_map_pbrms_install(pbrms); + pbr_map_pbrms_install(pbrms, changed); else pbr_map_pbrms_uninstall(pbrms); } @@ -755,7 +756,7 @@ void pbr_map_install(struct pbr_map *pbrm) return; for (ALL_LIST_ELEMENTS_RO(pbrm->seqnumbers, node, pbrms)) - pbr_map_pbrms_install(pbrms); + pbr_map_pbrms_install(pbrms, false); } void pbr_map_init(void) diff --git a/pbrd/pbr_map.h b/pbrd/pbr_map.h index 8bd22cbf2a..41f1703954 100644 --- a/pbrd/pbr_map.h +++ b/pbrd/pbr_map.h @@ -182,7 +182,15 @@ extern void pbr_map_init(void); extern bool pbr_map_check_valid(const char *name); -extern void 
pbr_map_check(struct pbr_map_sequence *pbrms); +/** + * Re-check the pbr map for validity. + * + * Install if valid, remove if not. + * + * If changed is set, the config on the on the map has changed somewhere + * and the rules need to be replaced if valid. + */ +extern void pbr_map_check(struct pbr_map_sequence *pbrms, bool changed); extern void pbr_map_check_nh_group_change(const char *nh_group); extern void pbr_map_reason_string(unsigned int reason, char *buf, int size); diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c index ecd375333c..2f3591ac8d 100644 --- a/pbrd/pbr_nht.c +++ b/pbrd/pbr_nht.c @@ -510,12 +510,26 @@ char *pbr_nht_nexthop_make_name(char *name, size_t l, return buffer; } -void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms) +void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms, + const struct nexthop *nhop) { struct pbr_nexthop_group_cache *pnhgc; struct pbr_nexthop_group_cache find; struct pbr_nexthop_cache *pnhc; struct pbr_nexthop_cache lookup; + struct nexthop *nh; + char buf[PBR_NHC_NAMELEN]; + + pbrms->nhg = nexthop_group_new(); + pbrms->internal_nhg_name = XSTRDUP( + MTYPE_TMP, + pbr_nht_nexthop_make_name(pbrms->parent->name, PBR_NHC_NAMELEN, + pbrms->seqno, buf)); + + nh = nexthop_new(); + memcpy(nh, nhop, sizeof(*nh)); + + nexthop_group_add_sorted(pbrms->nhg, nh); memset(&find, 0, sizeof(find)); pbr_nht_nexthop_make_name(pbrms->parent->name, PBR_NHC_NAMELEN, @@ -539,7 +553,7 @@ void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms) pbr_nht_install_nexthop_group(pnhgc, *pbrms->nhg); } -void pbr_nht_delete_individual_nexthop(struct pbr_map_sequence *pbrms) +static void pbr_nht_release_individual_nexthop(struct pbr_map_sequence *pbrms) { struct pbr_nexthop_group_cache *pnhgc; struct pbr_nexthop_group_cache find; @@ -548,8 +562,6 @@ void pbr_nht_delete_individual_nexthop(struct pbr_map_sequence *pbrms) struct nexthop *nh; enum nexthop_types_t nh_type = 0; - pbr_map_delete_nexthops(pbrms); - memset(&find, 
0, sizeof(find)); snprintf(find.name, sizeof(find.name), "%s", pbrms->internal_nhg_name); pnhgc = hash_lookup(pbr_nhg_hash, &find); @@ -564,11 +576,19 @@ void pbr_nht_delete_individual_nexthop(struct pbr_map_sequence *pbrms) pbr_nht_uninstall_nexthop_group(pnhgc, *pbrms->nhg, nh_type); hash_release(pbr_nhg_hash, pnhgc); + pbr_nhgc_delete(pnhgc); nexthop_group_delete(&pbrms->nhg); XFREE(MTYPE_TMP, pbrms->internal_nhg_name); } +void pbr_nht_delete_individual_nexthop(struct pbr_map_sequence *pbrms) +{ + pbr_map_delete_nexthops(pbrms); + + pbr_nht_release_individual_nexthop(pbrms); +} + struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name) { struct nexthop *nhop; diff --git a/pbrd/pbr_nht.h b/pbrd/pbr_nht.h index 4ef41cede7..2533942547 100644 --- a/pbrd/pbr_nht.h +++ b/pbrd/pbr_nht.h @@ -88,7 +88,8 @@ extern struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name); extern void pbr_nht_change_group(const char *name); extern void pbr_nht_delete_group(const char *name); -extern void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms); +extern void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms, + const struct nexthop *nhop); extern void pbr_nht_delete_individual_nexthop(struct pbr_map_sequence *pbrms); /* * Given the tableid of the installed default diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c index dfc8bec1bc..a52c2d1e30 100644 --- a/pbrd/pbr_vty.c +++ b/pbrd/pbr_vty.c @@ -142,18 +142,14 @@ DEFPY(pbr_map_match_src, pbr_map_match_src_cmd, if (pbrms->src) { if (prefix_same(pbrms->src, prefix)) return CMD_SUCCESS; + } else + pbrms->src = prefix_new(); - vty_out(vty, - "A `match src-ip XX` command already exists, please remove that first\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - pbrms->src = prefix_new(); prefix_copy(pbrms->src, prefix); } else prefix_free(&pbrms->src); - pbr_map_check(pbrms); + pbr_map_check(pbrms, true); return CMD_SUCCESS; } @@ -174,18 +170,14 @@ DEFPY(pbr_map_match_dst, pbr_map_match_dst_cmd, if 
(pbrms->dst) { if (prefix_same(pbrms->dst, prefix)) return CMD_SUCCESS; + } else + pbrms->dst = prefix_new(); - vty_out(vty, - "A `match dst-ip XX` command already exists, please remove that first\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - pbrms->dst = prefix_new(); prefix_copy(pbrms->dst, prefix); } else prefix_free(&pbrms->dst); - pbr_map_check(pbrms); + pbr_map_check(pbrms, true); return CMD_SUCCESS; } @@ -205,30 +197,52 @@ DEFPY(pbr_map_match_mark, pbr_map_match_mark_cmd, #endif if (!no) { - if (pbrms->mark) { + if (pbrms->mark) if (pbrms->mark == (uint32_t)mark) return CMD_SUCCESS; - vty_out(vty, - "A `match mark XX` command already exists, please remove that first\n"); - return CMD_WARNING_CONFIG_FAILED; - } - pbrms->mark = (uint32_t)mark; } else pbrms->mark = 0; - pbr_map_check(pbrms); + pbr_map_check(pbrms, true); return CMD_SUCCESS; } -#define SET_VRF_EXISTS_STR \ - "A `set vrf XX` command already exists, please remove that first\n" +static void pbrms_clear_set_vrf_config(struct pbr_map_sequence *pbrms) +{ + if (pbrms->vrf_lookup || pbrms->vrf_unchanged) { + pbr_map_delete_vrf(pbrms); + pbrms->vrf_name[0] = '\0'; + pbrms->vrf_lookup = false; + pbrms->vrf_unchanged = false; + } +} + +static void pbrms_clear_set_nhg_config(struct pbr_map_sequence *pbrms) +{ + if (pbrms->nhgrp_name) + pbr_map_delete_nexthops(pbrms); +} + +static void pbrms_clear_set_nexthop_config(struct pbr_map_sequence *pbrms) +{ + if (pbrms->nhg) + pbr_nht_delete_individual_nexthop(pbrms); +} + +static void pbrms_clear_set_config(struct pbr_map_sequence *pbrms) +{ + pbrms_clear_set_vrf_config(pbrms); + pbrms_clear_set_nhg_config(pbrms); + pbrms_clear_set_nexthop_config(pbrms); + + pbrms->nhs_installed = false; +} DEFPY(pbr_map_nexthop_group, pbr_map_nexthop_group_cmd, - "[no] set nexthop-group NHGNAME$name", - NO_STR + "set nexthop-group NHGNAME$name", "Set for the PBR-MAP\n" "nexthop-group to use\n" "The name of the nexthop-group\n") @@ -236,17 +250,6 @@ DEFPY(pbr_map_nexthop_group, 
pbr_map_nexthop_group_cmd, struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); struct nexthop_group_cmd *nhgc; - if (pbrms->nhg) { - vty_out(vty, - "A `set nexthop XX` command already exists, please remove that first\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - if (pbrms->vrf_lookup || pbrms->vrf_unchanged) { - vty_out(vty, SET_VRF_EXISTS_STR); - return CMD_WARNING_CONFIG_FAILED; - } - nhgc = nhgc_find(name); if (!nhgc) { vty_out(vty, "Specified nexthop-group %s does not exist\n", @@ -255,40 +258,39 @@ DEFPY(pbr_map_nexthop_group, pbr_map_nexthop_group_cmd, "PBR-MAP will not be applied until it is created\n"); } - if (no) { - if (pbrms->nhgrp_name && strcmp(name, pbrms->nhgrp_name) == 0) - pbr_map_delete_nexthops(pbrms); - else { - vty_out(vty, - "Nexthop Group specified: %s does not exist to remove\n", - name); - return CMD_WARNING_CONFIG_FAILED; - } - } else { - if (pbrms->nhgrp_name) { - if (strcmp(name, pbrms->nhgrp_name) != 0) { - vty_out(vty, - "Please delete current nexthop group before modifying current one\n"); - return CMD_WARNING_CONFIG_FAILED; - } + if (pbrms->nhgrp_name && strcmp(name, pbrms->nhgrp_name) == 0) + return CMD_SUCCESS; - return CMD_SUCCESS; - } - pbrms->nhgrp_name = XSTRDUP(MTYPE_TMP, name); - pbr_map_check(pbrms); - } + /* This is new/replacement config */ + pbrms_clear_set_config(pbrms); + + pbrms->nhgrp_name = XSTRDUP(MTYPE_TMP, name); + pbr_map_check(pbrms, true); + + return CMD_SUCCESS; +} + +DEFPY(no_pbr_map_nexthop_group, no_pbr_map_nexthop_group_cmd, + "no set nexthop-group [NHGNAME$name]", + NO_STR + "Set for the PBR-MAP\n" + "nexthop-group to use\n" + "The name of the nexthop-group\n") +{ + struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); + + pbrms_clear_set_config(pbrms); return CMD_SUCCESS; } DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd, - "[no] set nexthop\ + "set nexthop\ <\ <A.B.C.D|X:X::X:X>$addr [INTERFACE$intf]\ |INTERFACE$intf\ >\ [nexthop-vrf NAME$vrf_name]", - NO_STR "Set for the 
PBR-MAP\n" "Specify one of the nexthops in this map\n" "v4 Address\n" @@ -301,18 +303,7 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd, struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); struct vrf *vrf; struct nexthop nhop; - struct nexthop *nh; - - if (pbrms->nhgrp_name) { - vty_out(vty, - "Please unconfigure the nexthop group before adding an individual nexthop\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - if (pbrms->vrf_lookup || pbrms->vrf_unchanged) { - vty_out(vty, SET_VRF_EXISTS_STR); - return CMD_WARNING_CONFIG_FAILED; - } + struct nexthop *nh = NULL; if (vrf_name) vrf = vrf_lookup_by_name(vrf_name); @@ -362,45 +353,18 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd, if (pbrms->nhg) nh = nexthop_exists(pbrms->nhg, &nhop); - else { - char buf[PBR_NHC_NAMELEN]; - - if (no) { - vty_out(vty, "No nexthops to delete\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - pbrms->nhg = nexthop_group_new(); - pbrms->internal_nhg_name = - XSTRDUP(MTYPE_TMP, - pbr_nht_nexthop_make_name(pbrms->parent->name, - PBR_NHC_NAMELEN, - pbrms->seqno, - buf)); - nh = NULL; - } - - if (no) { - if (nh) - pbr_nht_delete_individual_nexthop(pbrms); - } else if (!nh) { - if (pbrms->nhg->nexthop) { - vty_out(vty, - "If you would like more than one nexthop please use nexthop-groups\n"); - return CMD_WARNING_CONFIG_FAILED; - } + if (nh) /* Same config re-entered */ + goto done; - /* must be adding new nexthop since !no and !nexthop_exists */ - nh = nexthop_new(); + /* This is new/replacement config */ + pbrms_clear_set_config(pbrms); - memcpy(nh, &nhop, sizeof(nhop)); - _nexthop_add(&pbrms->nhg->nexthop, nh); + pbr_nht_add_individual_nexthop(pbrms, &nhop); - pbr_nht_add_individual_nexthop(pbrms); - pbr_map_check(pbrms); - } + pbr_map_check(pbrms, true); +done: if (nhop.type == NEXTHOP_TYPE_IFINDEX || (nhop.type == NEXTHOP_TYPE_IPV6_IFINDEX && IN6_IS_ADDR_LINKLOCAL(&nhop.gate.ipv6))) { @@ -414,84 +378,80 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd, return CMD_SUCCESS; } 
-DEFPY(pbr_map_vrf, pbr_map_vrf_cmd, - "[no] set vrf <NAME$vrf_name|unchanged>", +DEFPY(no_pbr_map_nexthop, no_pbr_map_nexthop_cmd, + "no set nexthop\ + [<\ + <A.B.C.D|X:X::X:X>$addr [INTERFACE$intf]\ + |INTERFACE$intf\ + >\ + [nexthop-vrf NAME$vrf_name]]", NO_STR "Set for the PBR-MAP\n" - "Specify the VRF for this map\n" - "The VRF Name\n" - "Use the interface's VRF for lookup\n") + "Specify one of the nexthops in this map\n" + "v4 Address\n" + "v6 Address\n" + "Interface to use\n" + "Interface to use\n" + "If the nexthop is in a different vrf tell us\n" + "The nexthop-vrf Name\n") { struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); - if (no) { - pbr_map_delete_vrf(pbrms); + pbrms_clear_set_config(pbrms); - /* Reset all data */ - pbrms->nhs_installed = false; - pbrms->vrf_name[0] = '\0'; - pbrms->vrf_lookup = false; - pbrms->vrf_unchanged = false; - - return CMD_SUCCESS; - } + return CMD_SUCCESS; +} - if (pbrms->nhgrp_name || pbrms->nhg) { - vty_out(vty, - "A `set nexthop/nexthop-group XX` command already exits, please remove that first\n"); - return CMD_WARNING_CONFIG_FAILED; - } +DEFPY(pbr_map_vrf, pbr_map_vrf_cmd, + "set vrf <NAME$vrf_name|unchanged>", + "Set for the PBR-MAP\n" + "Specify the VRF for this map\n" + "The VRF Name\n" + "Use the interface's VRF for lookup\n") +{ + struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); /* - * Determine if a set vrf * command already exists. - * - * If its equivalent, just return success. - * - * Else, return failure, we don't allow atomic swaps yet. + * If an equivalent set vrf * exists, just return success. */ - if (vrf_name && pbrms->vrf_lookup) { - /* New vrf specified and one already exists */ - - /* Is this vrf different from one already configured? 
*/ - if (strncmp(pbrms->vrf_name, vrf_name, sizeof(pbrms->vrf_name)) - != 0) - goto vrf_exists; - + if (vrf_name && pbrms->vrf_lookup + && strncmp(pbrms->vrf_name, vrf_name, sizeof(pbrms->vrf_name)) == 0) return CMD_SUCCESS; - - } else if (!vrf_name && pbrms->vrf_unchanged) { - /* Unchanged specified and unchanged already exists */ + else if (!vrf_name && pbrms->vrf_unchanged) /* Unchanged already set */ return CMD_SUCCESS; - } else if (vrf_name && pbrms->vrf_unchanged) { - /* New vrf specified and unchanged is already set */ - goto vrf_exists; - - } else if (!vrf_name && pbrms->vrf_lookup) { - /* Unchanged specified and vrf to lookup already exists */ - goto vrf_exists; + if (vrf_name && !pbr_vrf_lookup_by_name(vrf_name)) { + vty_out(vty, "Specified: %s is non-existent\n", vrf_name); + return CMD_WARNING_CONFIG_FAILED; } - /* Create new lookup VRF or Unchanged */ - if (vrf_name) { - if (!pbr_vrf_lookup_by_name(vrf_name)) { - vty_out(vty, "Specified: %s is non-existent\n", - vrf_name); - return CMD_WARNING_CONFIG_FAILED; - } + /* This is new/replacement config */ + pbrms_clear_set_config(pbrms); + if (vrf_name) { pbrms->vrf_lookup = true; strlcpy(pbrms->vrf_name, vrf_name, sizeof(pbrms->vrf_name)); } else pbrms->vrf_unchanged = true; - pbr_map_check(pbrms); + pbr_map_check(pbrms, true); return CMD_SUCCESS; +} -vrf_exists: - vty_out(vty, SET_VRF_EXISTS_STR); - return CMD_WARNING_CONFIG_FAILED; +DEFPY(no_pbr_map_vrf, no_pbr_map_vrf_cmd, + "no set vrf [<NAME$vrf_name|unchanged>]", + NO_STR + "Set for the PBR-MAP\n" + "Specify the VRF for this map\n" + "The VRF Name\n" + "Use the interface's VRF for lookup\n") +{ + struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); + + pbrms_clear_set_config(pbrms); + + return CMD_SUCCESS; } DEFPY (pbr_policy, @@ -717,7 +677,12 @@ DEFPY (show_pbr_interface, /* PBR debugging CLI ------------------------------------------------------- */ -static struct cmd_node debug_node = {DEBUG_NODE, "", 1}; +static struct cmd_node 
debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = pbr_debug_config_write, +}; DEFPY(debug_pbr, debug_pbr_cmd, @@ -765,8 +730,13 @@ DEFUN_NOSH(show_debugging_pbr, /* ------------------------------------------------------------------------- */ +static int pbr_interface_config_write(struct vty *vty); static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", 1 /* vtysh ? yes */ + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = pbr_interface_config_write, }; static int pbr_interface_config_write(struct vty *vty) @@ -794,8 +764,15 @@ static int pbr_interface_config_write(struct vty *vty) return 1; } +static int pbr_vty_map_config_write(struct vty *vty); /* PBR map node structure. */ -static struct cmd_node pbr_map_node = {PBRMAP_NODE, "%s(config-pbr-map)# ", 1}; +static struct cmd_node pbr_map_node = { + .name = "pbr-map", + .node = PBRMAP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-pbr-map)# ", + .config_write = pbr_vty_map_config_write, +}; static int pbr_vty_map_config_write_sequence(struct vty *vty, struct pbr_map *pbrm, @@ -873,15 +850,13 @@ void pbr_vty_init(void) { cmd_variable_handler_register(pbr_map_name); - install_node(&interface_node, - pbr_interface_config_write); + install_node(&interface_node); if_cmd_init(); - install_node(&pbr_map_node, - pbr_vty_map_config_write); + install_node(&pbr_map_node); /* debug */ - install_node(&debug_node, pbr_debug_config_write); + install_node(&debug_node); install_element(VIEW_NODE, &debug_pbr_cmd); install_element(CONFIG_NODE, &debug_pbr_cmd); install_element(VIEW_NODE, &show_debugging_pbr_cmd); @@ -897,8 +872,11 @@ void pbr_vty_init(void) install_element(PBRMAP_NODE, &pbr_map_match_dst_cmd); install_element(PBRMAP_NODE, &pbr_map_match_mark_cmd); install_element(PBRMAP_NODE, &pbr_map_nexthop_group_cmd); + install_element(PBRMAP_NODE, &no_pbr_map_nexthop_group_cmd); 
install_element(PBRMAP_NODE, &pbr_map_nexthop_cmd); + install_element(PBRMAP_NODE, &no_pbr_map_nexthop_cmd); install_element(PBRMAP_NODE, &pbr_map_vrf_cmd); + install_element(PBRMAP_NODE, &no_pbr_map_vrf_cmd); install_element(VIEW_NODE, &show_pbr_cmd); install_element(VIEW_NODE, &show_pbr_map_cmd); install_element(VIEW_NODE, &show_pbr_interface_cmd); diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c index c2d93a405e..de2a99e269 100644 --- a/pbrd/pbr_zebra.c +++ b/pbrd/pbr_zebra.c @@ -548,7 +548,7 @@ static void pbr_encode_pbr_map_sequence(struct stream *s, } void pbr_send_pbr_map(struct pbr_map_sequence *pbrms, - struct pbr_map_interface *pmi, bool install) + struct pbr_map_interface *pmi, bool install, bool changed) { struct pbr_map *pbrm = pbrms->parent; struct stream *s; @@ -560,11 +560,13 @@ void pbr_send_pbr_map(struct pbr_map_sequence *pbrms, pbrm->name, install, is_installed); /* - * If we are installed and asked to do so again - * just return. If we are not installed and asked - * and asked to delete just return; + * If we are installed and asked to do so again and the config + * has not changed, just return. + * + * If we are not installed and asked + * to delete just return. */ - if (install && is_installed) + if (install && is_installed && !changed) return; if (!install && !is_installed) @@ -582,9 +584,9 @@ void pbr_send_pbr_map(struct pbr_map_sequence *pbrms, */ stream_putl(s, 1); - DEBUGD(&pbr_dbg_zebra, "%s: \t%s %s %d %s %u", __func__, - install ? "Installing" : "Deleting", pbrm->name, install, - pmi->ifp->name, pmi->delete); + DEBUGD(&pbr_dbg_zebra, "%s: \t%s %s seq %u %d %s %u", __func__, + install ? 
"Installing" : "Deleting", pbrm->name, pbrms->seqno, + install, pmi->ifp->name, pmi->delete); pbr_encode_pbr_map_sequence(s, pbrms, pmi->ifp); diff --git a/pbrd/pbr_zebra.h b/pbrd/pbr_zebra.h index d5d938021a..cc42e21abe 100644 --- a/pbrd/pbr_zebra.h +++ b/pbrd/pbr_zebra.h @@ -36,7 +36,8 @@ extern void route_delete(struct pbr_nexthop_group_cache *pnhgc, extern void pbr_send_rnh(struct nexthop *nhop, bool reg); extern void pbr_send_pbr_map(struct pbr_map_sequence *pbrms, - struct pbr_map_interface *pmi, bool install); + struct pbr_map_interface *pmi, bool install, + bool changed); extern struct pbr_interface *pbr_if_new(struct interface *ifp); diff --git a/pimd/README b/pimd/README index 3d03979a9a..1db0aad83c 100644 --- a/pimd/README +++ b/pimd/README @@ -33,7 +33,7 @@ HOME SITE qpimd lives at: - https://github.com/freerangerouting/frr + https://github.com/frrouting/frr PLATFORMS @@ -57,7 +57,7 @@ SUPPORT Please post comments, questions, patches, bug reports at the support site: - https://freerangerouting/frr + https://frrouting.org/frr RELATED WORK diff --git a/pimd/pim_bfd.c b/pimd/pim_bfd.c index 84f6733eaf..0df8ea6922 100644 --- a/pimd/pim_bfd.c +++ b/pimd/pim_bfd.c @@ -47,7 +47,7 @@ void pim_bfd_write_config(struct vty *vty, struct interface *ifp) if (!pim_ifp) return; - bfd_info = (struct bfd_info *)pim_ifp->bfd_info; + bfd_info = pim_ifp->bfd_info; if (!bfd_info) return; @@ -92,7 +92,7 @@ void pim_bfd_info_nbr_create(struct pim_interface *pim_ifp, if (!neigh->bfd_info) return; - nbr_bfd_info = (struct bfd_info *)neigh->bfd_info; + nbr_bfd_info = neigh->bfd_info; nbr_bfd_info->detect_mult = pim_ifp->bfd_info->detect_mult; nbr_bfd_info->desired_min_tx = pim_ifp->bfd_info->desired_min_tx; nbr_bfd_info->required_min_rx = pim_ifp->bfd_info->required_min_rx; @@ -118,7 +118,7 @@ static void pim_bfd_reg_dereg_nbr(struct pim_neighbor *nbr, int command) if (!nbr) return; pim_ifp = nbr->interface->info; - bfd_info = (struct bfd_info *)pim_ifp->bfd_info; + bfd_info = 
pim_ifp->bfd_info; if (!bfd_info) return; if (PIM_DEBUG_PIM_TRACE) { @@ -194,8 +194,8 @@ void pim_bfd_if_param_set(struct interface *ifp, uint32_t min_rx, if (!pim_ifp) return; - bfd_set_param((struct bfd_info **)&(pim_ifp->bfd_info), min_rx, min_tx, - detect_mult, defaults, &command); + bfd_set_param(&(pim_ifp->bfd_info), min_rx, min_tx, detect_mult, + defaults, &command); if (pim_ifp->bfd_info) { if (PIM_DEBUG_PIM_TRACE) diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index ad47427101..d949c657bd 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -1282,6 +1282,13 @@ int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf, bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN); pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str, sizeof(bsr_str)); + if (bshdr->hm_len > 32) { + zlog_warn("Bad hashmask length for IPv4; got %" PRIu8 + ", expected value in range 0-32", + bshdr->hm_len); + pim->bsm_dropped++; + return -1; + } pim->global_scope.hashMasklen = bshdr->hm_len; frag_tag = ntohs(bshdr->frag_tag); diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 871606414e..f6af98598b 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -70,10 +70,19 @@ #endif static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", 1 /* vtysh ? 
yes */ + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = pim_interface_config_write, }; -static struct cmd_node debug_node = {DEBUG_NODE, "", 1}; +static struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = pim_debug_config_write, +}; static struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[], const int argc, int *idx) @@ -3319,12 +3328,9 @@ static void pim_show_statistics(struct pim_instance *pim, struct vty *vty, if (uj) { json = json_object_new_object(); - json_object_int_add(json, "Number of Received BSMs", - pim->bsm_rcvd); - json_object_int_add(json, "Number of Forwared BSMs", - pim->bsm_sent); - json_object_int_add(json, "Number of Dropped BSMs", - pim->bsm_dropped); + json_object_int_add(json, "bsmRx", pim->bsm_rcvd); + json_object_int_add(json, "bsmTx", pim->bsm_sent); + json_object_int_add(json, "bsmDropped", pim->bsm_dropped); } else { vty_out(vty, "BSM Statistics :\n"); vty_out(vty, "----------------\n"); @@ -3366,15 +3372,13 @@ static void pim_show_statistics(struct pim_instance *pim, struct vty *vty, json_row = json_object_new_object(); json_object_string_add(json_row, "If Name", ifp->name); + json_object_int_add(json_row, "bsmDroppedConfig", + pim_ifp->pim_ifstat_bsm_cfg_miss); json_object_int_add( - json_row, - "Number of BSMs dropped due to config miss", - pim_ifp->pim_ifstat_bsm_cfg_miss); - json_object_int_add( - json_row, "Number of unicast BSMs dropped", + json_row, "bsmDroppedUnicast", pim_ifp->pim_ifstat_ucast_bsm_cfg_miss); json_object_int_add(json_row, - "Number of BSMs dropped due to invalid scope zone", + "bsmDroppedInvalidScopeZone", pim_ifp->pim_ifstat_bsm_invalid_sz); json_object_object_add(json, ifp->name, json_row); } @@ -3792,11 +3796,11 @@ static void pim_show_bsr(struct pim_instance *pim, json_object_string_add(json, "bsr", bsr_str); json_object_int_add(json, "priority", 
pim->global_scope.current_bsr_prio); - json_object_int_add(json, "fragment_tag", + json_object_int_add(json, "fragmentTag", pim->global_scope.bsm_frag_tag); json_object_string_add(json, "state", bsr_state); json_object_string_add(json, "upTime", uptime); - json_object_string_add(json, "last_bsm_seen", last_bsm_seen); + json_object_string_add(json, "lastBsmSeen", last_bsm_seen); } else { @@ -9762,11 +9766,25 @@ DEFUN (no_ip_msdp_mesh_group_source, "mesh group local address\n") { PIM_DECLVAR_CONTEXT(vrf, pim); - if (argc == 7) - return ip_no_msdp_mesh_group_cmd_worker(pim, vty, argv[6]->arg); + + return ip_no_msdp_mesh_group_source_cmd_worker(pim, vty, argv[4]->arg); +} + +DEFUN (no_ip_msdp_mesh_group, + no_ip_msdp_mesh_group_cmd, + "no ip msdp mesh-group [WORD]", + NO_STR + IP_STR + CFG_MSDP_STR + "Delete MSDP mesh-group\n" + "mesh group name") +{ + PIM_DECLVAR_CONTEXT(vrf, pim); + + if (argc == 5) + return ip_no_msdp_mesh_group_cmd_worker(pim, vty, argv[4]->arg); else - return ip_no_msdp_mesh_group_source_cmd_worker(pim, vty, - argv[4]->arg); + return ip_no_msdp_mesh_group_cmd_worker(pim, vty, NULL); } static void print_empty_json_obj(struct vty *vty) @@ -10863,11 +10881,10 @@ DEFUN_HIDDEN (ip_pim_mlag, void pim_cmd_init(void) { - install_node(&interface_node, - pim_interface_config_write); /* INTERFACE_NODE */ + install_node(&interface_node); /* INTERFACE_NODE */ if_cmd_init(); - install_node(&debug_node, pim_debug_config_write); + install_node(&debug_node); install_element(ENABLE_NODE, &pim_test_sg_keepalive_cmd); @@ -11158,6 +11175,8 @@ void pim_cmd_init(void) install_element(VRF_NODE, &ip_msdp_mesh_group_source_cmd); install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_source_cmd); install_element(VRF_NODE, &no_ip_msdp_mesh_group_source_cmd); + install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_cmd); + install_element(VRF_NODE, &no_ip_msdp_mesh_group_cmd); install_element(VIEW_NODE, &show_ip_msdp_peer_detail_cmd); install_element(VIEW_NODE, 
&show_ip_msdp_peer_detail_vrf_all_cmd); install_element(VIEW_NODE, &show_ip_msdp_sa_detail_cmd); diff --git a/pimd/pim_hello.c b/pimd/pim_hello.c index ecdb3a34a7..e50504ec10 100644 --- a/pimd/pim_hello.c +++ b/pimd/pim_hello.c @@ -484,7 +484,7 @@ int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf, return -1; } if (can_disable_join_suppression) { - *((uint8_t *)(curr) + 4) |= 0x80; /* enable T bit */ + *(curr + 4) |= 0x80; /* enable T bit */ } curr = tmp; diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index 07c4172f22..b25b6eaa8c 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -28,6 +28,7 @@ #include "plist.h" #include "hash.h" #include "ferr.h" +#include "network.h" #include "pimd.h" #include "pim_instance.h" @@ -900,15 +901,16 @@ struct in_addr pim_find_primary_addr(struct interface *ifp) * So let's grab the loopbacks v4 address * and use that as the primary address */ - if (!v4_addrs && v6_addrs && !if_is_loopback(ifp)) { + if (!v4_addrs && v6_addrs) { struct interface *lo_ifp; + // DBS - Come back and check here if (ifp->vrf_id == VRF_DEFAULT) lo_ifp = if_lookup_by_name("lo", vrf->vrf_id); else lo_ifp = if_lookup_by_name(vrf->name, vrf->vrf_id); - if (lo_ifp) + if (lo_ifp && (lo_ifp != ifp)) return pim_find_primary_addr(lo_ifp); } @@ -1102,7 +1104,8 @@ int pim_if_t_override_msec(struct interface *ifp) effective_override_interval_msec = pim_if_effective_override_interval_msec(ifp); - t_override_msec = random() % (effective_override_interval_msec + 1); + t_override_msec = + frr_weak_random() % (effective_override_interval_msec + 1); return t_override_msec; } @@ -1180,7 +1183,7 @@ long pim_if_t_suppressed_msec(struct interface *ifp) return 0; /* t_suppressed = t_periodic * rand(1.1, 1.4) */ - ramount = 1100 + (random() % (1400 - 1100 + 1)); + ramount = 1100 + (frr_weak_random() % (1400 - 1100 + 1)); t_suppressed_msec = router->t_periodic * ramount; return t_suppressed_msec; diff --git a/pimd/pim_main.c b/pimd/pim_main.c index 
93b561ba0f..5c4c7151a5 100644 --- a/pimd/pim_main.c +++ b/pimd/pim_main.c @@ -75,6 +75,7 @@ struct zebra_privs_t pimd_privs = { static const struct frr_yang_module_info *const pimd_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(pimd, PIM, .vty_port = PIMD_VTY_PORT, diff --git a/pimd/pim_mlag.c b/pimd/pim_mlag.c index 304e6ac6bc..78be914cee 100644 --- a/pimd/pim_mlag.c +++ b/pimd/pim_mlag.c @@ -583,7 +583,9 @@ static void pim_mlag_process_mlagd_state_change(struct mlag_status msg) router->mlag_role = msg.my_role; } - strcpy(router->peerlink_rif, msg.peerlink_rif); + strlcpy(router->peerlink_rif, msg.peerlink_rif, + sizeof(router->peerlink_rif)); + /* XXX - handle the case where we may rx the interface name from the * MLAG daemon before we get the interface from zebra. */ @@ -767,7 +769,7 @@ static void pim_mlag_process_mroute_del(struct mlag_mroute_del msg) int pim_zebra_mlag_handle_msg(struct stream *s, int len) { struct mlag_msg mlag_msg; - char buf[ZLOG_FILTER_LENGTH_MAX]; + char buf[80]; int rc = 0; size_t length; diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 63d34e859c..52c989e644 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -1286,7 +1286,9 @@ enum pim_msdp_err pim_msdp_mg_del(struct pim_instance *pim, struct pim_msdp_mg *mg = pim->msdp.mg; struct pim_msdp_mg_mbr *mbr; - if (!mg || strcmp(mg->mesh_group_name, mesh_group_name)) { + if (!mg + || (mesh_group_name + && strcmp(mg->mesh_group_name, mesh_group_name))) { return PIM_MSDP_ERR_NO_MG; } diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c index 60b7c73d43..68d43c0556 100644 --- a/pimd/pim_nht.c +++ b/pimd/pim_nht.c @@ -788,7 +788,11 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS) case NEXTHOP_TYPE_IPV6_IFINDEX: ifp1 = if_lookup_by_index(nexthop->ifindex, pim->vrf_id); - nbr = pim_neighbor_find_if(ifp1); + + if (!ifp1) + nbr = NULL; + else + nbr = pim_neighbor_find_if(ifp1); /* Overwrite with Nbr address as NH addr */ if (nbr) nexthop->gate.ipv4 
= nbr->source_addr; diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c index f37c140bf2..3976b262e3 100644 --- a/pimd/pim_pim.c +++ b/pimd/pim_pim.c @@ -23,6 +23,7 @@ #include "thread.h" #include "memory.h" #include "if.h" +#include "network.h" #include "pimd.h" #include "pim_pim.h" @@ -878,7 +879,7 @@ int pim_sock_add(struct interface *ifp) old_genid = pim_ifp->pim_generation_id; while (old_genid == pim_ifp->pim_generation_id) - pim_ifp->pim_generation_id = random(); + pim_ifp->pim_generation_id = frr_weak_random(); zlog_info("PIM INTERFACE UP: on interface %s ifindex=%d", ifp->name, ifp->ifindex); diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index a9f1d9335a..ef5f478226 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -1261,11 +1261,11 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj) char buf[48]; if (rp_info->rp_src == RP_SRC_STATIC) - strcpy(source, "Static"); + strlcpy(source, "Static", sizeof(source)); else if (rp_info->rp_src == RP_SRC_BSR) - strcpy(source, "BSR"); + strlcpy(source, "BSR", sizeof(source)); else - strcpy(source, "None"); + strlcpy(source, "None", sizeof(source)); if (uj) { /* * If we have moved on to a new RP then add the diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c index ae5781f0cc..f0a71c96ce 100644 --- a/pimd/pim_sock.c +++ b/pimd/pim_sock.c @@ -375,8 +375,7 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len, struct in_pktinfo *i = (struct in_pktinfo *)CMSG_DATA(cmsg); if (to) - ((struct sockaddr_in *)to)->sin_addr = - i->ipi_addr; + to->sin_addr = i->ipi_addr; if (tolen) *tolen = sizeof(struct sockaddr_in); if (ifindex) @@ -391,7 +390,7 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len, && (cmsg->cmsg_type == IP_RECVDSTADDR)) { struct in_addr *i = (struct in_addr *)CMSG_DATA(cmsg); if (to) - ((struct sockaddr_in *)to)->sin_addr = *i; + to->sin_addr = *i; if (tolen) *tolen = sizeof(struct sockaddr_in); diff --git a/pimd/pim_tlv.c b/pimd/pim_tlv.c index 881a3e332a..633bb207bd 100644 --- 
a/pimd/pim_tlv.c +++ b/pimd/pim_tlv.c @@ -121,20 +121,19 @@ int pim_encode_addr_ucast(uint8_t *buf, struct prefix *p) { switch (p->family) { case AF_INET: - *(uint8_t *)buf = - PIM_MSG_ADDRESS_FAMILY_IPV4; /* notice: AF_INET != - PIM_MSG_ADDRESS_FAMILY_IPV4 - */ + *buf = PIM_MSG_ADDRESS_FAMILY_IPV4; /* notice: AF_INET != + PIM_MSG_ADDRESS_FAMILY_IPV4 + */ ++buf; - *(uint8_t *)buf = 0; /* ucast IPv4 native encoding type (RFC + *buf = 0; /* ucast IPv4 native encoding type (RFC 4601: 4.9.1) */ ++buf; memcpy(buf, &p->u.prefix4, sizeof(struct in_addr)); return ucast_ipv4_encoding_len; case AF_INET6: - *(uint8_t *)buf = PIM_MSG_ADDRESS_FAMILY_IPV6; + *buf = PIM_MSG_ADDRESS_FAMILY_IPV6; ++buf; - *(uint8_t *)buf = 0; + *buf = 0; ++buf; memcpy(buf, &p->u.prefix6, sizeof(struct in6_addr)); return ucast_ipv6_encoding_len; @@ -198,13 +197,13 @@ int pim_encode_addr_group(uint8_t *buf, afi_t afi, int bidir, int scope, switch (afi) { case AFI_IP: - *(uint8_t *)buf = PIM_MSG_ADDRESS_FAMILY_IPV4; + *buf = PIM_MSG_ADDRESS_FAMILY_IPV4; ++buf; - *(uint8_t *)buf = 0; + *buf = 0; ++buf; - *(uint8_t *)buf = flags; + *buf = flags; ++buf; - *(uint8_t *)buf = 32; + *buf = 32; ++buf; memcpy(buf, &group, sizeof(struct in_addr)); return group_ipv4_encoding_len; diff --git a/pimd/pim_tlv.h b/pimd/pim_tlv.h index 657675b312..ef764656d3 100644 --- a/pimd/pim_tlv.h +++ b/pimd/pim_tlv.h @@ -48,8 +48,18 @@ typedef uint32_t pim_hello_options; #define PIM_OPTION_UNSET(options, option_mask) ((options) &= ~(option_mask)) #define PIM_OPTION_IS_SET(options, option_mask) ((options) & (option_mask)) -#define PIM_TLV_GET_UINT16(buf) ntohs(*(const uint16_t *)(buf)) -#define PIM_TLV_GET_UINT32(buf) ntohl(*(const uint32_t *)(buf)) +#define PIM_TLV_GET_UINT16(buf) \ + ({ \ + uint16_t _tmp; \ + memcpy(&_tmp, (buf), sizeof(uint16_t)); \ + ntohs(_tmp); \ + }) +#define PIM_TLV_GET_UINT32(buf) \ + ({ \ + uint32_t _tmp; \ + memcpy(&_tmp, (buf), sizeof(uint32_t)); \ + ntohl(_tmp); \ + }) #define PIM_TLV_GET_TYPE(buf) 
PIM_TLV_GET_UINT16(buf) #define PIM_TLV_GET_LENGTH(buf) PIM_TLV_GET_UINT16(buf) #define PIM_TLV_GET_HOLDTIME(buf) PIM_TLV_GET_UINT16(buf) diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c index 872883dfde..1e78f41359 100644 --- a/pimd/pim_upstream.c +++ b/pimd/pim_upstream.c @@ -29,6 +29,7 @@ #include "hash.h" #include "jhash.h" #include "wheel.h" +#include "network.h" #include "pimd.h" #include "pim_pim.h" @@ -462,10 +463,26 @@ void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label, return; } - join_timer_remain_msec = pim_time_timer_remain_msec(up->t_join_timer); t_override_msec = pim_if_t_override_msec(up->rpf.source_nexthop.interface); + if (up->t_join_timer) { + join_timer_remain_msec = + pim_time_timer_remain_msec(up->t_join_timer); + } else { + /* upstream join tracked with neighbor jp timer */ + struct pim_neighbor *nbr; + + nbr = pim_neighbor_find(up->rpf.source_nexthop.interface, + up->rpf.rpf_addr.u.prefix4); + if (nbr) + join_timer_remain_msec = + pim_time_timer_remain_msec(nbr->jp_timer); + else + /* Manipulate such that override takes place */ + join_timer_remain_msec = t_override_msec + 1; + } + if (PIM_DEBUG_PIM_TRACE) { char rpf_str[INET_ADDRSTRLEN]; pim_inet4_dump("<rpf?>", up->rpf.rpf_addr.u.prefix4, rpf_str, @@ -1746,7 +1763,7 @@ void pim_upstream_start_register_stop_timer(struct pim_upstream *up, if (!null_register) { uint32_t lower = (0.5 * PIM_REGISTER_SUPPRESSION_PERIOD); uint32_t upper = (1.5 * PIM_REGISTER_SUPPRESSION_PERIOD); - time = lower + (random() % (upper - lower + 1)) + time = lower + (frr_weak_random() % (upper - lower + 1)) - PIM_REGISTER_PROBE_PERIOD; } else time = PIM_REGISTER_PROBE_PERIOD; diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index 84fac4f951..49c221f8ed 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -285,17 +285,17 @@ static int zclient_read_nexthop(struct pim_instance *pim, * If we are sending v6 secondary assume we receive v6 * secondary */ - if (pim->send_v6_secondary) 
- nbr = pim_neighbor_find_by_secondary( - if_lookup_by_index( - nexthop_tab[num_ifindex] - .ifindex, - nexthop_vrf_id), - &p); + struct interface *ifp = if_lookup_by_index( + nexthop_tab[num_ifindex].ifindex, + nexthop_vrf_id); + + if (!ifp) + nbr = NULL; + else if (pim->send_v6_secondary) + nbr = pim_neighbor_find_by_secondary(ifp, &p); else - nbr = pim_neighbor_find_if(if_lookup_by_index( - nexthop_tab[num_ifindex].ifindex, - nexthop_vrf_id)); + nbr = pim_neighbor_find_if(ifp); + if (nbr) { nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET; diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index db465f2b00..929214a142 100644 --- a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -675,6 +675,7 @@ fi %{_libdir}/frr/modules/bgpd_rpki.so %endif %{_libdir}/frr/modules/zebra_cumulus_mlag.so +%{_libdir}/frr/modules/dplane_fpm_nl.so %{_libdir}/frr/modules/zebra_irdp.so %{_libdir}/frr/modules/bgpd_bmp.so %{_bindir}/* diff --git a/ripd/rip_debug.c b/ripd/rip_debug.c index 3356d99c2a..871ee8e87e 100644 --- a/ripd/rip_debug.c +++ b/ripd/rip_debug.c @@ -172,10 +172,14 @@ DEFUN (no_debug_rip_zebra, return CMD_SUCCESS; } +static int config_write_debug(struct vty *vty); /* Debug node. */ -static struct cmd_node debug_node = {DEBUG_NODE, - "", /* Debug node has no interface. 
*/ - 1}; +static struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_debug, +}; static int config_write_debug(struct vty *vty) { @@ -210,7 +214,7 @@ void rip_debug_init(void) rip_debug_packet = 0; rip_debug_zebra = 0; - install_node(&debug_node, config_write_debug); + install_node(&debug_node); install_element(ENABLE_NODE, &show_debugging_rip_cmd); install_element(ENABLE_NODE, &debug_rip_events_cmd); diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c index c05d776eb1..177f53db45 100644 --- a/ripd/rip_interface.c +++ b/ripd/rip_interface.c @@ -1193,8 +1193,13 @@ int rip_show_network_config(struct vty *vty, struct rip *rip) return 0; } +static int rip_interface_config_write(struct vty *vty); static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", 1, + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = rip_interface_config_write, }; void rip_interface_sync(struct interface *ifp) @@ -1236,7 +1241,7 @@ void rip_if_init(void) hook_register_prio(if_del, 0, rip_interface_delete_hook); /* Install interface node. 
*/ - install_node(&interface_node, rip_interface_config_write); + install_node(&interface_node); if_cmd_init(); if_zapi_callbacks(rip_ifp_create, rip_ifp_up, rip_ifp_down, rip_ifp_destroy); diff --git a/ripd/rip_main.c b/ripd/rip_main.c index 73e94deefc..9ec32a53e3 100644 --- a/ripd/rip_main.c +++ b/ripd/rip_main.c @@ -117,6 +117,7 @@ static const struct frr_yang_module_info *const ripd_yang_modules[] = { &frr_interface_info, &frr_ripd_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(ripd, RIP, .vty_port = RIP_VTY_PORT, diff --git a/ripd/ripd.c b/ripd/ripd.c index f092da847d..30d2a59d77 100644 --- a/ripd/ripd.c +++ b/ripd/ripd.c @@ -44,6 +44,7 @@ #include "privs.h" #include "lib_errors.h" #include "northbound_cli.h" +#include "network.h" #include "ripd/ripd.h" #include "ripd/rip_nb.h" @@ -2647,7 +2648,7 @@ static int rip_triggered_update(struct thread *t) random interval between 1 and 5 seconds. If other changes that would trigger updates occur before the timer expires, a single update is triggered when the timer expires. */ - interval = (random() % 5) + 1; + interval = (frr_weak_random() % 5) + 1; rip->t_triggered_interval = NULL; thread_add_timer(master, rip_triggered_interval, rip, interval, @@ -2844,7 +2845,8 @@ static int rip_update_jitter(unsigned long time) if (jitter_input < JITTER_BOUND) jitter_input = JITTER_BOUND; - jitter = (((random() % ((jitter_input * 2) + 1)) - jitter_input)); + jitter = (((frr_weak_random() % ((jitter_input * 2) + 1)) + - jitter_input)); return jitter / JITTER_BOUND; } @@ -3327,8 +3329,15 @@ static int config_write_rip(struct vty *vty) return write; } +static int config_write_rip(struct vty *vty); /* RIP node structure. */ -static struct cmd_node rip_node = {RIP_NODE, "%s(config-router)# ", 1}; +static struct cmd_node rip_node = { + .name = "rip", + .node = RIP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = config_write_rip, +}; /* Distribute-list update functions. 
*/ static void rip_distribute_update(struct distribute_ctx *ctx, @@ -3731,7 +3740,7 @@ void rip_vrf_terminate(void) void rip_init(void) { /* Install top nodes. */ - install_node(&rip_node, config_write_rip); + install_node(&rip_node); /* Install rip commands. */ install_element(VIEW_NODE, &show_ip_rip_cmd); diff --git a/ripngd/ripng_debug.c b/ripngd/ripng_debug.c index fe63d8fdea..54edb17ecc 100644 --- a/ripngd/ripng_debug.c +++ b/ripngd/ripng_debug.c @@ -174,10 +174,13 @@ DEFUN (no_debug_ripng_zebra, return CMD_SUCCESS; } +static int config_write_debug(struct vty *vty); /* Debug node. */ static struct cmd_node debug_node = { - DEBUG_NODE, "", /* Debug node has no interface. */ - 1 /* VTYSH */ + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_debug, }; static int config_write_debug(struct vty *vty) @@ -213,7 +216,7 @@ void ripng_debug_init(void) ripng_debug_packet = 0; ripng_debug_zebra = 0; - install_node(&debug_node, config_write_debug); + install_node(&debug_node); install_element(VIEW_NODE, &show_debugging_ripng_cmd); diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c index 25d9ed2b9e..e8c907227e 100644 --- a/ripngd/ripng_interface.c +++ b/ripngd/ripng_interface.c @@ -954,9 +954,14 @@ static int interface_config_write(struct vty *vty) return write; } +static int interface_config_write(struct vty *vty); /* ripngd's interface node. */ static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", 1 /* VTYSH */ + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = interface_config_write, }; /* Initialization of interface. */ @@ -967,7 +972,7 @@ void ripng_if_init(void) hook_register_prio(if_del, 0, ripng_if_delete_hook); /* Install interface node. 
*/ - install_node(&interface_node, interface_config_write); + install_node(&interface_node); if_cmd_init(); if_zapi_callbacks(ripng_ifp_create, ripng_ifp_up, ripng_ifp_down, ripng_ifp_destroy); diff --git a/ripngd/ripng_main.c b/ripngd/ripng_main.c index 99adb2cba7..fbac750db3 100644 --- a/ripngd/ripng_main.c +++ b/ripngd/ripng_main.c @@ -117,6 +117,7 @@ static const struct frr_yang_module_info *const ripngd_yang_modules[] = { &frr_interface_info, &frr_ripngd_info, &frr_route_map_info, + &frr_vrf_info, }; FRR_DAEMON_INFO(ripngd, RIPNG, .vty_port = RIPNG_VTY_PORT, diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c index 1ea006abd6..625adcaa3c 100644 --- a/ripngd/ripngd.c +++ b/ripngd/ripngd.c @@ -37,6 +37,7 @@ #include "privs.h" #include "lib_errors.h" #include "northbound_cli.h" +#include "network.h" #include "ripngd/ripngd.h" #include "ripngd/ripng_route.h" @@ -1545,7 +1546,7 @@ int ripng_triggered_update(struct thread *t) random interval between 1 and 5 seconds. If other changes that would trigger updates occur before the timer expires, a single update is triggered when the timer expires. */ - interval = (random() % 5) + 1; + interval = (frr_weak_random() % 5) + 1; ripng->t_triggered_interval = NULL; thread_add_timer(master, ripng_triggered_interval, ripng, interval, @@ -1950,7 +1951,7 @@ int ripng_request(struct interface *ifp) static int ripng_update_jitter(int time) { - return ((random() % (time + 1)) - (time / 2)); + return ((frr_weak_random() % (time + 1)) - (time / 2)); } void ripng_event(struct ripng *ripng, enum ripng_event event, int sock) @@ -2434,9 +2435,14 @@ static int ripng_config_write(struct vty *vty) return write; } +static int ripng_config_write(struct vty *vty); /* RIPng node structure. 
*/ static struct cmd_node cmd_ripng_node = { - RIPNG_NODE, "%s(config-router)# ", 1, + .name = "ripng", + .node = RIPNG_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", + .config_write = ripng_config_write, }; static void ripng_distribute_update(struct distribute_ctx *ctx, @@ -2850,7 +2856,7 @@ void ripng_vrf_terminate(void) void ripng_init(void) { /* Install RIPNG_NODE. */ - install_node(&cmd_ripng_node, ripng_config_write); + install_node(&cmd_ripng_node); /* Install ripng commands. */ install_element(VIEW_NODE, &show_ipv6_ripng_cmd); diff --git a/sharpd/sharp_globals.h b/sharpd/sharp_globals.h index 4e5c933667..8eba57f4dd 100644 --- a/sharpd/sharp_globals.h +++ b/sharpd/sharp_globals.h @@ -28,9 +28,11 @@ struct sharp_routes { /* The original prefix for route installation */ struct prefix orig_prefix; - /* The nexthop group we are using for installation */ + /* The nexthop info we are using for installation */ struct nexthop nhop; + struct nexthop backup_nhop; struct nexthop_group nhop_group; + struct nexthop_group backup_nhop_group; uint32_t total_routes; uint32_t installed_routes; diff --git a/sharpd/sharp_logpump.c b/sharpd/sharp_logpump.c index d07e2d273f..322d802b8a 100644 --- a/sharpd/sharp_logpump.c +++ b/sharpd/sharp_logpump.c @@ -77,6 +77,8 @@ static void *logpump_run(void *arg) period = 1000000000L / lp_frequency; + zlog_tls_buffer_init(); + clock_gettime(CLOCK_MONOTONIC, &start); next = start; do { @@ -109,6 +111,8 @@ static void *logpump_run(void *arg) #endif } while (delta < lp_duration); + zlog_tls_buffer_fini(); + #ifdef RUSAGE_THREAD getrusage(RUSAGE_THREAD, &lp_rusage); #else diff --git a/sharpd/sharp_main.c b/sharpd/sharp_main.c index 5133523f01..120d704918 100644 --- a/sharpd/sharp_main.c +++ b/sharpd/sharp_main.c @@ -114,6 +114,7 @@ struct quagga_signal_t sharp_signals[] = { static const struct frr_yang_module_info *const sharpd_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_vrf_info, }; 
FRR_DAEMON_INFO(sharpd, SHARP, .vty_port = SHARP_VTY_PORT, diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c index aa3d85624b..e2ea773055 100644 --- a/sharpd/sharp_vty.c +++ b/sharpd/sharp_vty.c @@ -162,7 +162,12 @@ DEFPY (install_routes_data_dump, DEFPY (install_routes, install_routes_cmd, - "sharp install routes [vrf NAME$vrf_name] <A.B.C.D$start4|X:X::X:X$start6> <nexthop <A.B.C.D$nexthop4|X:X::X:X$nexthop6>|nexthop-group NHGNAME$nexthop_group> (1-1000000)$routes [instance (0-255)$instance] [repeat (2-1000)$rpt]", + "sharp install routes [vrf NAME$vrf_name]\ + <A.B.C.D$start4|X:X::X:X$start6>\ + <nexthop <A.B.C.D$nexthop4|X:X::X:X$nexthop6>|\ + nexthop-group NHGNAME$nexthop_group>\ + [backup$backup <A.B.C.D$backup_nexthop4|X:X::X:X$backup_nexthop6>] \ + (1-1000000)$routes [instance (0-255)$instance] [repeat (2-1000)$rpt]", "Sharp routing Protocol\n" "install some routes\n" "Routes to install\n" @@ -175,6 +180,9 @@ DEFPY (install_routes, "V6 Nexthop address to use\n" "Nexthop-Group to use\n" "The Name of the nexthop-group\n" + "Backup nexthop to use(Can be an IPv4 or IPv6 address)\n" + "Backup V4 Nexthop address to use\n" + "Backup V6 Nexthop address to use\n" "How many to create\n" "Instance to use\n" "Instance\n" @@ -197,6 +205,8 @@ DEFPY (install_routes, memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix)); memset(&sg.r.nhop, 0, sizeof(sg.r.nhop)); memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group)); + memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop)); + memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group)); if (start4.s_addr != 0) { prefix.family = AF_INET; @@ -219,6 +229,12 @@ DEFPY (install_routes, return CMD_WARNING; } + /* Explicit backup not available with named nexthop-group */ + if (backup && nexthop_group) { + vty_out(vty, "%% Invalid: cannot specify both nexthop-group and backup\n"); + return CMD_WARNING; + } + if (nexthop_group) { struct nexthop_group_cmd *nhgc = nhgc_find(nexthop_group); if (!nhgc) { @@ -229,6 +245,22 @@ DEFPY 
(install_routes, } sg.r.nhop_group.nexthop = nhgc->nhg.nexthop; + + /* Use group's backup nexthop info if present */ + if (nhgc->backup_list_name[0]) { + struct nexthop_group_cmd *bnhgc = + nhgc_find(nhgc->backup_list_name); + + if (!bnhgc) { + vty_out(vty, "%% Backup group %s not found for group %s\n", + nhgc->backup_list_name, + nhgc->name); + return CMD_WARNING; + } + + sg.r.backup_nhop.vrf_id = vrf->vrf_id; + sg.r.backup_nhop_group.nexthop = bnhgc->nhg.nexthop; + } } else { if (nexthop4.s_addr != INADDR_ANY) { sg.r.nhop.gate.ipv4 = nexthop4; @@ -242,11 +274,30 @@ DEFPY (install_routes, sg.r.nhop_group.nexthop = &sg.r.nhop; } + /* Use single backup nexthop if specified */ + if (backup) { + /* Set flag and index in primary nexthop */ + SET_FLAG(sg.r.nhop.flags, NEXTHOP_FLAG_HAS_BACKUP); + sg.r.nhop.backup_idx = 0; + + if (backup_nexthop4.s_addr != INADDR_ANY) { + sg.r.backup_nhop.gate.ipv4 = backup_nexthop4; + sg.r.backup_nhop.type = NEXTHOP_TYPE_IPV4; + } else { + sg.r.backup_nhop.gate.ipv6 = backup_nexthop6; + sg.r.backup_nhop.type = NEXTHOP_TYPE_IPV6; + } + + sg.r.backup_nhop.vrf_id = vrf->vrf_id; + sg.r.backup_nhop_group.nexthop = &sg.r.backup_nhop; + } + sg.r.inst = instance; sg.r.vrf_id = vrf->vrf_id; rts = routes; - sharp_install_routes_helper(&prefix, sg.r.vrf_id, - sg.r.inst, &sg.r.nhop_group, rts); + sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, + &sg.r.nhop_group, &sg.r.backup_nhop_group, + rts); return CMD_SUCCESS; } @@ -346,7 +397,7 @@ DEFPY(sharp_lsp_prefix_v4, sharp_lsp_prefix_v4_cmd, "sharp lsp (0-100000)$inlabel\ nexthop-group NHGNAME$nhgname\ [prefix A.B.C.D/M$pfx\ - " FRR_IP_REDIST_STR_SHARPD "$type_str [instance (0-255)$instance]]", + " FRR_IP_REDIST_STR_ZEBRA "$type_str [instance (0-255)$instance]]", "Sharp Routing Protocol\n" "Add an LSP\n" "The ingress label to use\n" @@ -354,7 +405,7 @@ DEFPY(sharp_lsp_prefix_v4, sharp_lsp_prefix_v4_cmd, "The nexthop-group name\n" "Label a prefix\n" "The v4 prefix to label\n" - 
FRR_IP_REDIST_HELP_STR_SHARPD + FRR_IP_REDIST_HELP_STR_ZEBRA "Instance to use\n" "Instance\n") { diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c index 258a0a06dd..e1bd6f5722 100644 --- a/sharpd/sharp_zebra.c +++ b/sharpd/sharp_zebra.c @@ -143,7 +143,9 @@ int sharp_install_lsps_helper(bool install_p, const struct prefix *p, } void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id, - uint8_t instance, struct nexthop_group *nhg, + uint8_t instance, + const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg, uint32_t routes) { uint32_t temp, i; @@ -157,9 +159,13 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id, } else temp = ntohl(p->u.val32[3]); + /* Only use backup route/nexthops if present */ + if (backup_nhg && (backup_nhg->nexthop == NULL)) + backup_nhg = NULL; + monotime(&sg.r.t_start); for (i = 0; i < routes; i++) { - route_add(p, vrf_id, (uint8_t)instance, nhg); + route_add(p, vrf_id, (uint8_t)instance, nhg, backup_nhg); if (v4) p->u.prefix4.s_addr = htonl(++temp); else @@ -209,6 +215,7 @@ static void handle_repeated(bool installed) sg.r.installed_routes = 0; sharp_install_routes_helper(&p, sg.r.vrf_id, sg.r.inst, &sg.r.nhop_group, + &sg.r.backup_nhop_group, sg.r.total_routes); } } @@ -276,8 +283,9 @@ void vrf_label_add(vrf_id_t vrf_id, afi_t afi, mpls_label_t label) zclient_send_vrf_label(zclient, vrf_id, afi, label, ZEBRA_LSP_SHARP); } -void route_add(struct prefix *p, vrf_id_t vrf_id, - uint8_t instance, struct nexthop_group *nhg) +void route_add(const struct prefix *p, vrf_id_t vrf_id, + uint8_t instance, const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg) { struct zapi_route api; struct zapi_nexthop *api_nh; @@ -298,10 +306,27 @@ void route_add(struct prefix *p, vrf_id_t vrf_id, api_nh = &api.nexthops[i]; zapi_nexthop_from_nexthop(api_nh, nh); + i++; } api.nexthop_num = i; + /* Include backup nexthops, if present */ + if (backup_nhg && backup_nhg->nexthop) { + 
SET_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS); + + i = 0; + for (ALL_NEXTHOPS_PTR(backup_nhg, nh)) { + api_nh = &api.backup_nexthops[i]; + + zapi_backup_nexthop_from_nexthop(api_nh, nh); + + i++; + } + + api.backup_nexthop_num = i; + } + zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); } diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h index c995d557af..926bff676b 100644 --- a/sharpd/sharp_zebra.h +++ b/sharpd/sharp_zebra.h @@ -25,15 +25,17 @@ extern void sharp_zebra_init(void); extern void vrf_label_add(vrf_id_t vrf_id, afi_t afi, mpls_label_t label); -extern void route_add(struct prefix *p, vrf_id_t, uint8_t instance, - struct nexthop_group *nhg); +extern void route_add(const struct prefix *p, vrf_id_t, uint8_t instance, + const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg); extern void route_delete(struct prefix *p, vrf_id_t vrf_id, uint8_t instance); extern void sharp_zebra_nexthop_watch(struct prefix *p, vrf_id_t vrf_id, bool import, bool watch, bool connected); extern void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id, uint8_t instance, - struct nexthop_group *nhg, + const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg, uint32_t routes); extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id, uint8_t instance, uint32_t routes); diff --git a/staticd/static_debug.c b/staticd/static_debug.c index 9906e805a7..e43d4e79ff 100644 --- a/staticd/static_debug.c +++ b/staticd/static_debug.c @@ -3,7 +3,7 @@ * Copyright (C) 2019 Volta Networks Inc. * Mark Stapp * - * This file is part of Free Range Routing (FRR). + * This file is part of FRRouting (FRR). 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/staticd/static_debug.h b/staticd/static_debug.h index 8932e2d429..481c266e14 100644 --- a/staticd/static_debug.h +++ b/staticd/static_debug.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 Volta Networks Inc. * Mark Stapp * - * This file is part of Free Range Routing (FRR). + * This file is part of FRRouting (FRR). * * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/staticd/static_main.c b/staticd/static_main.c index 3aa8a8db3e..c77a99f280 100644 --- a/staticd/static_main.c +++ b/staticd/static_main.c @@ -104,6 +104,7 @@ struct quagga_signal_t static_signals[] = { }; static const struct frr_yang_module_info *const staticd_yang_modules[] = { + &frr_vrf_info, }; #define STATIC_VTY_PORT 2616 diff --git a/staticd/static_vty.c b/staticd/static_vty.c index a950b0473e..16cd64bcb6 100644 --- a/staticd/static_vty.c +++ b/staticd/static_vty.c @@ -1470,11 +1470,16 @@ DEFUN_NOSH (show_debugging_static, return CMD_SUCCESS; } -static struct cmd_node debug_node = {DEBUG_NODE, "", 1}; +static struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = static_config_write_debug, +}; void static_vty_init(void) { - install_node(&debug_node, static_config_write_debug); + install_node(&debug_node); install_element(CONFIG_NODE, &ip_mroute_dist_cmd); diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c index 422d397479..f6a892df42 100644 --- a/tests/bgpd/test_peer_attr.c +++ b/tests/bgpd/test_peer_attr.c @@ -1381,13 +1381,12 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa) static void bgp_startup(void) { cmd_init(1); - openzlog("testbgpd", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID, - LOG_DAEMON); + zlog_aux_init("NONE: ", LOG_DEBUG); 
zprivs_preinit(&bgpd_privs); zprivs_init(&bgpd_privs); master = thread_master_create(NULL); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); bgp_option_set(BGP_OPT_NO_LISTEN); @@ -1438,7 +1437,6 @@ static void bgp_shutdown(void) zprivs_terminate(&bgpd_privs); thread_master_free(master); master = NULL; - closezlog(); } int main(void) diff --git a/tests/helpers/c/main.c b/tests/helpers/c/main.c index 2de29cbdbc..68ed16d513 100644 --- a/tests/helpers/c/main.c +++ b/tests/helpers/c/main.c @@ -155,7 +155,7 @@ int main(int argc, char **argv) cmd_init(1); vty_init(master, false); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); /* OSPF vty inits. */ diff --git a/tests/lib/cli/common_cli.c b/tests/lib/cli/common_cli.c index e091372ab8..3cade4a2c9 100644 --- a/tests/lib/cli/common_cli.c +++ b/tests/lib/cli/common_cli.c @@ -53,7 +53,6 @@ static void vty_do_exit(int isexit) nb_terminate(); yang_terminate(); thread_master_free(master); - closezlog(); log_memstats(stderr, "testcli"); if (!isexit) @@ -71,11 +70,7 @@ int main(int argc, char **argv) /* master init. */ master = thread_master_create(NULL); - openzlog("common-cli", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID, - LOG_DAEMON); - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_STDOUT, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_MONITOR, LOG_DEBUG); + zlog_aux_init("NONE: ", ZLOG_DISABLED); /* Library inits. 
*/ cmd_init(1); @@ -84,7 +79,7 @@ int main(int argc, char **argv) vty_init(master, false); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); test_init(argc, argv); diff --git a/tests/lib/cli/test_commands.c b/tests/lib/cli/test_commands.c index bbdc8b238d..2b345c91e8 100644 --- a/tests/lib/cli/test_commands.c +++ b/tests/lib/cli/test_commands.c @@ -49,50 +49,116 @@ static vector test_cmds; static char test_buf[32768]; static struct cmd_node bgp_node = { - BGP_NODE, "%s(config-router)# ", + .name = "bgp", + .node = BGP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node rip_node = { - RIP_NODE, "%s(config-router)# ", + .name = "rip", + .node = RIP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node isis_node = { - ISIS_NODE, "%s(config-router)# ", + .name = "isis", + .node = ISIS_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", }; -static struct cmd_node rmap_node = {RMAP_NODE, "%s(config-route-map)# "}; +static struct cmd_node rmap_node = { + .name = "routemap", + .node = RMAP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-route-map)# ", +}; -static struct cmd_node zebra_node = {ZEBRA_NODE, "%s(config-router)# "}; +static struct cmd_node zebra_node = { + .name = "zebra", + .node = ZEBRA_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node bgp_vpnv4_node = {BGP_VPNV4_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_vpnv4_node = { + .name = "bgp vpnv4", + .node = BGP_VPNV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv4_node = {BGP_IPV4_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv4_node = { + 
.name = "bgp ipv4 unicast", + .node = BGP_IPV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv4m_node = {BGP_IPV4M_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv4m_node = { + .name = "bgp ipv4 multicast", + .node = BGP_IPV4M_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv6_node = {BGP_IPV6_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv6_node = { + .name = "bgp ipv6", + .node = BGP_IPV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv6m_node = {BGP_IPV6M_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv6m_node = { + .name = "bgp ipv6 multicast", + .node = BGP_IPV6M_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node ospf_node = {OSPF_NODE, "%s(config-router)# "}; +static struct cmd_node ospf_node = { + .name = "ospf", + .node = OSPF_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node ripng_node = {RIPNG_NODE, "%s(config-router)# "}; +static struct cmd_node ripng_node = { + .name = "ripng", + .node = RIPNG_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node ospf6_node = {OSPF6_NODE, "%s(config-ospf6)# "}; +static struct cmd_node ospf6_node = { + .name = "ospf6", + .node = OSPF6_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-ospf6)# ", +}; -static struct cmd_node keychain_node = {KEYCHAIN_NODE, "%s(config-keychain)# "}; +static struct cmd_node keychain_node = { + .name = "keychain", + .node = KEYCHAIN_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-keychain)# ", +}; -static struct cmd_node keychain_key_node = {KEYCHAIN_KEY_NODE, - "%s(config-keychain-key)# "}; +static struct cmd_node keychain_key_node = { + .name = "keychain key", + .node = KEYCHAIN_KEY_NODE, + .parent_node = 
KEYCHAIN_NODE, + .prompt = "%s(config-keychain-key)# ", +}; static int test_callback(const struct cmd_element *cmd, struct vty *vty, int argc, struct cmd_token *argv[]) @@ -142,26 +208,26 @@ static void test_init(void) struct cmd_element *cmd; cmd_init(1); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); - install_node(&bgp_node, NULL); - install_node(&rip_node, NULL); - install_node(&interface_node, NULL); - install_node(&rmap_node, NULL); - install_node(&zebra_node, NULL); - install_node(&bgp_vpnv4_node, NULL); - install_node(&bgp_ipv4_node, NULL); - install_node(&bgp_ipv4m_node, NULL); - install_node(&bgp_ipv6_node, NULL); - install_node(&bgp_ipv6m_node, NULL); - install_node(&ospf_node, NULL); - install_node(&ripng_node, NULL); - install_node(&ospf6_node, NULL); - install_node(&keychain_node, NULL); - install_node(&keychain_key_node, NULL); - install_node(&isis_node, NULL); - install_node(&vty_node, NULL); + install_node(&bgp_node); + install_node(&rip_node); + install_node(&interface_node); + install_node(&rmap_node); + install_node(&zebra_node); + install_node(&bgp_vpnv4_node); + install_node(&bgp_ipv4_node); + install_node(&bgp_ipv4m_node); + install_node(&bgp_ipv6_node); + install_node(&bgp_ipv6m_node); + install_node(&ospf_node); + install_node(&ripng_node); + install_node(&ospf6_node); + install_node(&keychain_node); + install_node(&keychain_key_node); + install_node(&isis_node); + install_node(&vty_node); test_init_cmd(); diff --git a/tests/lib/northbound/test_oper_data.c b/tests/lib/northbound/test_oper_data.c index 18d3180889..e16412986e 100644 --- a/tests/lib/northbound/test_oper_data.c +++ b/tests/lib/northbound/test_oper_data.c @@ -374,7 +374,6 @@ static void vty_do_exit(int isexit) nb_terminate(); yang_terminate(); thread_master_free(master); - closezlog(); log_memstats(stderr, "test-nb-oper-data"); if (!isexit) @@ -402,18 +401,14 @@ int main(int argc, char **argv) /* master init. 
*/ master = thread_master_create(NULL); - openzlog("test-nb-oper-data", "NONE", 0, - LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON); - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_STDOUT, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_MONITOR, LOG_DEBUG); + zlog_aux_init("NONE: ", ZLOG_DISABLED); /* Library inits. */ cmd_init(1); cmd_hostname_set("test"); vty_init(master, false); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, modules, array_size(modules)); /* Create artificial data. */ diff --git a/tests/lib/test_checksum.c b/tests/lib/test_checksum.c index 13d35b0e99..ddb76c8f9d 100644 --- a/tests/lib/test_checksum.c +++ b/tests/lib/test_checksum.c @@ -23,6 +23,7 @@ #include <time.h> #include "checksum.h" +#include "network.h" struct thread_master *master; @@ -477,7 +478,7 @@ int main(int argc, char **argv) exercise %= MAXDATALEN; for (i = 0; i < exercise; i += sizeof(long int)) { - long int rand = random(); + long int rand = frr_weak_random(); for (j = sizeof(long int); j > 0; j--) buffer[i + (sizeof(long int) - j)] = diff --git a/tests/lib/test_segv.c b/tests/lib/test_segv.c index 43ca0837d5..8133637adc 100644 --- a/tests/lib/test_segv.c +++ b/tests/lib/test_segv.c @@ -73,11 +73,7 @@ int main(void) master = thread_master_create(NULL); signal_init(master, array_size(sigs), sigs); - openzlog("testsegv", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID, - LOG_DAEMON); - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_STDOUT, LOG_DEBUG); - zlog_set_level(ZLOG_DEST_MONITOR, ZLOG_DISABLED); + zlog_aux_init("NONE: ", LOG_DEBUG); thread_execute(master, threadfunc, 0, 0); diff --git a/tests/lib/test_sig.c b/tests/lib/test_sig.c index cf63a3d047..2aceafb8f0 100644 --- a/tests/lib/test_sig.c +++ b/tests/lib/test_sig.c @@ -57,11 +57,7 @@ int main(void) master = thread_master_create(NULL); signal_init(master, array_size(sigs), sigs); - openzlog("testsig", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID, - 
LOG_DAEMON); - zlog_set_level(ZLOG_DEST_SYSLOG, ZLOG_DISABLED); - zlog_set_level(ZLOG_DEST_STDOUT, LOG_DEBUG); - zlog_set_level(ZLOG_DEST_MONITOR, ZLOG_DISABLED); + zlog_aux_init("NONE: ", LOG_DEBUG); while (thread_fetch(master, &t)) thread_call(&t); diff --git a/tests/lib/test_srcdest_table.c b/tests/lib/test_srcdest_table.c index 0fca571d28..9d395bee89 100644 --- a/tests/lib/test_srcdest_table.c +++ b/tests/lib/test_srcdest_table.c @@ -4,7 +4,7 @@ * Copyright (C) 2017 by David Lamparter & Christian Franke, * Open Source Routing / NetDEF Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -391,8 +391,7 @@ static void test_state_del_one_route(struct test_state *test, struct prng *prng) } assert(rn); - srcdest_rnode_prefixes(rn, (const struct prefix **)&dst_p, - (const struct prefix **)&src_p); + srcdest_rnode_prefixes(rn, &dst_p, &src_p); memcpy(&dst6_p, dst_p, sizeof(dst6_p)); if (src_p) memcpy(&src6_p, src_p, sizeof(src6_p)); diff --git a/tests/lib/test_zlog.c b/tests/lib/test_zlog.c index 07885d9847..48fa7bce94 100644 --- a/tests/lib/test_zlog.c +++ b/tests/lib/test_zlog.c @@ -20,6 +20,7 @@ #include <zebra.h> #include <memory.h> #include "log.h" +#include "network.h" /* maximum amount of data to hexdump */ #define MAXDATA 16384 @@ -37,7 +38,7 @@ static bool test_zlog_hexdump(void) uint8_t d[nl]; for (unsigned int i = 0; i < nl; i++) - d[i] = random(); + d[i] = frr_weak_random(); zlog_hexdump(d, nl - 1); nl += 1 + (nl / 2); @@ -52,9 +53,7 @@ bool (*tests[])(void) = { int main(int argc, char **argv) { - openzlog("testzlog", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID, - LOG_ERR); - zlog_set_file("test_zlog.log", LOG_DEBUG); + zlog_aux_init("NONE: ", ZLOG_DISABLED); for (unsigned int i = 0; i < array_size(tests); i++) if (!tests[i]()) diff --git a/tests/subdir.am b/tests/subdir.am 
index bce08c4034..5efdcbbd4c 100644 --- a/tests/subdir.am +++ b/tests/subdir.am @@ -90,6 +90,7 @@ check_PROGRAMS += \ endif tests/lib/cli/test_commands_defun.c: vtysh/vtysh_cmd.c + mkdir -p tests/lib/cli sed \ -e 's%"vtysh/vtysh\.h"%"tests/helpers/c/tests.h"%' \ -e 's/vtysh_init_cmd/test_init_cmd/' \ @@ -99,6 +100,7 @@ tests/lib/cli/test_commands_defun.c: vtysh/vtysh_cmd.c CLEANFILES += tests/lib/cli/test_commands_defun.c tests/isisd/test_fuzz_isis_tlv_tests.h: $(top_srcdir)/tests/isisd/test_fuzz_isis_tlv_tests.h.gz + mkdir -p tests/isisd gzip -d < $(top_srcdir)/tests/isisd/test_fuzz_isis_tlv_tests.h.gz > "$@" CLEANFILES += tests/isisd/test_fuzz_isis_tlv_tests.h diff --git a/tests/topotests/all-protocol-startup/r1/bgpd.conf b/tests/topotests/all-protocol-startup/r1/bgpd.conf index 4614287f27..7a7bba7ae7 100644 --- a/tests/topotests/all-protocol-startup/r1/bgpd.conf +++ b/tests/topotests/all-protocol-startup/r1/bgpd.conf @@ -4,6 +4,7 @@ log file bgpd.log router bgp 100 bgp router-id 192.168.0.1 bgp log-neighbor-changes + no bgp ebgp-requires-policy neighbor 192.168.7.10 remote-as 100 neighbor 192.168.7.20 remote-as 200 neighbor fc00:0:0:8::1000 remote-as 100 @@ -45,3 +46,6 @@ route-map bgp-map permit 20 line vty ! +route-map LIES deny 10 + match interface notpresent +!
\ No newline at end of file diff --git a/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref b/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref index 61d17a61b3..044cffae7a 100644 --- a/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref +++ b/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref @@ -8,16 +8,25 @@ C>* 192.168.6.0/26 is directly connected, r1-eth6, XX:XX:XX C>* 192.168.7.0/26 is directly connected, r1-eth7, XX:XX:XX C>* 192.168.8.0/26 is directly connected, r1-eth8, XX:XX:XX C>* 192.168.9.0/26 is directly connected, r1-eth9, XX:XX:XX -O 192.168.0.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX -O 192.168.3.0/26 [110/10] is directly connected, r1-eth3, XX:XX:XX -S>* 1.1.1.1/32 [1/0] is directly connected, r1-eth0, XX:XX:XX -S>* 1.1.1.2/32 [1/0] is directly connected, r1-eth1, XX:XX:XX -S>* 4.5.6.10/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX -S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX -S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, XX:XX:XX -S>* 4.5.6.13/32 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4.5.6.14/32 [1/0] unreachable (blackhole), XX:XX:XX -S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, XX:XX:XX -S>* 4.5.6.7/32 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4.5.6.8/32 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4.5.6.9/32 [1/0] unreachable (ICMP unreachable), XX:XX:XX +O 192.168.0.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX +O 192.168.3.0/26 [110/10] is directly connected, r1-eth3, weight 1, XX:XX:XX +S>* 1.1.1.1/32 [1/0] is directly connected, r1-eth1, weight 1, XX:XX:XX +S>* 1.1.1.2/32 [1/0] is directly connected, r1-eth2, weight 1, XX:XX:XX +S>* 1.1.1.3/32 [1/0] is directly connected, r1-eth3, weight 1, XX:XX:XX +S>* 1.1.1.4/32 [1/0] is directly connected, r1-eth4, weight 1, XX:XX:XX +S>* 1.1.1.5/32 [1/0] is directly connected, r1-eth5, weight 1, XX:XX:XX +S>* 1.1.1.6/32 [1/0] is directly connected, r1-eth6, weight 1, XX:XX:XX +S>* 1.1.1.7/32 [1/0] is directly 
connected, r1-eth7, weight 1, XX:XX:XX +S>* 1.1.1.8/32 [1/0] is directly connected, r1-eth8, weight 1, XX:XX:XX +S>* 4.5.6.10/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.13/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4.5.6.14/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S 4.5.6.16/32 [10/0] via 192.168.0.4, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.16/32 [5/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.17/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.7/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4.5.6.8/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4.5.6.9/32 [1/0] unreachable (ICMP unreachable), weight 1, XX:XX:XX diff --git a/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref b/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref index d5bc16a2bf..ef12d615dc 100644 --- a/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref +++ b/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref @@ -19,11 +19,11 @@ C * fe80::/64 is directly connected, r1-eth6, XX:XX:XX C * fe80::/64 is directly connected, r1-eth7, XX:XX:XX C * fe80::/64 is directly connected, r1-eth8, XX:XX:XX C * fe80::/64 is directly connected, r1-eth9, XX:XX:XX -O fc00:0:0:4::/64 [110/10] is directly connected, r1-eth4, XX:XX:XX -S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX -S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX -S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, XX:XX:XX -S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, XX:XX:XX -S>* 4:5::6:7/128 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4:5::6:8/128 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), XX:XX:XX +O fc00:0:0:4::/64 [110/10] is directly connected, 
r1-eth4, weight 1, XX:XX:XX +S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX +S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX +S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX +S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX +S>* 4:5::6:7/128 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4:5::6:8/128 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), weight 1, XX:XX:XX diff --git a/tests/topotests/all-protocol-startup/r1/ospf6d.conf b/tests/topotests/all-protocol-startup/r1/ospf6d.conf index 941d3016c7..5c6f071644 100644 --- a/tests/topotests/all-protocol-startup/r1/ospf6d.conf +++ b/tests/topotests/all-protocol-startup/r1/ospf6d.conf @@ -14,3 +14,6 @@ router ospf6 ! line vty ! +route-map LIES deny 10 + match interface notpresent +! diff --git a/tests/topotests/all-protocol-startup/r1/ospfd.conf b/tests/topotests/all-protocol-startup/r1/ospfd.conf index 549f36fab4..bac9f61620 100644 --- a/tests/topotests/all-protocol-startup/r1/ospfd.conf +++ b/tests/topotests/all-protocol-startup/r1/ospfd.conf @@ -11,3 +11,6 @@ router ospf ! line vty ! +route-map LIES deny 10 + match interface notpresent +! diff --git a/tests/topotests/all-protocol-startup/r1/ripd.conf b/tests/topotests/all-protocol-startup/r1/ripd.conf index 4b35630b36..ace7608873 100644 --- a/tests/topotests/all-protocol-startup/r1/ripd.conf +++ b/tests/topotests/all-protocol-startup/r1/ripd.conf @@ -10,3 +10,6 @@ router rip line vty ! +route-map LIES deny 10 + match interface notpresent +! diff --git a/tests/topotests/all-protocol-startup/r1/ripngd.conf b/tests/topotests/all-protocol-startup/r1/ripngd.conf index 199fe15ab9..2e0aef3a14 100644 --- a/tests/topotests/all-protocol-startup/r1/ripngd.conf +++ b/tests/topotests/all-protocol-startup/r1/ripngd.conf @@ -9,3 +9,6 @@ router ripng line vty ! +route-map LIES deny 10 + match interface notpresent +! 
diff --git a/tests/topotests/all-protocol-startup/r1/show_route_map.ref b/tests/topotests/all-protocol-startup/r1/show_route_map.ref new file mode 100644 index 0000000000..25786081d1 --- /dev/null +++ b/tests/topotests/all-protocol-startup/r1/show_route_map.ref @@ -0,0 +1,73 @@ +ZEBRA: +route-map: LIES Invoked: 0 Optimization: enabled + deny, sequence 10 Invoked 0 + Match clauses: + interface notpresent + Set clauses: + Call clause: + Action: + Exit routemap +RIP: +route-map: LIES Invoked: 0 Optimization: enabled + deny, sequence 10 Invoked 0 + Match clauses: + interface notpresent + Set clauses: + Call clause: + Action: + Exit routemap +RIPNG: +route-map: LIES Invoked: 0 Optimization: enabled + deny, sequence 10 Invoked 0 + Match clauses: + interface notpresent + Set clauses: + Call clause: + Action: + Exit routemap +OSPF: +route-map: LIES Invoked: 0 Optimization: enabled + deny, sequence 10 Invoked 0 + Match clauses: + interface notpresent + Set clauses: + Call clause: + Action: + Exit routemap +OSPF6: +route-map: LIES Invoked: 0 Optimization: enabled + deny, sequence 10 Invoked 0 + Match clauses: + interface notpresent + Set clauses: + Call clause: + Action: + Exit routemap +BGP: +route-map: LIES Invoked: 0 Optimization: enabled + deny, sequence 10 Invoked 0 + Match clauses: + interface notpresent + Set clauses: + Call clause: + Action: + Exit routemap +route-map: bgp-map Invoked: 0 Optimization: enabled + permit, sequence 10 Invoked 0 + Match clauses: + Set clauses: + community 100:100 additive + local-preference 100 + Call clause: + Action: + Exit routemap + permit, sequence 20 Invoked 0 + Match clauses: + Set clauses: + metric 10 + local-preference 200 + Call clause: + Action: + Exit routemap +ISIS: +SHARP: diff --git a/tests/topotests/all-protocol-startup/r1/zebra.conf b/tests/topotests/all-protocol-startup/r1/zebra.conf index fbf827604f..c5ef79630e 100644 --- a/tests/topotests/all-protocol-startup/r1/zebra.conf +++ 
b/tests/topotests/all-protocol-startup/r1/zebra.conf @@ -26,10 +26,25 @@ ipv6 route 4:5::6:12/128 r1-eth0 # by zebra but not installed. ip route 4.5.6.15/32 192.168.0.2 255 ipv6 route 4:5::6:15/128 fc00:0:0:0::2 255 - # Routes to put into a nexthop-group -ip route 1.1.1.1/32 r1-eth0 -ip route 1.1.1.2/32 r1-eth1 +ip route 1.1.1.1/32 r1-eth1 +ip route 1.1.1.2/32 r1-eth2 +ip route 1.1.1.3/32 r1-eth3 +ip route 1.1.1.4/32 r1-eth4 +ip route 1.1.1.5/32 r1-eth5 +ip route 1.1.1.6/32 r1-eth6 +ip route 1.1.1.7/32 r1-eth7 +ip route 1.1.1.8/32 r1-eth8 + +# Create a route that has overlapping distance +# so we have backups +ip route 4.5.6.16/32 192.168.0.2 5 +ip route 4.5.6.16/32 192.168.0.4 10 + +# Create routes that have different tags +# and how we handle it +ip route 4.5.6.17/32 192.168.0.2 tag 9000 +ip route 4.5.6.17/32 192.168.0.2 tag 10000 ! interface r1-eth0 @@ -100,3 +115,6 @@ ipv6 forwarding line vty ! +route-map LIES deny 10 + match interface notpresent +! diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py index a671e14e07..f78c2b4bc0 100755 --- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py +++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py @@ -347,6 +347,35 @@ def test_converge_protocols(): # For debugging after starting FRR/Quagga daemons, uncomment the next line ## CLI(net) +def route_get_nhg_id(route_str): + output = net["r1"].cmd('vtysh -c "show ip route %s nexthop-group"' % route_str) + match = re.search(r"Nexthop Group ID: (\d+)", output) + assert match is not None, "Nexthop Group ID not found for sharpd route %s" % route_str + + nhg_id = int(match.group(1)) + return nhg_id + +def verify_nexthop_group(nhg_id, recursive=False): + # Verify NHG is valid/installed + output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id) + + match = re.search(r"Valid", output) + assert match is not None, "Nexthop Group ID=%d not 
marked Valid" % nhg_id + + # If recursive, we need to look at its resolved group + if recursive: + match = re.search(r"Depends: \((\d+)\)", output) + resolved_id = int(match.group(1)) + verify_nexthop_group(resolved_id, False) + else: + match = re.search(r"Installed", output) + assert match is not None, "Nexthop Group ID=%d not marked Installed" % nhg_id + +def verify_route_nexthop_group(route_str, recursive=False): + # Verify route and that zebra created NHGs for and they are valid/installed + nhg_id = route_get_nhg_id(route_str) + verify_nexthop_group(nhg_id, recursive) + def test_nexthop_groups(): global fatal_error global net @@ -358,25 +387,77 @@ def test_nexthop_groups(): print("\n\n** Verifying Nexthop Groups") print("******************************************\n") + ### Nexthop Group Tests + + ## Basic test + # Create a lib nexthop-group - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group red" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"') + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group basic" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"') # Create with sharpd using nexthop-group - net["r1"].cmd('vtysh -c "sharp install routes 2.2.2.1 nexthop-group red 1"') + net["r1"].cmd('vtysh -c "sharp install routes 2.2.2.1 nexthop-group basic 1"') - # Verify route and that zebra created NHGs for and they are valid/installed - output = net["r1"].cmd('vtysh -c "show ip route 2.2.2.1/32 nexthop-group"') - match = re.search(r"Nexthop Group ID: (\d+)", output); - assert match is not None, "Nexthop Group ID not found for sharpd route 2.2.2.1/32" + verify_route_nexthop_group("2.2.2.1/32") - nhe_id = int(match.group(1)) + ## Connected - output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhe_id) - match = re.search(r"Valid", output) - assert match is not None, "Nexthop Group ID=%d not marked Valid" % nhe_id + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group connected" -c "nexthop r1-eth1" -c "nexthop r1-eth2"') - match = re.search(r"Installed", output) - assert match is 
not None, "Nexthop Group ID=%d not marked Installed" % nhe_id + net["r1"].cmd('vtysh -c "sharp install routes 2.2.2.2 nexthop-group connected 1"') + + verify_route_nexthop_group("2.2.2.2/32") + + ## Recursive + + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group basic-recursive" -c "nexthop 2.2.2.1"') + + net["r1"].cmd('vtysh -c "sharp install routes 3.3.3.1 nexthop-group basic-recursive 1"') + + verify_route_nexthop_group("3.3.3.1/32", True) + + ## Duplicate + + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group duplicate" -c "nexthop 2.2.2.1" -c "nexthop 1.1.1.1"') + + net["r1"].cmd('vtysh -c "sharp install routes 3.3.3.2 nexthop-group duplicate 1"') + + verify_route_nexthop_group("3.3.3.2/32") + + ## Two 4-Way ECMP + + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group fourA" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2" \ + -c "nexthop 1.1.1.3" -c "nexthop 1.1.1.4"') + + net["r1"].cmd('vtysh -c "sharp install routes 4.4.4.1 nexthop-group fourA 1"') + + verify_route_nexthop_group("4.4.4.1/32") + + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group fourB" -c "nexthop 1.1.1.5" -c "nexthop 1.1.1.6" \ + -c "nexthop 1.1.1.7" -c "nexthop 1.1.1.8"') + + net["r1"].cmd('vtysh -c "sharp install routes 4.4.4.2 nexthop-group fourB 1"') + + verify_route_nexthop_group("4.4.4.2/32") + + ## Recursive to 8-Way ECMP + + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group eight-recursive" -c "nexthop 4.4.4.1" -c "nexthop 4.4.4.2"') + + net["r1"].cmd('vtysh -c "sharp install routes 5.5.5.1 nexthop-group eight-recursive 1"') + + verify_route_nexthop_group("5.5.5.1/32") + + ##CLI(net) + + ## Remove all NHG routes + + net["r1"].cmd('vtysh -c "sharp remove routes 2.2.2.1 1"') + net["r1"].cmd('vtysh -c "sharp remove routes 2.2.2.2 1"') + net["r1"].cmd('vtysh -c "sharp remove routes 3.3.3.1 1"') + net["r1"].cmd('vtysh -c "sharp remove routes 3.3.3.2 1"') + net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.1 1"') + net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.2 1"') + net["r1"].cmd('vtysh -c "sharp 
remove routes 5.5.5.1 1"') def test_rip_status(): global fatal_error @@ -891,7 +972,114 @@ def test_bgp_ipv6(): # For debugging after starting FRR/Quagga daemons, uncomment the next line # CLI(net) +def test_route_map(): + global fatal_error + global net + + if (fatal_error != ""): + pytest.skip(fatal_error) + thisDir = os.path.dirname(os.path.realpath(__file__)) + + print("\n\n** Verifying some basic routemap forward references\n") + print("*******************************************************\n") + failures = 0 + for i in range(1, 2): + refroutemap = '%s/r%s/show_route_map.ref' % (thisDir, i) + if os.path.isfile(refroutemap): + expected = open(refroutemap).read().rstrip() + expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + + actual = net['r%s' %i].cmd('vtysh -c "show route-map" 2> /dev/null').rstrip() + actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + + diff = topotest.get_textdiff(actual, expected, + title1="actual show route-map", + title2="expected show route-map") + + if diff: + sys.stderr.write('r%s failed show route-map command Check:\n%s\n' % (i, diff)) + failures += 1 + else: + print("r%s ok" %i) + + assert failures == 0, "Show route-map command failed for router r%s:\n%s" % (i, diff) + +def test_nexthop_groups_with_route_maps(): + global fatal_error + global net + + # Skip if previous fatal error condition is raised + if (fatal_error != ""): + pytest.skip(fatal_error) + + print("\n\n** Verifying Nexthop Groups With Route-Maps") + print("******************************************\n") + + ### Nexthop Group With Route-Map Tests + + # Create a lib nexthop-group + net["r1"].cmd('vtysh -c "c t" -c "nexthop-group test" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"') + + ## Route-Map Proto Source + + route_str = "2.2.2.1" + src_str = "192.168.0.1" + + net["r1"].cmd('vtysh -c "c t" -c "route-map NH-SRC permit 111" -c "set src %s"' % src_str) + net["r1"].cmd('vtysh -c "c t" -c "ip protocol sharp route-map NH-SRC"') + + 
net["r1"].cmd('vtysh -c "sharp install routes %s nexthop-group test 1"' % route_str) + + verify_route_nexthop_group("%s/32" % route_str) + + # Only a valid test on linux using nexthop objects + if sys.platform.startswith("linux"): + output = net["r1"].cmd('ip route show %s/32' % route_str) + match = re.search(r"src %s" % src_str, output) + assert match is not None, "Route %s/32 not installed with src %s" % (route_str, src_str) + + # Remove NHG routes and route-map + net["r1"].cmd('vtysh -c "sharp remove routes %s 1"' % route_str) + net["r1"].cmd('vtysh -c "c t" -c "no ip protocol sharp route-map NH-SRC"') + net["r1"].cmd('vtysh -c "c t" -c "no route-map NH-SRC permit 111" -c "set src %s"' % src_str) + net["r1"].cmd('vtysh -c "c t" -c "no route-map NH-SRC"') + + ## Route-Map Deny/Permit with same nexthop group + + permit_route_str = "3.3.3.1" + deny_route_str = "3.3.3.2" + + net["r1"].cmd('vtysh -c "c t" -c "ip prefix-list NOPE seq 5 permit %s/32"' % permit_route_str) + net["r1"].cmd('vtysh -c "c t" -c "route-map NOPE permit 111" -c "match ip address prefix-list NOPE"') + net["r1"].cmd('vtysh -c "c t" -c "route-map NOPE deny 222"') + net["r1"].cmd('vtysh -c "c t" -c "ip protocol sharp route-map NOPE"') + + # This route should be permitted + net["r1"].cmd('vtysh -c "sharp install routes %s nexthop-group test 1"' % permit_route_str) + + verify_route_nexthop_group("%s/32" % permit_route_str) + + # This route should be denied + net["r1"].cmd('vtysh -c "sharp install routes %s nexthop-group test 1"' % deny_route_str) + + nhg_id = route_get_nhg_id(deny_route_str) + output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id) + + match = re.search(r"Valid", output) + assert match is None, "Nexthop Group ID=%d should not be marked Valid" % nhg_id + + match = re.search(r"Installed", output) + assert match is None, "Nexthop Group ID=%d should not be marked Installed" % nhg_id + + # Remove NHG routes and route-map + net["r1"].cmd('vtysh -c "sharp remove routes %s 
1"' % permit_route_str) + net["r1"].cmd('vtysh -c "sharp remove routes %s 1"' % deny_route_str) + net["r1"].cmd('vtysh -c "c t" -c "no ip protocol sharp route-map NOPE"') + net["r1"].cmd('vtysh -c "c t" -c "no route-map NOPE permit 111"') + net["r1"].cmd('vtysh -c "c t" -c "no route-map NOPE deny 222"') + net["r1"].cmd('vtysh -c "c t" -c "no route-map NOPE"') + net["r1"].cmd('vtysh -c "c t" -c "no ip prefix-list NOPE seq 5 permit %s/32"' % permit_route_str) def test_mpls_interfaces(): global fatal_error diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r1/bfdd.conf b/tests/topotests/bfd-bgp-cbit-topo3/r1/bfdd.conf new file mode 100644 index 0000000000..f35e772790 --- /dev/null +++ b/tests/topotests/bfd-bgp-cbit-topo3/r1/bfdd.conf @@ -0,0 +1,5 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json b/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json index 54ae57f7be..ac5fd04074 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json +++ b/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json @@ -14,7 +14,6 @@ "prefix": "2001:db8:6::", "prefixLen": 64, "network": "2001:db8:6::\/64", - "med": 0, "metric": 0, "weight": 0, "peerId": "2001:db8:4::1", @@ -37,7 +36,6 @@ "prefix": "2001:db8:7::", "prefixLen": 64, "network": "2001:db8:7::\/64", - "med": 0, "metric": 0, "weight": 0, "peerId": "2001:db8:4::1", @@ -60,7 +58,6 @@ "prefix": "2001:db8:8::", "prefixLen": 64, "network": "2001:db8:8::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", @@ -83,7 +80,6 @@ "prefix": "2001:db8:9::", "prefixLen": 64, "network": "2001:db8:9::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r1/bgpd.conf b/tests/topotests/bfd-bgp-cbit-topo3/r1/bgpd.conf index fa6d60a8fc..f69b3c4ba3 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/r1/bgpd.conf +++ 
b/tests/topotests/bfd-bgp-cbit-topo3/r1/bgpd.conf @@ -1,6 +1,7 @@ debug bgp neighbor-events router bgp 101 bgp router-id 10.254.254.1 + no bgp ebgp-requires-policy timers bgp 8 24 bgp graceful-restart neighbor 2001:db8:4::1 remote-as 102 @@ -14,7 +15,7 @@ router bgp 101 exit-address-family address-family ipv6 unicast network 2001:db8:8::/64 - network 2001:db8:9::/64 + network 2001:db8:9::/64 neighbor 2001:db8:4::1 activate exit-address-family ! diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r3/bfdd.conf b/tests/topotests/bfd-bgp-cbit-topo3/r3/bfdd.conf new file mode 100644 index 0000000000..f35e772790 --- /dev/null +++ b/tests/topotests/bfd-bgp-cbit-topo3/r3/bfdd.conf @@ -0,0 +1,5 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json b/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json index a3bb222504..ab42b05e85 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json +++ b/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json @@ -13,7 +13,6 @@ "prefix": "2001:db8:6::", "prefixLen": 64, "network": "2001:db8:6::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", @@ -36,7 +35,6 @@ "prefix": "2001:db8:7::", "prefixLen": 64, "network": "2001:db8:7::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r3/bgpd.conf b/tests/topotests/bfd-bgp-cbit-topo3/r3/bgpd.conf index ea5334029c..51b611b2a7 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/r3/bgpd.conf +++ b/tests/topotests/bfd-bgp-cbit-topo3/r3/bgpd.conf @@ -1,6 +1,7 @@ debug bgp neighbor-events router bgp 102 bgp router-id 10.254.254.3 + no bgp ebgp-requires-policy timers bgp 20 60 bgp graceful-restart ! 
simulate NSF machine diff --git a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py index e2bd80daa8..186dac31a0 100755 --- a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py +++ b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py @@ -33,7 +33,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -44,23 +44,26 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers. for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): "Sets up the pytest environment" @@ -71,16 +74,13 @@ def setup_module(mod): for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)), ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, 
'{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. @@ -90,10 +90,11 @@ def setup_module(mod): # daemon exists. for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -112,17 +113,17 @@ def test_protocols_convergence(): # Check IPv6 routing tables. logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=40, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ipv6 route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -133,19 +134,21 @@ def test_bfd_connection(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, 
- router, 'show bfd peers json', expected) - _, result = topotest.run_and_expect(test_func, None, count=16, wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=32, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_bfd_loss_intermediate(): """ Assert that BFD notices the bfd link down failure. @@ -155,94 +158,98 @@ def test_bfd_loss_intermediate(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('removing IPv6 address from r2 to simulate loss of connectivity') + logger.info("removing IPv6 address from r2 to simulate loss of connectivity") # Disable r2-eth0 ipv6 address - cmd = 'vtysh -c \"configure terminal\" -c \"interface r2-eth1\" -c "no ipv6 address 2001:db8:4::2/64\"' - tgen.net['r2'].cmd(cmd) - + cmd = 'vtysh -c "configure terminal" -c "interface r2-eth1" -c "no ipv6 address 2001:db8:4::2/64"' + tgen.net["r2"].cmd(cmd) + # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD converge down') + logger.info("waiting for BFD converge down") # Check that BGP converged quickly. 
for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/peers_down.json'.format(CWD, router.name) + json_file = "{}/{}/peers_down.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) - _, result = topotest.run_and_expect(test_func, None, count=16, wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=32, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg - logger.info('waiting for BGP entries to become stale') + logger.info("waiting for BGP entries to become stale") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/bgp_ipv6_routes_down.json'.format(CWD, router.name) + json_file = "{}/{}/bgp_ipv6_routes_down.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bgp ipv6 json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bgp ipv6 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=1) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg logger.info("Checking IPv6 routes on r1 should still be present") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - if router.name == 'r3': + if router.name == "r3": continue - json_file = '{}/r1/ipv6_routes.json'.format(CWD) + json_file = "{}/r1/ipv6_routes.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=30, - 
wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ipv6 route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_bfd_comes_back_again(): """ Assert that BFD notices the bfd link up and that ipv6 entries appear back """ tgen = get_topogen() - logger.info('re-adding IPv6 address from r2 to simulate connectivity is back') + logger.info("re-adding IPv6 address from r2 to simulate connectivity is back") # adds back r2-eth0 ipv6 address - cmd = 'vtysh -c \"configure terminal\" -c \"interface r2-eth1\" -c "ipv6 address 2001:db8:4::2/64\"' - tgen.net['r2'].cmd(cmd) + cmd = 'vtysh -c "configure terminal" -c "interface r2-eth1" -c "ipv6 address 2001:db8:4::2/64"' + tgen.net["r2"].cmd(cmd) # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD to converge up') + logger.info("waiting for BFD to converge up") # Check that BGP converged quickly. for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=16, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg - + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-topo1/r1/bfdd.conf b/tests/topotests/bfd-topo1/r1/bfdd.conf index 131b01f0d9..4102000337 100644 --- a/tests/topotests/bfd-topo1/r1/bfdd.conf +++ b/tests/topotests/bfd-topo1/r1/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! bfd peer 192.168.0.2 echo-mode diff --git a/tests/topotests/bfd-topo1/r1/bgp_prefixes.json b/tests/topotests/bfd-topo1/r1/bgp_prefixes.json index 4b2cc1ad62..1262f5e984 100644 --- a/tests/topotests/bfd-topo1/r1/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r1/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.0.2", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.0.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.0.2", diff --git a/tests/topotests/bfd-topo1/r1/bgpd.conf b/tests/topotests/bfd-topo1/r1/bgpd.conf index 78a5611844..87f20d29e9 100644 --- a/tests/topotests/bfd-topo1/r1/bgpd.conf +++ b/tests/topotests/bfd-topo1/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 101 + no bgp ebgp-requires-policy neighbor 192.168.0.2 remote-as 102 neighbor 192.168.0.2 bfd address-family ipv4 unicast diff --git a/tests/topotests/bfd-topo1/r2/bfdd.conf b/tests/topotests/bfd-topo1/r2/bfdd.conf index cb4357172b..412450ca1e 100644 --- a/tests/topotests/bfd-topo1/r2/bfdd.conf +++ b/tests/topotests/bfd-topo1/r2/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 192.168.0.1 receive-interval 1000 diff --git a/tests/topotests/bfd-topo1/r2/bgp_prefixes.json b/tests/topotests/bfd-topo1/r2/bgp_prefixes.json index 39f3c0a835..0d47c0fc30 100644 --- a/tests/topotests/bfd-topo1/r2/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r2/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "101", + "path": "101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.0.1", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "103", + "path": "103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.1.1", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "104", + "path": "104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.2.1", diff --git a/tests/topotests/bfd-topo1/r2/bgpd.conf b/tests/topotests/bfd-topo1/r2/bgpd.conf index af10cfaf40..f87e12f0a7 100644 --- a/tests/topotests/bfd-topo1/r2/bgpd.conf +++ b/tests/topotests/bfd-topo1/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 102 + no bgp ebgp-requires-policy neighbor 192.168.0.1 remote-as 101 neighbor 192.168.0.1 bfd neighbor 192.168.1.1 remote-as 103 diff --git a/tests/topotests/bfd-topo1/r3/bfdd.conf b/tests/topotests/bfd-topo1/r3/bfdd.conf index a5d38c8162..4cf20bb904 100644 --- a/tests/topotests/bfd-topo1/r3/bfdd.conf +++ b/tests/topotests/bfd-topo1/r3/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 192.168.1.2 echo-interval 100 diff --git a/tests/topotests/bfd-topo1/r3/bgp_prefixes.json b/tests/topotests/bfd-topo1/r3/bgp_prefixes.json index c92d4e052a..36fca17bbf 100644 --- a/tests/topotests/bfd-topo1/r3/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r3/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.1.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.1.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.1.2", diff --git a/tests/topotests/bfd-topo1/r3/bgpd.conf b/tests/topotests/bfd-topo1/r3/bgpd.conf index 041fd7a759..0340e067f1 100644 --- a/tests/topotests/bfd-topo1/r3/bgpd.conf +++ b/tests/topotests/bfd-topo1/r3/bgpd.conf @@ -1,4 +1,5 @@ router bgp 103 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 102 neighbor 192.168.1.2 bfd address-family ipv4 unicast diff --git a/tests/topotests/bfd-topo1/r4/bfdd.conf b/tests/topotests/bfd-topo1/r4/bfdd.conf index 029dfba355..34b88c9a35 100644 --- a/tests/topotests/bfd-topo1/r4/bfdd.conf +++ b/tests/topotests/bfd-topo1/r4/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 192.168.2.2 transmit-interval 2000 diff --git a/tests/topotests/bfd-topo1/r4/bgp_prefixes.json b/tests/topotests/bfd-topo1/r4/bgp_prefixes.json index cc8510dd61..efe7d47b1a 100644 --- a/tests/topotests/bfd-topo1/r4/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r4/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.2.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.2.2", @@ -34,7 +34,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.2.2", diff --git a/tests/topotests/bfd-topo1/r4/bgpd.conf b/tests/topotests/bfd-topo1/r4/bgpd.conf index 9c504699ba..980d927e97 100644 --- a/tests/topotests/bfd-topo1/r4/bgpd.conf +++ b/tests/topotests/bfd-topo1/r4/bgpd.conf @@ -1,4 +1,5 @@ router bgp 104 + no bgp ebgp-requires-policy neighbor 192.168.2.2 remote-as 102 neighbor 192.168.2.2 bfd address-family ipv4 unicast diff --git a/tests/topotests/bfd-topo1/test_bfd_topo1.py b/tests/topotests/bfd-topo1/test_bfd_topo1.py index 4fd4f97436..e1865dc5a8 100644 --- a/tests/topotests/bfd-topo1/test_bfd_topo1.py +++ b/tests/topotests/bfd-topo1/test_bfd_topo1.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,27 +45,29 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) def setup_module(mod): @@ -76,16 +78,13 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. @@ -95,8 +94,8 @@ def setup_module(mod): # daemon exists. 
for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break @@ -112,14 +111,15 @@ def test_bfd_connection(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -131,15 +131,16 @@ def test_bgp_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers to go up') + logger.info("waiting for bgp peers to go up") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_summary.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp summary json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -149,15 +150,16 @@ def test_bgp_fast_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers converge') + logger.info("waiting for 
bgp peers converge") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show ip bgp json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -171,29 +173,30 @@ def test_bfd_fast_convergence(): pytest.skip(tgen.errors) # Disable r1-eth0 link. - tgen.gears['r1'].link_enable('r1-eth0', enabled=False) + tgen.gears["r1"].link_enable("r1-eth0", enabled=False) # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD converge') + logger.info("waiting for BFD converge") # Check that BGP converged quickly. for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) # Load the same file as previous test, but expect R1 to be down. 
- if router.name == 'r1': + if router.name == "r1": for peer in expected: - if peer['peer'] == '192.168.0.2': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.2": + peer["status"] = "down" else: for peer in expected: - if peer['peer'] == '192.168.0.1': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.1": + peer["status"] = "down" - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert res is None, assertmsg @@ -205,31 +208,27 @@ def test_bgp_fast_reconvergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for BGP re convergence') + logger.info("waiting for BGP re convergence") # Check that BGP converged quickly. for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) # Load the same file as previous test, but set networks to None # to test absence. 
- if router.name == 'r1': - expected['routes']['10.254.254.2/32'] = None - expected['routes']['10.254.254.3/32'] = None - expected['routes']['10.254.254.4/32'] = None + if router.name == "r1": + expected["routes"]["10.254.254.2/32"] = None + expected["routes"]["10.254.254.3/32"] = None + expected["routes"]["10.254.254.4/32"] = None else: - expected['routes']['10.254.254.1/32'] = None - - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp json', expected) - _, res = topotest.run_and_expect( - test_func, - None, - count=3, - wait=1 + expected["routes"]["10.254.254.1/32"] = None + + test_func = partial( + topotest.router_json_cmp, router, "show ip bgp json", expected ) - assertmsg = '{}: bgp did not converge'.format(router.name) + _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -237,11 +236,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-topo2/r1/bfdd.conf b/tests/topotests/bfd-topo2/r1/bfdd.conf index 5c2571bdbd..f03135021e 100644 --- a/tests/topotests/bfd-topo2/r1/bfdd.conf +++ b/tests/topotests/bfd-topo2/r1/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 2001:db8:4::1 multihop local-address 2001:db8:1::1 no shutdown diff --git a/tests/topotests/bfd-topo2/r1/bgpd.conf b/tests/topotests/bfd-topo2/r1/bgpd.conf index 1623b4578b..4d96bec2cb 100644 --- a/tests/topotests/bfd-topo2/r1/bgpd.conf +++ b/tests/topotests/bfd-topo2/r1/bgpd.conf @@ -1,5 +1,6 @@ router bgp 101 bgp router-id 10.254.254.1 + no bgp ebgp-requires-policy neighbor r2g peer-group neighbor r2g remote-as external neighbor r2g bfd diff --git a/tests/topotests/bfd-topo2/r2/bfdd.conf b/tests/topotests/bfd-topo2/r2/bfdd.conf new file mode 100644 index 0000000000..f35e772790 --- /dev/null +++ b/tests/topotests/bfd-topo2/r2/bfdd.conf @@ -0,0 +1,5 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! diff --git a/tests/topotests/bfd-topo2/r2/bgpd.conf b/tests/topotests/bfd-topo2/r2/bgpd.conf index bf42d21812..4d02fc4f29 100644 --- a/tests/topotests/bfd-topo2/r2/bgpd.conf +++ b/tests/topotests/bfd-topo2/r2/bgpd.conf @@ -1,5 +1,6 @@ router bgp 102 bgp router-id 10.254.254.2 + no bgp ebgp-requires-policy neighbor r2g peer-group neighbor r2g remote-as external neighbor r2g bfd diff --git a/tests/topotests/bfd-topo2/r3/bfdd.conf b/tests/topotests/bfd-topo2/r3/bfdd.conf new file mode 100644 index 0000000000..f35e772790 --- /dev/null +++ b/tests/topotests/bfd-topo2/r3/bfdd.conf @@ -0,0 +1,5 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! diff --git a/tests/topotests/bfd-topo2/r4/bfdd.conf b/tests/topotests/bfd-topo2/r4/bfdd.conf index fdb4412446..0173dc9be2 100644 --- a/tests/topotests/bfd-topo2/r4/bfdd.conf +++ b/tests/topotests/bfd-topo2/r4/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 2001:db8:1::1 multihop local-address 2001:db8:4::1 no shutdown diff --git a/tests/topotests/bfd-topo2/test_bfd_topo2.py b/tests/topotests/bfd-topo2/test_bfd_topo2.py index 773db129f0..3e87e8485a 100644 --- a/tests/topotests/bfd-topo2/test_bfd_topo2.py +++ b/tests/topotests/bfd-topo2/test_bfd_topo2.py @@ -35,7 +35,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -49,25 +49,26 @@ from mininet.topo import Topo class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers. for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) def setup_module(mod): @@ -78,24 +79,19 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, 
"{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF6, - os.path.join(CWD, '{}/ospf6d.conf'.format(rname)) + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname)) ) # Initialize all routers. @@ -105,8 +101,8 @@ def setup_module(mod): # daemon exists. for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break @@ -128,32 +124,32 @@ def test_protocols_convergence(): # Check IPv4 routing tables. logger.info("Checking IPv4 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ip route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg # Check IPv6 routing tables. 
logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ipv6 route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -164,14 +160,15 @@ def test_bfd_connection(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -181,11 +178,11 @@ def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-vrf-topo1/r1/bfdd.conf b/tests/topotests/bfd-vrf-topo1/r1/bfdd.conf index 3466e6a3ca..5e736a7fcc 100644 --- a/tests/topotests/bfd-vrf-topo1/r1/bfdd.conf +++ b/tests/topotests/bfd-vrf-topo1/r1/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! bfd peer 192.168.0.2 vrf r1-cust1 echo-mode diff --git a/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json index 4b2cc1ad62..1262f5e984 100644 --- a/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.0.2", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.0.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.0.2", diff --git a/tests/topotests/bfd-vrf-topo1/r1/bgpd.conf b/tests/topotests/bfd-vrf-topo1/r1/bgpd.conf index 7ad4e2bd74..136e53a43f 100644 --- a/tests/topotests/bfd-vrf-topo1/r1/bgpd.conf +++ b/tests/topotests/bfd-vrf-topo1/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 101 vrf r1-cust1 + no bgp ebgp-requires-policy neighbor 192.168.0.2 remote-as 102 ! 
neighbor 192.168.0.2 ebgp-multihop 10 neighbor 192.168.0.2 bfd diff --git a/tests/topotests/bfd-vrf-topo1/r2/bfdd.conf b/tests/topotests/bfd-vrf-topo1/r2/bfdd.conf index 3481ea8c87..94f502c7d9 100644 --- a/tests/topotests/bfd-vrf-topo1/r2/bfdd.conf +++ b/tests/topotests/bfd-vrf-topo1/r2/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! bfd peer 192.168.0.1 vrf r2-cust1 receive-interval 1000 diff --git a/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json index 39f3c0a835..0d47c0fc30 100644 --- a/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "101", + "path": "101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.0.1", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "103", + "path": "103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.1.1", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "104", + "path": "104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.2.1", diff --git a/tests/topotests/bfd-vrf-topo1/r2/bgpd.conf b/tests/topotests/bfd-vrf-topo1/r2/bgpd.conf index 0715ea32a5..2bd13e22b2 100644 --- a/tests/topotests/bfd-vrf-topo1/r2/bgpd.conf +++ b/tests/topotests/bfd-vrf-topo1/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 102 vrf r2-cust1 + no bgp ebgp-requires-policy neighbor 192.168.0.1 remote-as 101 neighbor 192.168.0.1 bfd neighbor 192.168.1.1 remote-as 103 diff --git a/tests/topotests/bfd-vrf-topo1/r3/bfdd.conf b/tests/topotests/bfd-vrf-topo1/r3/bfdd.conf index f6921b7818..76910ac927 100644 --- a/tests/topotests/bfd-vrf-topo1/r3/bfdd.conf +++ b/tests/topotests/bfd-vrf-topo1/r3/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 192.168.1.2 vrf r3-cust1 echo-interval 100 diff --git a/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json index c92d4e052a..36fca17bbf 100644 --- a/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.1.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.1.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.1.2", diff --git a/tests/topotests/bfd-vrf-topo1/r3/bgpd.conf b/tests/topotests/bfd-vrf-topo1/r3/bgpd.conf index 277f027d5b..c0cd45f7fe 100644 --- a/tests/topotests/bfd-vrf-topo1/r3/bgpd.conf +++ b/tests/topotests/bfd-vrf-topo1/r3/bgpd.conf @@ -1,4 +1,5 @@ router bgp 103 vrf r3-cust1 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 102 neighbor 192.168.1.2 bfd address-family ipv4 unicast diff --git a/tests/topotests/bfd-vrf-topo1/r4/bfdd.conf b/tests/topotests/bfd-vrf-topo1/r4/bfdd.conf index a56a3a0d37..63d0da7805 100644 --- a/tests/topotests/bfd-vrf-topo1/r4/bfdd.conf +++ b/tests/topotests/bfd-vrf-topo1/r4/bfdd.conf @@ -1,3 +1,8 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
bfd peer 192.168.2.2 vrf r4-cust1 transmit-interval 2000 diff --git a/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json index cc8510dd61..efe7d47b1a 100644 --- a/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.2.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.2.2", @@ -34,7 +34,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.2.2", diff --git a/tests/topotests/bfd-vrf-topo1/r4/bgpd.conf b/tests/topotests/bfd-vrf-topo1/r4/bgpd.conf index 66bf28587d..fe1185768d 100644 --- a/tests/topotests/bfd-vrf-topo1/r4/bgpd.conf +++ b/tests/topotests/bfd-vrf-topo1/r4/bgpd.conf @@ -1,4 +1,5 @@ router bgp 104 vrf r4-cust1 + no bgp ebgp-requires-policy neighbor 192.168.2.2 remote-as 102 neighbor 192.168.2.2 bfd address-family ipv4 unicast diff --git a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py index e2933820bd..eb4f0d4a83 100755 --- a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py +++ b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py @@ -35,7 +35,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -46,27 +46,29 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) def setup_module(mod): @@ -78,47 +80,50 @@ def setup_module(mod): # check for zebra capability for rname, router in router_list.iteritems(): - if router.check_capability( - TopoRouter.RD_ZEBRA, - '--vrfwnetns' - ) == False: - return pytest.skip('Skipping BFD Topo1 VRF NETNS feature. VRF NETNS backend not available on FRR') - - if os.system('ip netns list') != 0: - return pytest.skip('Skipping BFD Topo1 VRF NETNS Test. NETNS not available on System') - - logger.info('Testing with VRF Namespace support') - - cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi', - 'ip netns add {0}-cust1', - 'ip link set dev {0}-eth0 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up'] - cmds2 = ['ip link set dev {0}-eth1 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth1 up', - 'ip link set dev {0}-eth2 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth2 up'] + if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False: + return pytest.skip( + "Skipping BFD Topo1 VRF NETNS feature. 
VRF NETNS backend not available on FRR" + ) + + if os.system("ip netns list") != 0: + return pytest.skip( + "Skipping BFD Topo1 VRF NETNS Test. NETNS not available on System" + ) + + logger.info("Testing with VRF Namespace support") + + cmds = [ + "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi", + "ip netns add {0}-cust1", + "ip link set dev {0}-eth0 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth0 up", + ] + cmds2 = [ + "ip link set dev {0}-eth1 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth1 up", + "ip link set dev {0}-eth2 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth2 up", + ] for rname, router in router_list.iteritems(): # create VRF rx-cust1 and link rx-eth0 to rx-cust1 for cmd in cmds: output = tgen.net[rname].cmd(cmd.format(rname)) - if rname == 'r2': + if rname == "r2": for cmd in cmds2: output = tgen.net[rname].cmd(cmd.format(rname)) for rname, router in router_list.iteritems(): router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), - '--vrfwnetns' + os.path.join(CWD, "{}/zebra.conf".format(rname)), + "--vrfwnetns", ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. @@ -128,42 +133,49 @@ def setup_module(mod): # daemon exists. 
for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() # move back rx-eth0 to default VRF # delete rx-vrf - cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1', - 'ip netns delete {0}-cust1'] - cmds2 = ['ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1', - 'ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1'] + cmds = [ + "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1", + "ip netns delete {0}-cust1", + ] + cmds2 = [ + "ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1", + "ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1", + ] router_list = tgen.routers() for rname, router in router_list.iteritems(): - if rname == 'r2': + if rname == "r2": for cmd in cmds2: tgen.net[rname].cmd(cmd.format(rname)) for cmd in cmds: tgen.net[rname].cmd(cmd.format(rname)) tgen.stop_topology() + def test_bfd_connection(): "Assert that the BFD peers can find themselves." 
tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -175,15 +187,19 @@ def test_bgp_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers to go up') + logger.info("waiting for bgp peers to go up") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_summary.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp vrf {}-cust1 summary json'.format(router.name), expected) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip bgp vrf {}-cust1 summary json".format(router.name), + expected, + ) _, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -193,15 +209,19 @@ def test_bgp_fast_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers converge') + logger.info("waiting for bgp peers converge") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, 
router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp vrf {}-cust1 json'.format(router.name), expected) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip bgp vrf {}-cust1 json".format(router.name), + expected, + ) _, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -215,30 +235,33 @@ def test_bfd_fast_convergence(): pytest.skip(tgen.errors) # Disable r2-eth0 link - router2 = tgen.gears['r2'] - topotest.interface_set_status(router2, 'r2-eth0', ifaceaction=False, vrf_name='r2-cust1') + router2 = tgen.gears["r2"] + topotest.interface_set_status( + router2, "r2-eth0", ifaceaction=False, vrf_name="r2-cust1" + ) # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD converge') + logger.info("waiting for BFD converge") # Check that BGP converged quickly. for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) # Load the same file as previous test, but expect R1 to be down. 
- if router.name == 'r1': + if router.name == "r1": for peer in expected: - if peer['peer'] == '192.168.0.2': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.2": + peer["status"] = "down" else: for peer in expected: - if peer['peer'] == '192.168.0.1': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.1": + peer["status"] = "down" - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert res is None, assertmsg @@ -250,31 +273,30 @@ def test_bgp_fast_reconvergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for BGP re convergence') + logger.info("waiting for BGP re convergence") # Check that BGP converged quickly. for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) # Load the same file as previous test, but set networks to None # to test absence. 
- if router.name == 'r1': - expected['routes']['10.254.254.2/32'] = None - expected['routes']['10.254.254.3/32'] = None - expected['routes']['10.254.254.4/32'] = None + if router.name == "r1": + expected["routes"]["10.254.254.2/32"] = None + expected["routes"]["10.254.254.3/32"] = None + expected["routes"]["10.254.254.4/32"] = None else: - expected['routes']['10.254.254.1/32'] = None - - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp vrf {}-cust1 json'.format(router.name), expected) - _, res = topotest.run_and_expect( - test_func, - None, - count=3, - wait=1 + expected["routes"]["10.254.254.1/32"] = None + + test_func = partial( + topotest.router_json_cmp, + router, + "show ip bgp vrf {}-cust1 json".format(router.name), + expected, ) - assertmsg = '{}: bgp did not converge'.format(router.name) + _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -282,11 +304,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py index 43639a81d1..3441d68731 100755 --- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py @@ -48,8 +48,8 @@ from copy import deepcopy # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../lib/')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) # Required to instantiate the topology builder class. @@ -59,30 +59,39 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, create_static_routes, - verify_rib, verify_admin_distance_for_static_routes + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_static_routes, + verify_rib, + verify_admin_distance_for_static_routes, ) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, verify_router_id, - modify_as_number, verify_as_numbers, clear_bgp_and_verify, - verify_bgp_timers_and_functionality + verify_bgp_convergence, + create_router_bgp, + verify_router_id, + modify_as_number, + verify_as_numbers, + clear_bgp_and_verify, + verify_bgp_timers_and_functionality, ) from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology creation jsonFile = "{}/bgp_basic_functionality.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) -#Global Variable +# Global Variable KEEPALIVETIMER = 2 HOLDDOWNTIMER = 6 + class CreateTopo(Topo): """ Test BasicTopo - topology 1 @@ -124,8 +133,9 @@ def setup_module(mod): global BGP_CONVERGENCE BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}". 
\ - format(BGP_CONVERGENCE) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) logger.info("Running setup_module() done") @@ -140,8 +150,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". - format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -157,7 +168,7 @@ def test_modify_and_delete_router_id(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -165,59 +176,31 @@ def test_modify_and_delete_router_id(request): # Modify router id input_dict = { - 'r1': { - "bgp": { - 'router_id': '12.12.12.12' - } - }, - 'r2': { - "bgp": { - 'router_id': '22.22.22.22' - } - }, - 'r3': { - "bgp": { - 'router_id': '33.33.33.33' - } - }, + "r1": {"bgp": {"router_id": "12.12.12.12"}}, + "r2": {"bgp": {"router_id": "22.22.22.22"}}, + "r3": {"bgp": {"router_id": "33.33.33.33"}}, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}".\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying router id once modified result = verify_router_id(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}".\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Delete router id input_dict = { - 'r1': { - "bgp": { - 'del_router_id': True - } - }, - 'r2': { - "bgp": { - 'del_router_id': True - } - }, - 'r3': { - "bgp": { - 'del_router_id': True - } - }, + "r1": {"bgp": {"del_router_id": True}}, + "r2": {"bgp": {"del_router_id": True}}, + "r3": {"bgp": {"del_router_id": True}}, } 
result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying router id once deleted # Once router-id is deleted, highest interface ip should become # router-id result = verify_router_id(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -229,41 +212,94 @@ def test_bgp_config_with_4byte_as_number(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name write_test_header(tc_name) input_dict = { - "r1": { - "bgp": { - "local_as": 131079 - } - }, - "r2": { - "bgp": { - "local_as": 131079 - } - }, - "r3": { - "bgp": { - "local_as": 131079 - } - }, - "r4": { - "bgp": { - "local_as": 131080 - } - } + "r1": {"bgp": {"local_as": 131079}}, + "r2": {"bgp": {"local_as": 131079}}, + "r3": {"bgp": {"local_as": 131079}}, + "r4": {"bgp": {"local_as": 131080}}, } result = modify_as_number(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) result = verify_as_numbers(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_BGP_config_with_invalid_ASN_p2(request): + """ + Configure BGP with invalid ASN(ex - 0, reserved ASN) and verify test case + ended up with error + """ + + tgen = get_topogen() + global BGP_CONVERGENCE + + if BGP_CONVERGENCE != True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + # Api call to modify AS number + input_dict = { + "r1": {"bgp": {"local_as": 0,}}, + "r2": {"bgp": {"local_as": 0,}}, + "r3": {"bgp": {"local_as": 0,}}, + "r4": {"bgp": {"local_as": 64000,}}, + } + result = modify_as_number(tgen, topo, input_dict) + try: + assert result is True + except AssertionError: + logger.info("Expected behaviour: {}".format(result)) + logger.info("BGP config is not created because of invalid ASNs") + + write_test_footer(tc_name) + + +def test_BGP_config_with_2byteAS_and_4byteAS_number_p1(request): + """ + Configure BGP with 4 byte and 2 byte ASN and verify BGP is converged + """ + + tgen = get_topogen() + global BGP_CONVERGENCE + + if BGP_CONVERGENCE != True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + # Api call to modify AS number + input_dict = { + "r1": {"bgp": {"local_as": 131079}}, + "r2": {"bgp": {"local_as": 131079}}, + "r3": {"bgp": {"local_as": 131079}}, + "r4": {"bgp": {"local_as": 111}}, + } + result = modify_as_number(tgen, topo, input_dict) + if result != True: + assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result) + + result = verify_as_numbers(tgen, topo, input_dict) + if result != True: + assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result) + + # Api call verify whether BGP is converged + result = verify_bgp_convergence(tgen, topo) + if result != True: + 
assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result) write_test_footer(tc_name) @@ -275,7 +311,7 @@ def test_bgp_timers_functionality(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -293,10 +329,10 @@ def test_bgp_timers_functionality(request): "unicast": { "neighbor": { "r2": { - "dest_link":{ + "dest_link": { "r1": { "keepalivetimer": KEEPALIVETIMER, - "holddowntimer": HOLDDOWNTIMER + "holddowntimer": HOLDDOWNTIMER, } } } @@ -308,16 +344,14 @@ def test_bgp_timers_functionality(request): } } result = create_router_bgp(tgen, topo, deepcopy(input_dict)) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so timer modification would take place - clear_bgp_and_verify(tgen, topo, 'r1') + clear_bgp_and_verify(tgen, topo, "r1") # Verifying bgp timers functionality result = verify_bgp_timers_and_functionality(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -327,7 +361,7 @@ def test_static_routes(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -339,17 +373,18 @@ def test_static_routes(request): # Api call to create static routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "admin_distance": 100, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + { + "network": "10.0.20.1/32", + "no_of_ip": 9, + "admin_distance": 100, + "next_hop": "10.0.0.2", + } + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes input_dict_1 = { @@ -360,7 +395,7 @@ def test_static_routes(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -370,17 +405,16 @@ def test_static_routes(request): } result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes - dut = 'r3' - protocol = 'bgp' - next_hop = ['10.0.0.2', '10.0.0.5'] - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop, - protocol=protocol) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + dut = "r3" + protocol = "bgp" + next_hop = ["10.0.0.2", "10.0.0.5"] + result = verify_rib( + tgen, "ipv4", dut, input_dict, next_hop=next_hop, protocol=protocol + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -390,7 +424,7 @@ def test_admin_distance_for_existing_static_routes(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -401,21 +435,21 @@ def test_admin_distance_for_existing_static_routes(request): input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "admin_distance": 10, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + { + "network": "10.0.20.1/32", + "admin_distance": 10, + "next_hop": "10.0.0.2", + } + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying admin distance once modified result = verify_admin_distance_for_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -425,7 +459,7 @@ def test_advertise_network_using_network_command(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -442,14 +476,8 @@ def test_advertise_network_using_network_command(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "20.0.0.0/32", - "no_of_network": 10 - }, - { - "network": "30.0.0.0/32", - "no_of_network": 10 - } + {"network": "20.0.0.0/32", "no_of_network": 10}, + {"network": "30.0.0.0/32", "no_of_network": 10}, ] } } @@ -459,15 +487,13 @@ def test_advertise_network_using_network_command(request): } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes - dut = 'r2' + dut = "r2" protocol = "bgp" - result = verify_rib(tgen, 'ipv4', dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -480,7 +506,7 @@ def test_clear_bgp_and_verify(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -490,9 +516,8 @@ def test_clear_bgp_and_verify(request): reset_config_on_routers(tgen) # clear ip bgp - result = clear_bgp_and_verify(tgen, topo, 'r1') - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + result = clear_bgp_and_verify(tgen, topo, "r1") + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -509,7 +534,7 @@ def test_bgp_with_loopback_interface(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -518,79 +543,51 @@ def test_bgp_with_loopback_interface(request): # Creating configuration from JSON reset_config_on_routers(tgen) - for routerN in sorted(topo['routers'].keys()): - for bgp_neighbor in \ - topo['routers'][routerN]['bgp']['address_family']['ipv4'][ - 'unicast']['neighbor'].keys(): + for routerN in sorted(topo["routers"].keys()): + for bgp_neighbor in topo["routers"][routerN]["bgp"]["address_family"]["ipv4"][ + "unicast" + ]["neighbor"].keys(): # Adding ['source_link'] = 'lo' key:value pair - topo['routers'][routerN]['bgp']['address_family']['ipv4'][ - 'unicast']['neighbor'][bgp_neighbor]["dest_link"] = { - 'lo': { - "source_link": "lo", - } - } + topo["routers"][routerN]["bgp"]["address_family"]["ipv4"]["unicast"][ + "neighbor" + ][bgp_neighbor]["dest_link"] = {"lo": {"source_link": "lo",}} # Creating configuration from JSON build_config_from_json(tgen, topo) input_dict = { "r1": { - "static_routes": [{ - "network": "1.0.2.17/32", - "next_hop": "10.0.0.2" - }, - { - "network": "1.0.3.17/32", - "next_hop": "10.0.0.6" - } + "static_routes": [ + {"network": "1.0.2.17/32", "next_hop": "10.0.0.2"}, + {"network": "1.0.3.17/32", "next_hop": "10.0.0.6"}, ] }, "r2": { - "static_routes": [{ - "network": "1.0.1.17/32", - "next_hop": "10.0.0.1" - }, - { - "network": "1.0.3.17/32", - "next_hop": "10.0.0.10" - } + "static_routes": [ + {"network": "1.0.1.17/32", "next_hop": "10.0.0.1"}, + {"network": "1.0.3.17/32", "next_hop": "10.0.0.10"}, ] }, "r3": { - "static_routes": [{ - "network": 
"1.0.1.17/32", - "next_hop": "10.0.0.5" - }, - { - "network": "1.0.2.17/32", - "next_hop": "10.0.0.9" - }, - { - "network": "1.0.4.17/32", - "next_hop": "10.0.0.14" - } + "static_routes": [ + {"network": "1.0.1.17/32", "next_hop": "10.0.0.5"}, + {"network": "1.0.2.17/32", "next_hop": "10.0.0.9"}, + {"network": "1.0.4.17/32", "next_hop": "10.0.0.14"}, ] }, - "r4": { - "static_routes": [{ - "network": "1.0.3.17/32", - "next_hop": "10.0.0.13" - }] - } + "r4": {"static_routes": [{"network": "1.0.3.17/32", "next_hop": "10.0.0.13"}]}, } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Api call verify whether BGP is converged result = verify_bgp_convergence(tgen, topo) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + 
timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal 
routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = 
datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 
100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: 
try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % 
(i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = 
open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 
10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = 
open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, 
(((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so 
many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i 
next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are 
seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - 
stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer 
= int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different 
neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st 
arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes 
with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, 
argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) 
stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ 
-from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, 
(((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save 
received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin 
igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py 
@@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 
10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py +++ 
b/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, 
asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py index 5334ea5369..031ff455ca 100755 --- 
a/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp 
as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py index 
5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 
next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py 
b/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, 
numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git 
a/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different 
neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running 
while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes 
equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to 
allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) 
stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to 
continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/r1/bgpd.conf b/tests/topotests/bgp-ecmp-topo1/r1/bgpd.conf index 627dc76c1b..d3beb2d320 100644 --- a/tests/topotests/bgp-ecmp-topo1/r1/bgpd.conf +++ b/tests/topotests/bgp-ecmp-topo1/r1/bgpd.conf @@ -5,6 +5,7 @@ log file bgpd.log router bgp 100 bgp router-id 10.0.255.1 bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy neighbor 10.0.1.101 remote-as 99 neighbor 10.0.1.102 remote-as 99 neighbor 10.0.1.103 remote-as 99 diff --git a/tests/topotests/bgp-ecmp-topo1/r1/summary.txt b/tests/topotests/bgp-ecmp-topo1/r1/summary.txt index bccc483d52..11611d041b 100644 --- a/tests/topotests/bgp-ecmp-topo1/r1/summary.txt +++ b/tests/topotests/bgp-ecmp-topo1/r1/summary.txt @@ -9,121 +9,121 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.102":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.103":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.104":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.105":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.106":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.107":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.108":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.109":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.110":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.111":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.112":{ "outq":0, "inq":0, - 
"prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.113":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.114":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.115":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.116":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.117":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.118":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.119":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.120":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" } }, diff --git a/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt b/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt index 73ae256abe..f90aedb1ec 100644 --- a/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt +++ b/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt @@ -8,121 +8,121 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.102":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.103":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.104":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.105":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.106":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.107":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.108":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.109":{ "outq":0, 
"inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.110":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.111":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.112":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.113":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.114":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.115":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.116":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.117":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.118":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.119":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.120":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" } }, diff --git a/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py b/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py index d806226dff..c37f818b0f 100755 --- a/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py +++ b/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -61,23 +61,24 @@ class BGPECMPTopo1(Topo): tgen = get_topogen(self) # Create the BGP router - router = tgen.add_router('r1') + router = tgen.add_router("r1") # Setup Switches - 1 switch per 5 peering routers for swNum in range(1, (total_ebgp_peers + 4) / 5 + 1): - switch = tgen.add_switch('s{}'.format(swNum)) + switch = tgen.add_switch("s{}".format(swNum)) switch.add_link(router) # Add 'total_ebgp_peers' number of eBGP ExaBGP neighbors - for peerNum in range(1, total_ebgp_peers+1): - swNum = ((peerNum - 1) / 5 + 1) + for peerNum in range(1, total_ebgp_peers + 1): + swNum = (peerNum - 1) / 5 + 1 - peer_ip = '10.0.{}.{}'.format(swNum, peerNum + 100) - peer_route = 'via 10.0.{}.1'.format(swNum) - peer = tgen.add_exabgp_peer('peer{}'.format(peerNum), - ip=peer_ip, defaultRoute=peer_route) + peer_ip = "10.0.{}.{}".format(swNum, peerNum + 100) + peer_route = "via 10.0.{}.1".format(swNum) + peer = tgen.add_exabgp_peer( + "peer{}".format(peerNum), ip=peer_ip, defaultRoute=peer_route + ) - switch = tgen.gears['s{}'.format(swNum)] + switch = tgen.gears["s{}".format(swNum)] switch.add_link(peer) @@ -87,6 +88,7 @@ class BGPECMPTopo1(Topo): # ##################################################### + def setup_module(module): tgen = Topogen(BGPECMPTopo1, module.__name__) tgen.start_topology() @@ -95,21 +97,19 @@ def setup_module(module): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) router.start() # Starting Hosts and init ExaBGP 
on each of them - topotest.sleep(10, 'starting BGP on all {} peers'.format(total_ebgp_peers)) + topotest.sleep(10, "starting BGP on all {} peers".format(total_ebgp_peers)) peer_list = tgen.exabgp_peers() for pname, peer in peer_list.iteritems(): peer_dir = os.path.join(CWD, pname) - env_file = os.path.join(CWD, 'exabgp.env') + env_file = os.path.join(CWD, "exabgp.env") peer.start(peer_dir, env_file) logger.info(pname) @@ -128,11 +128,11 @@ def test_bgp_convergence(): pytest.skip(tgen.errors) # Expected result - router = tgen.gears['r1'] - if router.has_version('<', '3.0'): - reffile = os.path.join(CWD, 'r1/summary20.txt') + router = tgen.gears["r1"] + if router.has_version("<", "3.0"): + reffile = os.path.join(CWD, "r1/summary20.txt") else: - reffile = os.path.join(CWD, 'r1/summary.txt') + reffile = os.path.join(CWD, "r1/summary.txt") expected = json.loads(open(reffile).read()) @@ -142,18 +142,19 @@ def test_bgp_convergence(): with 'json') and compare with `data` contents. """ output = router.vtysh_cmd(cmd, isjson=True) - if 'ipv4Unicast' in output: - output['ipv4Unicast']['vrfName'] = \ - output['ipv4Unicast']['vrfName'].replace( - 'default', 'Default') - elif 'vrfName' in output: - output['vrfName'] = output['vrfName'].replace('default', 'Default') + if "ipv4Unicast" in output: + output["ipv4Unicast"]["vrfName"] = output["ipv4Unicast"]["vrfName"].replace( + "default", "Default" + ) + elif "vrfName" in output: + output["vrfName"] = output["vrfName"].replace("default", "Default") return topotest.json_cmp(output, data) test_func = functools.partial( - _output_summary_cmp, router, 'show ip bgp summary json', expected) + _output_summary_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assertmsg = 'BGP router network did not converge' + assertmsg = "BGP router network did not converge" assert res is None, assertmsg @@ -165,26 +166,26 @@ def test_bgp_ecmp(): pytest.skip(tgen.errors) expect = { 
- 'routerId': '10.0.255.1', - 'routes': { - }, + "routerId": "10.0.255.1", + "routes": {}, } for net in range(1, 5): for subnet in range(0, 10): - netkey = '10.20{}.{}.0/24'.format(net, subnet) - expect['routes'][netkey] = [] + netkey = "10.20{}.{}.0/24".format(net, subnet) + expect["routes"][netkey] = [] for _ in range(0, 10): - peer = {'multipath': True, 'valid': True} - expect['routes'][netkey].append(peer) + peer = {"multipath": True, "valid": True} + expect["routes"][netkey].append(peer) - test_func = functools.partial(topotest.router_json_cmp, - tgen.gears['r1'], 'show ip bgp json', expect) + test_func = functools.partial( + topotest.router_json_cmp, tgen.gears["r1"], "show ip bgp json", expect + ) _, res = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assertmsg = 'expected multipath routes in "show ip bgp" output' assert res is None, assertmsg -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py index 4b9f419bf2..fd3e7fd7d3 100755 --- a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py @@ -41,10 +41,11 @@ import sys import time import json import pytest + # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -52,15 +53,17 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, + start_topology, + write_test_header, write_test_footer, - verify_rib, create_static_routes, check_address_types, - interface_status, reset_config_on_routers + verify_rib, + create_static_routes, + check_address_types, + interface_status, + reset_config_on_routers, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -130,27 +133,32 @@ def setup_module(mod): ADDR_TYPES = check_address_types() BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:" - " {}".format(BGP_CONVERGENCE)) - - link_data = [val for links, val in - topo["routers"]["r2"]["links"].iteritems() - if "r3" in links] + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + link_data = [ + val + for links, val in topo["routers"]["r2"]["links"].iteritems() + if "r3" in links + ] for adt in ADDR_TYPES: NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data] if adt == "ipv4": - NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) + NEXT_HOPS[adt] = sorted(NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) elif adt == "ipv6": NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16)) + 
NEXT_HOPS[adt], key=lambda x: int(x.split(":")[-3], 16) + ) INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1])) - link_data = [val for links, val in - topo["routers"]["r3"]["links"].iteritems() - if "r2" in links] + link_data = [ + val + for links, val in topo["routers"]["r3"]["links"].iteritems() + if "r2" in links + ] INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1])) @@ -179,40 +187,27 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): input_dict_static = { dut: { "static_routes": [ - { - "network": NETWORK["ipv4"], - "next_hop": NEXT_HOP_IP["ipv4"] - }, - { - "network": NETWORK["ipv6"], - "next_hop": NEXT_HOP_IP["ipv6"] - } + {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]}, + {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]}, ] } } logger.info("Configuring static route on router %s", dut) result = create_static_routes(tgen, input_dict_static) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { dut: { "bgp": { "address_family": { "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } + "unicast": {"redistribute": [{"redist_type": "static"}]} }, "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "unicast": {"redistribute": [{"redist_type": "static"}]} + }, } } } @@ -221,7 +216,8 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): logger.info("Configuring redistribute static route on router %s", dut) result = create_router_bgp(tgen, topo, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) elif test_type == "advertise_nw": input_dict_nw = { @@ -230,28 +226,29 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): "address_family": { "ipv4": { 
"unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv4"]} - ] + "advertise_networks": [{"network": NETWORK["ipv4"]}] } }, "ipv6": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv6"]} - ] + "advertise_networks": [{"network": NETWORK["ipv6"]}] } - } + }, } } } } - logger.info("Advertising networks %s %s from router %s", - NETWORK["ipv4"], NETWORK["ipv6"], dut) + logger.info( + "Advertising networks %s %s from router %s", + NETWORK["ipv4"], + NETWORK["ipv6"], + dut, + ) result = create_router_bgp(tgen, topo, input_dict_nw) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) @pytest.mark.parametrize("ecmp_num", ["8", "16", "32"]) @@ -274,20 +271,8 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): "r3": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "maximum_paths": { - "ebgp": ecmp_num, - } - } - }, - "ipv6": { - "unicast": { - "maximum_paths": { - "ebgp": ecmp_num, - } - } - } + "ipv4": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}}, + "ipv6": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}}, } } } @@ -295,35 +280,33 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num) result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + 
input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_after_clear_bgp(request): +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_after_clear_bgp(request, test_type): """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors""" tc_name = request.node.name @@ -336,46 +319,41 @@ def test_ecmp_after_clear_bgp(request): dut = "r3" protocol = "bgp" - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Clear bgp result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + 
protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -395,22 +373,20 @@ def test_ecmp_remove_redistribute_static(request): # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r2": { @@ -418,22 +394,14 @@ def test_ecmp_remove_redistribute_static(request): "address_family": { "ipv4": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } }, "ipv6": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } - } + }, } } } @@ -441,88 +409,68 @@ def test_ecmp_remove_redistribute_static(request): logger.info("Remove redistribute static") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3 are deleted", addr_type) - result = 
verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) logger.info("Enable redistribute static") input_dict_2 = { "r2": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - }, - "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, + "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, } } } } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_shut_bgp_neighbor(request): - """ - Disable/Shut selected paths nexthops and verify other next are installed in - the RIB of DUT. Enable interfaces and verify RIB count. 
- - Shut BGP neigbors one by one and verify BGP and routing table updated - accordingly in DUT - """ +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_shut_bgp_neighbor(request, test_type): + """ Shut BGP neigbors one by one and verify BGP and routing table updated + accordingly in DUT """ tc_name = request.node.name write_test_header(tc_name) @@ -534,40 +482,33 @@ def test_ecmp_shut_bgp_neighbor(request): protocol = "bgp" reset_config_on_routers(tgen) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - for intf_num in range(len(INTF_LIST_R2)+1, 16): - intf_val = INTF_LIST_R2[intf_num:intf_num+16] + for intf_num in range(len(INTF_LIST_R2) + 1, 16): + intf_val = INTF_LIST_R2[intf_num : intf_num + 16] - input_dict_1 = { - "r2": { - "interface_list": [intf_val], - "status": "down" - } - } - logger.info("Shutting down neighbor interface {} on r2". 
- format(intf_val)) + input_dict_1 = {"r2": {"interface_list": [intf_val], "status": "down"}} + logger.info("Shutting down neighbor interface {} on r2".format(intf_val)) result = interface_status(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: if intf_num + 16 < 32: @@ -575,52 +516,37 @@ def test_ecmp_shut_bgp_neighbor(request): else: check_hops = [] - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=check_hops, - protocol=protocol) + result = verify_rib( + tgen, addr_type, dut, input_dict, next_hop=check_hops, protocol=protocol + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - input_dict_1 = { - "r2": { - "interface_list": INTF_LIST_R2, - "status": "up" - } - } + input_dict_1 = {"r2": {"interface_list": INTF_LIST_R2, "status": "up"}} logger.info("Enabling all neighbor interface {} on r2") result = interface_status(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + 
next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -643,22 +569,20 @@ def test_ecmp_remove_static_route(request): static_or_nw(tgen, topo, tc_name, "redist_static", "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) result = verify_rib( - tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], protocol=protocol) + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: input_dict_2 = { @@ -667,7 +591,7 @@ def test_ecmp_remove_static_route(request): { "network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type], - "delete": True + "delete": True, } ] } @@ -676,23 +600,29 @@ def test_ecmp_remove_static_route(request): logger.info("Remove static routes") result = create_static_routes(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3 are removed", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_2, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_2, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) for addr_type in ADDR_TYPES: # Enable static routes input_dict_4 = { "r2": { "static_routes": [ - { - 
"network": NETWORK[addr_type], - "next_hop": NEXT_HOP_IP[addr_type] - } + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} ] } } @@ -700,14 +630,21 @@ def test_ecmp_remove_static_route(request): logger.info("Enable static route") result = create_static_routes(tgen, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_4, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) def test_ecmp_remove_nw_advertise(request): @@ -727,22 +664,20 @@ def test_ecmp_remove_nw_advertise(request): reset_config_on_routers(tgen) static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_3 = { "r2": { @@ -750,64 +685,59 @@ def test_ecmp_remove_nw_advertise(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv4"], - "delete": True - }] - } - }, + "advertise_networks": [ + {"network": NETWORK["ipv4"], "delete": True} + ] + } + }, "ipv6": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv6"], - "delete": 
True - }] - } + "advertise_networks": [ + {"network": NETWORK["ipv6"], "delete": True} + ] } - } + }, } } } + } logger.info("Withdraw advertised networks") result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py index a9f18ed1fa..94ffc71ef6 100755 --- a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py +++ 
b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py @@ -41,10 +41,11 @@ import sys import time import json import pytest + # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -52,15 +53,17 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, + start_topology, + write_test_header, write_test_footer, - verify_rib, create_static_routes, check_address_types, - interface_status, reset_config_on_routers + verify_rib, + create_static_routes, + check_address_types, + interface_status, + reset_config_on_routers, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -131,27 +134,32 @@ def setup_module(mod): for addr_type in ADDR_TYPES: BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:" - " {}".format(BGP_CONVERGENCE)) - - link_data = [val for links, val in - topo["routers"]["r2"]["links"].iteritems() - if "r3" in links] + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + link_data = [ + val + for links, val in topo["routers"]["r2"]["links"].iteritems() + if "r3" in links + ] for adt in ADDR_TYPES: NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data] if adt == "ipv4": - NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) 
+ NEXT_HOPS[adt] = sorted(NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) elif adt == "ipv6": NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16)) + NEXT_HOPS[adt], key=lambda x: int(x.split(":")[-3], 16) + ) INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1])) - link_data = [val for links, val in - topo["routers"]["r3"]["links"].iteritems() - if "r2" in links] + link_data = [ + val + for links, val in topo["routers"]["r3"]["links"].iteritems() + if "r2" in links + ] INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1])) @@ -180,40 +188,27 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): input_dict_static = { dut: { "static_routes": [ - { - "network": NETWORK["ipv4"], - "next_hop": NEXT_HOP_IP["ipv4"] - }, - { - "network": NETWORK["ipv6"], - "next_hop": NEXT_HOP_IP["ipv6"] - } + {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]}, + {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]}, ] } } logger.info("Configuring static route on router %s", dut) result = create_static_routes(tgen, input_dict_static) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { dut: { "bgp": { "address_family": { "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } + "unicast": {"redistribute": [{"redist_type": "static"}]} }, "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "unicast": {"redistribute": [{"redist_type": "static"}]} + }, } } } @@ -222,7 +217,8 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): logger.info("Configuring redistribute static route on router %s", dut) result = create_router_bgp(tgen, topo, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, 
result) + tc_name, result + ) elif test_type == "advertise_nw": input_dict_nw = { @@ -231,28 +227,29 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv4"]} - ] + "advertise_networks": [{"network": NETWORK["ipv4"]}] } }, "ipv6": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv6"]} - ] + "advertise_networks": [{"network": NETWORK["ipv6"]}] } - } + }, } } } } - logger.info("Advertising networks %s %s from router %s", - NETWORK["ipv4"], NETWORK["ipv6"], dut) + logger.info( + "Advertising networks %s %s from router %s", + NETWORK["ipv4"], + NETWORK["ipv6"], + dut, + ) result = create_router_bgp(tgen, topo, input_dict_nw) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) @pytest.mark.parametrize("ecmp_num", ["8", "16", "32"]) @@ -275,20 +272,8 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): "r3": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "maximum_paths": { - "ibgp": ecmp_num, - } - } - }, - "ipv6": { - "unicast": { - "maximum_paths": { - "ibgp": ecmp_num, - } - } - } + "ipv4": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}}, + "ipv6": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}}, } } } @@ -296,35 +281,33 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num) result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on 
r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_after_clear_bgp(request): +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_after_clear_bgp(request, test_type): """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors""" tc_name = request.node.name @@ -337,46 +320,41 @@ def test_ecmp_after_clear_bgp(request): dut = "r3" protocol = "bgp" - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Clear bgp result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, 
dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -396,22 +374,20 @@ def test_ecmp_remove_redistribute_static(request): # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r2": { @@ -419,22 +395,14 @@ def test_ecmp_remove_redistribute_static(request): "address_family": { "ipv4": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } }, "ipv6": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } - } + }, } } } @@ -442,81 +410,66 @@ def test_ecmp_remove_redistribute_static(request): logger.info("Remove redistribute static") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } 
- ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3 are deleted", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) logger.info("Enable redistribute static") input_dict_2 = { "r2": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - }, - "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, + "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, } } } } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_shut_bgp_neighbor(request): 
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_shut_bgp_neighbor(request, test_type): """ Shut BGP neigbors one by one and verify BGP and routing table updated accordingly in DUT """ @@ -530,40 +483,33 @@ def test_ecmp_shut_bgp_neighbor(request): protocol = "bgp" reset_config_on_routers(tgen) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - for intf_num in range(len(INTF_LIST_R2)+1, 16): - intf_val = INTF_LIST_R2[intf_num:intf_num+16] + for intf_num in range(len(INTF_LIST_R2) + 1, 16): + intf_val = INTF_LIST_R2[intf_num : intf_num + 16] - input_dict_1 = { - "r2": { - "interface_list": [intf_val], - "status": "down" - } - } - logger.info("Shutting down neighbor interface {} on r2". 
- format(intf_val)) + input_dict_1 = {"r2": {"interface_list": [intf_val], "status": "down"}} + logger.info("Shutting down neighbor interface {} on r2".format(intf_val)) result = interface_status(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: if intf_num + 16 < 32: @@ -571,52 +517,37 @@ def test_ecmp_shut_bgp_neighbor(request): else: check_hops = [] - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=check_hops, - protocol=protocol) + result = verify_rib( + tgen, addr_type, dut, input_dict, next_hop=check_hops, protocol=protocol + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - input_dict_1 = { - "r2": { - "interface_list": INTF_LIST_R2, - "status": "up" - } - } + input_dict_1 = {"r2": {"interface_list": INTF_LIST_R2, "status": "up"}} logger.info("Enabling all neighbor interface {} on r2") result = interface_status(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + 
next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -639,22 +570,20 @@ def test_ecmp_remove_static_route(request): static_or_nw(tgen, topo, tc_name, "redist_static", "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) result = verify_rib( - tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], protocol=protocol) + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: input_dict_2 = { @@ -663,7 +592,7 @@ def test_ecmp_remove_static_route(request): { "network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type], - "delete": True + "delete": True, } ] } @@ -672,23 +601,29 @@ def test_ecmp_remove_static_route(request): logger.info("Remove static routes") result = create_static_routes(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3 are removed", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_2, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_2, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) for addr_type in ADDR_TYPES: # Enable static routes input_dict_4 = { "r2": { "static_routes": [ - { - 
"network": NETWORK[addr_type], - "next_hop": NEXT_HOP_IP[addr_type] - } + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} ] } } @@ -696,14 +631,21 @@ def test_ecmp_remove_static_route(request): logger.info("Enable static route") result = create_static_routes(tgen, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_4, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -725,22 +667,20 @@ def test_ecmp_remove_nw_advertise(request): reset_config_on_routers(tgen) static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_3 = { "r2": { @@ -748,64 +688,59 @@ def test_ecmp_remove_nw_advertise(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv4"], - "delete": True - }] - } - }, + "advertise_networks": [ + {"network": NETWORK["ipv4"], "delete": True} + ] + } + }, "ipv6": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv6"], - "delete": True - }] - } + 
"advertise_networks": [ + {"network": NETWORK["ipv6"], "delete": True} + ] } - } + }, } } } + } logger.info("Withdraw advertised networks") result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) if __name__ == "__main__": diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/P1/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/P1/bgpd.conf new file mode 100644 index 0000000000..cdf4cb4feb --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/P1/bgpd.conf @@ -0,0 +1 @@ 
+! diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/P1/ospfd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/P1/ospfd.conf new file mode 100644 index 0000000000..772675ddff --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/P1/ospfd.conf @@ -0,0 +1,4 @@ +! +router ospf + network 10.20.0.0/16 area 0 + network 10.20.20.20/32 area 0 diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/P1/zebra.conf b/tests/topotests/bgp-evpn-vxlan_topo1/P1/zebra.conf new file mode 100644 index 0000000000..95b5da8402 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/P1/zebra.conf @@ -0,0 +1,7 @@ +! +interface lo + ip address 10.20.20.20/32 +interface P1-eth0 + ip address 10.20.1.2/24 +interface P1-eth1 + ip address 10.20.2.2/24 diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf new file mode 100644 index 0000000000..d337201f71 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf @@ -0,0 +1,9 @@ +router bgp 65000 + timers 3 9 + bgp router-id 10.10.10.10 + no bgp default ipv4-unicast + neighbor 10.30.30.30 remote-as 65000 + neighbor 10.30.30.30 update-source lo + address-family l2vpn evpn + neighbor 10.30.30.30 activate + advertise-all-vni diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json new file mode 100644 index 0000000000..d9f2182aa0 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json @@ -0,0 +1,16 @@ +{ + "vni":101, + "type":"L2", + "vrf":"default", + "vxlanInterface":"vxlan101", + "ifindex":5, + "vtepIp":"10.10.10.10", + "mcastGroup":"0.0.0.0", + "advertiseGatewayMacip":"No", + "numMacs":5, + "numArpNd":2, + "numRemoteVteps":[ + "10.30.30.30" + ] +} + diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/ospfd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/ospfd.conf new file mode 100644 index 0000000000..31c7fc4188 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/ospfd.conf @@ -0,0 
+1,4 @@ +! +router ospf + network 10.20.0.0/16 area 0 + network 10.10.10.10/32 area 0 diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/zebra.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/zebra.conf new file mode 100644 index 0000000000..938ec7bca9 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/zebra.conf @@ -0,0 +1,10 @@ +! +log file zebra.log +! +interface lo + ip address 10.10.10.10/32 +interface PE1-eth0 + ip address 10.10.1.1/24 +interface PE1-eth1 + ip address 10.20.1.1/24 +! diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf new file mode 100644 index 0000000000..d99e33fc06 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf @@ -0,0 +1,10 @@ +router bgp 65000 + timers bgp 3 9 + bgp router-id 10.30.30.30 + no bgp default ipv4-unicast + neighbor 10.10.10.10 remote-as 65000 + neighbor 10.10.10.10 update-source lo + ! + address-family l2vpn evpn + neighbor 10.10.10.10 activate + advertise-all-vni diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json new file mode 100644 index 0000000000..13255ab4f2 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json @@ -0,0 +1,15 @@ +{ + "vni":101, + "type":"L2", + "vrf":"default", + "vxlanInterface":"vxlan101", + "ifindex":5, + "vtepIp":"10.30.30.30", + "mcastGroup":"0.0.0.0", + "advertiseGatewayMacip":"No", + "numMacs":5, + "numArpNd":2, + "numRemoteVteps":[ + "10.10.10.10" + ] +} diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/ospfd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/ospfd.conf new file mode 100644 index 0000000000..c1a8308db5 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/ospfd.conf @@ -0,0 +1,4 @@ +! 
+router ospf + network 10.20.0.0/16 area 0 + network 10.30.30.30/32 area 0 diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/zebra.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/zebra.conf new file mode 100644 index 0000000000..07b83f6395 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/zebra.conf @@ -0,0 +1,8 @@ +! +interface lo + ip address 10.30.30.30/32 +interface PE2-eth0 + ip address 10.20.2.3/24 +interface PE2-eth1 + ip address 10.10.1.3/24 +! diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/__init__.py b/tests/topotests/bgp-evpn-vxlan_topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/__init__.py diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/host1/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/host1/bgpd.conf new file mode 100644 index 0000000000..cdf4cb4feb --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/host1/bgpd.conf @@ -0,0 +1 @@ +! diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/host1/ospfd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/host1/ospfd.conf new file mode 100644 index 0000000000..cdf4cb4feb --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/host1/ospfd.conf @@ -0,0 +1 @@ +! diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/host1/zebra.conf b/tests/topotests/bgp-evpn-vxlan_topo1/host1/zebra.conf new file mode 100644 index 0000000000..91fae9eeba --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/host1/zebra.conf @@ -0,0 +1,3 @@ +! +int host1-eth0 + ip address 10.10.1.55/24 diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/host2/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/host2/bgpd.conf new file mode 100644 index 0000000000..cdf4cb4feb --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/host2/bgpd.conf @@ -0,0 +1 @@ +! 
diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/host2/ospfd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/host2/ospfd.conf new file mode 100644 index 0000000000..cdf4cb4feb --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/host2/ospfd.conf @@ -0,0 +1 @@ +! diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/host2/zebra.conf b/tests/topotests/bgp-evpn-vxlan_topo1/host2/zebra.conf new file mode 100644 index 0000000000..df9adeb3b5 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/host2/zebra.conf @@ -0,0 +1,3 @@ +! +interface host2-eth0 + ip address 10.10.1.56/24 diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py b/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py new file mode 100755 index 0000000000..ad72540185 --- /dev/null +++ b/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python + +# +# test_bgp_evpn_vxlan.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_evpn_vxlan.py: Test VXLAN EVPN MAC a route signalling over BGP. +""" + +import os +import sys +import json +from functools import partial +import pytest + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # This function only purpose is to define allocation and relationship + # between routers, switches and hosts. + # + # + # Create routers + tgen.add_router("P1") + tgen.add_router("PE1") + tgen.add_router("PE2") + tgen.add_router("host1") + tgen.add_router("host2") + + # Host1-PE1 + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["host1"]) + switch.add_link(tgen.gears["PE1"]) + + # PE1-P1 + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["PE1"]) + switch.add_link(tgen.gears["P1"]) + + # P1-PE2 + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["P1"]) + switch.add_link(tgen.gears["PE2"]) + + # PE2-host2 + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["PE2"]) + switch.add_link(tgen.gears["host2"]) + + +def setup_module(mod): + "Sets up the pytest environment" + # This function initiates the topology build with Topogen... + tgen = Topogen(TemplateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. 
+ tgen.start_topology() + + pe1 = tgen.gears["PE1"] + pe2 = tgen.gears["PE2"] + p1 = tgen.gears["P1"] + + # set up PE bridges with the EVPN member interfaces facing the CE hosts + pe1.run("ip link add name br101 type bridge stp_state 0") + pe1.run("ip link set dev br101 up") + pe1.run( + "ip link add vxlan101 type vxlan id 101 dstport 4789 local 10.10.10.10 nolearning" + ) + pe1.run("ip link set dev vxlan101 master br101") + pe1.run("ip link set up dev vxlan101") + pe1.run("ip link set dev PE1-eth0 master br101") + + pe2.run("ip link add name br101 type bridge stp_state 0") + pe2.run("ip link set dev br101 up") + pe2.run( + "ip link add vxlan101 type vxlan id 101 dstport 4789 local 10.30.30.30 nolearning" + ) + pe2.run("ip link set dev vxlan101 master br101") + pe2.run("ip link set up dev vxlan101") + pe2.run("ip link set dev PE2-eth1 master br101") + p1.run("sysctl -w net.ipv4.ip_forward=1") + + # This is a sample of configuration loading. + router_list = tgen.routers() + + # For all registred routers, load the zebra configuration file + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + # After loading the configurations, this function loads configured daemons. + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def test_pe1_converge_evpn(): + "Wait for protocol convergence" + + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + pe1 = tgen.gears["PE1"] + json_file = "{}/{}/evpn.vni.json".format(CWD, pe1.name) + expected = json.loads(open(json_file).read()) + + test_func = partial( + topotest.router_json_cmp, pe1, "show evpn vni 101 json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=125, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(pe1.name) + assert result is None, assertmsg + # tgen.mininet_cli() + + +def test_pe2_converge_evpn(): + "Wait for protocol convergence" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + pe2 = tgen.gears["PE2"] + json_file = "{}/{}/evpn.vni.json".format(CWD, pe2.name) + expected = json.loads(open(json_file).read()) + + test_func = partial( + topotest.router_json_cmp, pe2, "show evpn vni 101 json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=125, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(pe2.name) + assert result is None, assertmsg + # tgen.mininet_cli() + + +def mac_learn_test(host, local): + "check the host MAC gets learned by the VNI" + + host_output = host.vtysh_cmd("show interface {}-eth0".format(host.name)) + int_lines = host_output.splitlines() + line_items = int_lines[7].split(": ") + mac = line_items[1] + mac_output = local.vtysh_cmd("show evpn mac vni 101 mac {} json".format(mac)) + mac_output_json = json.loads(mac_output) + assertmsg = "Local MAC output does not match interface mac {}".format(mac) + assert mac_output_json[mac]["type"] == "local" + + +def mac_test_local_remote(local, remote): + "test MAC transfer between local and remote" + + local_output = local.vtysh_cmd("show evpn mac vni all json") + remote_output = remote.vtysh_cmd("show evpn mac vni all json") + local_output_vni = local.vtysh_cmd("show evpn vni detail json") + local_output_json = json.loads(local_output) + remote_output_json = 
json.loads(remote_output) + local_output_vni_json = json.loads(local_output_vni) + + for vni in local_output_json: + mac_list = local_output_json[vni]["macs"] + for mac in mac_list: + if mac_list[mac]["type"] == "local" and mac_list[mac]["intf"] != "br101": + assertmsg = "JSON output mismatches local: {} remote: {}".format( + local_output_vni_json[0]["vtepIp"], + remote_output_json[vni]["macs"][mac]["remoteVtep"], + ) + assert ( + remote_output_json[vni]["macs"][mac]["remoteVtep"] + == local_output_vni_json[0]["vtepIp"] + ), assertmsg + + +def test_learning_pe1(): + "test MAC learning on PE1" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + host1 = tgen.gears["host1"] + pe1 = tgen.gears["PE1"] + mac_learn_test(host1, pe1) + + +def test_learning_pe2(): + "test MAC learning on PE2" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + host2 = tgen.gears["host2"] + pe2 = tgen.gears["PE2"] + mac_learn_test(host2, pe2) + + +def test_local_remote_mac_pe1(): + " Test MAC transfer PE1 local and PE2 remote" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + pe1 = tgen.gears["PE1"] + pe2 = tgen.gears["PE2"] + mac_test_local_remote(pe1, pe2) + + +def test_local_remote_mac_pe2(): + " Test MAC transfer PE2 local and PE1 remote" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + pe1 = tgen.gears["PE1"] + pe2 = tgen.gears["PE2"] + mac_test_local_remote(pe2, pe1) + + # Memory leak test template + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py index 3b2d9c25d7..b0ff3ac437 100755 --- a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py +++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py @@ -72,18 +72,26 @@ from lib.topogen import Topogen, TopoRouter, get_topogen # Required to instantiate the topology builder class. from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - verify_rib, create_static_routes, - create_prefix_lists, verify_prefix_lists, - create_route_maps, check_address_types + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + create_prefix_lists, + verify_prefix_lists, + create_route_maps, + check_address_types, ) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify, verify_best_path_as_per_bgp_attribute, - verify_best_path_as_per_admin_distance, modify_as_number, - verify_as_numbers + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_best_path_as_per_bgp_attribute, + verify_best_path_as_per_admin_distance, + modify_as_number, + verify_as_numbers, ) from lib.topojson import build_topo_from_json, build_config_from_json @@ -147,8 +155,7 @@ def setup_module(mod): # Checking BGP convergence result = verify_bgp_convergence(tgen, topo) - assert result is True, ("setup_module :Failed \n Error:" - " {}".format(result)) + assert result is True, "setup_module :Failed \n Error:" " {}".format(result) logger.info("Running setup_module() done") @@ -165,8 
+172,7 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: %s", - time.asctime(time.localtime(time.time()))) + logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time()))) logger.info("=" * 40) @@ -176,6 +182,7 @@ def teardown_module(): ## ##################################################### + def test_next_hop_attribute(request): """ Verifying route are not getting installed in, as next_hop is @@ -204,44 +211,38 @@ def test_next_hop_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r1" protocol = "bgp" # Verification should fail as nexthop-self is not enabled for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: "\ + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n Error: " "{} routes are not present in RIB".format(addr_type, tc_name) + ) # Configure next-hop-self to bgp neighbor input_dict_1 = { @@ -251,25 +252,17 @@ def test_next_hop_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { 
"unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -279,42 +272,33 @@ def test_next_hop_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r1" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -343,27 +327,19 @@ def test_aspath_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -373,25 +349,17 @@ def test_aspath_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": 
{"next_hop_self": True}}} } } - } + }, } } }, @@ -401,42 +369,34 @@ def test_aspath_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "aspath" + attribute = "path" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify AS-Path and verify best path is changed # Create Prefix list @@ -445,66 +405,52 @@ def test_aspath_attribute(request): "r3": { "prefix_lists": { "ipv4": { - "pf_ls_1_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_1_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_1_ipv6": [{ - "seqid": 10, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_1_ipv6": [ + { + "seqid": 10, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { 
"r3": { "route_maps": { - "RMAP_AS_PATH": [{ - "action": "permit", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_AS_PATH": [ + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"path": {"as_num": "111 222", "as_action": "prepend"}}, }, - "set": { - "aspath": { - "as_num": "111 222", - "as_action": "prepend" - } - } - }, - { - "action": "permit", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"path": {"as_num": "111 222", "as_action": "prepend"}}, }, - "set": { - "aspath": { - "as_num": "111 222", - "as_action": "prepend" - } - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -518,8 +464,10 @@ def test_aspath_attribute(request): "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_AS_PATH", - "direction": "in"} + { + "name": "RMAP_AS_PATH", + "direction": "in", + } ] } } @@ -534,32 +482,34 @@ def test_aspath_attribute(request): "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_AS_PATH", - "direction": "in"} + { + "name": "RMAP_AS_PATH", + "direction": "in", + } ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "aspath" + attribute = "path" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is 
True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -588,27 +538,19 @@ def test_localpref_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -618,25 +560,17 @@ def test_localpref_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -646,95 +580,78 @@ def test_localpref_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Prefix list input_dict_2 = { "r2": { "prefix_lists": { "ipv4": { - "pf_ls_1_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_1_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_1_ipv6": [{ - "seqid": 10, - "network": "200::/8", - "le": 
"128", - "action": "permit" - }] - } + "pf_ls_1_ipv6": [ + { + "seqid": 10, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r2": { "route_maps": { - "RMAP_LOCAL_PREF": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_LOCAL_PREF": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"locPrf": 1111}, }, - "set": { - "localpref": 1111 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"locPrf": 1111}, }, - "set": { - "localpref": 1111 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -748,8 +665,10 @@ def test_localpref_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_LOCAL_PREF", - "direction": "in"} + { + "name": "RMAP_LOCAL_PREF", + "direction": "in", + } ] } } @@ -764,77 +683,69 @@ def test_localpref_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_LOCAL_PREF", - "direction": "in"} + { + "name": "RMAP_LOCAL_PREF", + "direction": "in", + } ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, 
result) # Verifying best path dut = "r1" - attribute = "localpref" + attribute = "locPrf" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify route map input_dict_3 = { "r2": { "route_maps": { - "RMAP_LOCAL_PREF": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_LOCAL_PREF": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"locPrf": 50}, }, - "set": { - "localpref": 50 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"locPrf": 50}, }, - "set": { - "localpref": 50 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "localpref" + attribute = "locPrf" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -866,27 +777,19 @@ def test_weight_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": 
"200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -896,25 +799,17 @@ def test_weight_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -924,94 +819,77 @@ def test_weight_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Prefix list input_dict_2 = { "r1": { "prefix_lists": { "ipv4": { - "pf_ls_1_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_1_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_1_ipv6": [{ - "seqid": 10, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_1_ipv6": [ + { + "seqid": 10, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, 
"Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r1": { "route_maps": { - "RMAP_WEIGHT": [{ - "action": "permit", - "seq_id": "5", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_WEIGHT": [ + { + "action": "permit", + "seq_id": "5", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"weight": 500}, }, - "set": { - "weight": 500 - } - }, - { - "action": "permit", - "seq_id": "10", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "10", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"weight": 500}, }, - "set": { - "weight": 500 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -1025,8 +903,10 @@ def test_weight_attribute(request): "dest_link": { "r1": { "route_maps": [ - {"name": "RMAP_WEIGHT", - "direction": "in"} + { + "name": "RMAP_WEIGHT", + "direction": "in", + } ] } } @@ -1041,77 +921,69 @@ def test_weight_attribute(request): "dest_link": { "r1": { "route_maps": [ - {"name": "RMAP_WEIGHT", - "direction": "in"} + { + "name": "RMAP_WEIGHT", + "direction": "in", + } ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "weight" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: 
{}".format( - tc_name, result) + tc_name, result + ) # Modify route map input_dict_3 = { "r1": { "route_maps": { - "RMAP_WEIGHT": [{ - "action": "permit", - "seq_id": "5", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_WEIGHT": [ + { + "action": "permit", + "seq_id": "5", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"weight": 1000}, }, - "set": { - "weight": 1000 - } - }, - { - "action": "permit", - "seq_id": "10", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "10", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"weight": 1000}, }, - "set": { - "weight": 1000 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "weight" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1143,27 +1015,19 @@ def test_origin_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -1173,25 +1037,17 @@ def test_origin_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": 
{"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -1201,25 +1057,17 @@ def test_origin_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } }, @@ -1230,7 +1078,7 @@ def test_origin_attribute(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -1238,54 +1086,41 @@ def test_origin_attribute(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to create static routes input_dict_3 = { "r5": { "static_routes": [ - { - "network": "200.50.2.0/32", - "next_hop": "Null0" - }, - { - "network": "200.60.2.0/32", - "next_hop": "Null0" - }, - { - "network": "200:50:2::/128", - "next_hop": "Null0" - }, - { - "network": "200:60:2::/128", - "next_hop": "Null0" - } + {"network": "200.50.2.0/32", "next_hop": "Null0"}, + {"network": "200.60.2.0/32", "next_hop": "Null0"}, + {"network": "200:50:2::/128", "next_hop": "Null0"}, + {"network": "200:60:2::/128", "next_hop": "Null0"}, ] } } result = create_static_routes(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: 
{}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "origin" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r4": input_dict["r4"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r4": input_dict["r4"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1317,27 +1152,19 @@ def test_med_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -1347,145 +1174,122 @@ def test_med_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Prefix list input_dict_2 = { "r2": { "prefix_lists": { "ipv4": { - "pf_ls_r2_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_r2_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_r2_ipv6": [{ - "seqid": 20, - "network": "200::/8", - "le": 
"128", - "action": "permit" - }] - } + "pf_ls_r2_ipv6": [ + { + "seqid": 20, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } }, "r3": { "prefix_lists": { "ipv4": { - "pf_ls_r3_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_r3_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_r3_ipv6": [{ - "seqid": 20, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_r3_ipv6": [ + { + "seqid": 20, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } - } + }, } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r2": { "route_maps": { - "RMAP_MED_R2": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_r2_ipv4" - } + "RMAP_MED_R2": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_r2_ipv4"}}, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_r2_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_r2_ipv6"}}, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - }] + ] } }, "r3": { "route_maps": { - "RMAP_MED_R3": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_r3_ipv4" - } + "RMAP_MED_R3": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}}, + "set": {"metric": 10}, }, - "set": { - "med": 10 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_r3_ipv6" - } + { + "action": "permit", + "seq_id": "20", + 
"match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}}, + "set": {"metric": 10}, }, - "set": { - "med": 10 - } - }] + ] } - } + }, } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -1499,17 +1303,15 @@ def test_med_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_MED_R2", - "direction": "in"} + { + "name": "RMAP_MED_R2", + "direction": "in", + } ] } } }, - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}}, } } }, @@ -1520,20 +1322,18 @@ def test_med_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_MED_R2", - "direction": "in"} + { + "name": "RMAP_MED_R2", + "direction": "in", + } ] } } }, - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}}, } } - } + }, } } }, @@ -1543,107 +1343,95 @@ def test_med_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - }, + "r1": {"dest_link": {"r3": {"next_hop_self": True}}}, "r5": { "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_MED_R3", - "direction": "in"} + { + "name": "RMAP_MED_R3", + "direction": "in", + } ] } } - } + }, } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - }, + "r1": {"dest_link": {"r3": {"next_hop_self": True}}}, "r5": { "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_MED_R3", - "direction": "in"} + { + "name": "RMAP_MED_R3", + "direction": "in", + } ] } } - } + }, } } - } + }, } } - } + }, } - result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = 
create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "med" + attribute = "metric" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - input_dict, attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_dict, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify route-map to set med value input_dict_3 = { "r3": { "route_maps": { - "RMAP_MED_R3": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_r3_ipv4" - } + "RMAP_MED_R3": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}}, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_r3_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}}, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "med" + attribute = "metric" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - input_dict, attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_dict, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1674,29 +1462,28 @@ def test_admin_distance(request): { "network": "200.50.2.0/32", "admin_distance": 80, - "next_hop": "10.0.0.14" + 
"next_hop": "10.0.0.14", }, { "network": "200.50.2.0/32", "admin_distance": 60, - "next_hop": "10.0.0.18" + "next_hop": "10.0.0.18", }, { "network": "200:50:2::/128", "admin_distance": 80, - "next_hop": "fd00::1" + "next_hop": "fd00::1", }, { "network": "200:50:2::/128", "admin_distance": 60, - "next_hop": "fd00::1" - } + "next_hop": "fd00::1", + }, ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes input_dict_2 = { @@ -1707,7 +1494,7 @@ def test_admin_distance(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -1715,60 +1502,63 @@ def test_admin_distance(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "admin_distance" input_dict = { - "ipv4": { - "r2": { - "static_routes": [{ - "network": "200.50.2.0/32", - "admin_distance": 80, - "next_hop": "10.0.0.14" - }, - { - "network": "200.50.2.0/32", - "admin_distance": 60, - "next_hop": "10.0.0.18" - } - ] - } - }, - "ipv6": { - "r2": { - "static_routes": [{ - "network": "200:50:2::/128", - "admin_distance": 80, - "next_hop": "fd00::1" - }, - { - "network": "200:50:2::/128", - "admin_distance": 60, - "next_hop": "fd00::1" - }] + "ipv4": { + "r2": { + "static_routes": [ + { + "network": "200.50.2.0/32", + "admin_distance": 80, + "next_hop": "10.0.0.14", + }, + { + "network": "200.50.2.0/32", + "admin_distance": 60, + "next_hop": "10.0.0.18", + }, + ] } - } + 
}, + "ipv6": { + "r2": { + "static_routes": [ + { + "network": "200:50:2::/128", + "admin_distance": 80, + "next_hop": "fd00::1", + }, + { + "network": "200:50:2::/128", + "admin_distance": 60, + "next_hop": "fd00::1", + }, + ] + } + }, } for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_admin_distance(tgen, addr_type, dut, - input_dict[addr_type], - attribute) + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py index b8975997ea..22952f645c 100755 --- a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py +++ b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py @@ -60,16 +60,17 @@ from lib.topogen import Topogen, get_topogen # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - verify_rib, create_static_routes, - create_prefix_lists, verify_prefix_lists + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + create_prefix_lists, + verify_prefix_lists, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify -) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology creation @@ -109,7 +110,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) 
logger.info("Running setup_module to create topology") @@ -133,8 +134,9 @@ def setup_module(mod): # Api call verify whether BGP is converged BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:" - " {}".format(BGP_CONVERGENCE)) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) logger.info("Running setup_module() done") @@ -153,9 +155,11 @@ def teardown_module(mod): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". - format(time.asctime(time.localtime(time.time())))) - logger.info("="*40) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + ##################################################### # @@ -180,34 +184,26 @@ def test_ip_prefix_lists_in_permit(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1": [{"seqid": 10, "network": "any", "action": "permit"}] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure bgp neighbor with prefix list input_dict_3 = { @@ -218,7 +214,7 @@ def test_ip_prefix_lists_in_permit(request): "unicast": { "redistribute": [ {"redist_type": 
"static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -235,10 +231,7 @@ def test_ip_prefix_lists_in_permit(request): "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -248,18 +241,16 @@ def test_ip_prefix_lists_in_permit(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -283,43 +274,34 @@ def test_ip_prefix_lists_out_permit(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Static routes input_dict_1 = { "r1": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) input_dict_5 = { "r3": { - "static_routes": [{ - "network": "10.0.0.2/30", - 
"no_of_ip": 1, - "next_hop": "10.0.0.9" - }] + "static_routes": [ + {"network": "10.0.0.2/30", "no_of_ip": 1, "next_hop": "10.0.0.9"} + ] } } result = create_static_routes(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -328,18 +310,15 @@ def test_ip_prefix_lists_out_permit(request): "r1": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": 10, - "network": "20.0.20.1/32", - "action": "permit" - }] + "pf_list_1": [ + {"seqid": 10, "network": "20.0.20.1/32", "action": "permit"} + ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor # Configure bgp neighbor with prefix list @@ -356,7 +335,7 @@ def test_ip_prefix_lists_out_permit(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -365,8 +344,8 @@ def test_ip_prefix_lists_out_permit(request): }, "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} - ] + {"redist_type": "connected"}, + ], } } } @@ -375,19 +354,20 @@ def test_ip_prefix_lists_out_permit(request): } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) - - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: 
Routes still" \ - " present in RIB".format(tc_name) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -410,16 +390,13 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes # Create ip prefix list @@ -428,24 +405,15 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): "prefix_lists": { "ipv4": { "pf_list_1": [ - { - "seqid": "10", - "network": "10.0.20.1/32", - "action": "deny" - }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"}, + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure bgp neighbor with prefix list input_dict_3 = { @@ -456,7 +424,7 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -473,10 +441,7 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): "dest_link": { "r3": { "prefix_lists": [ - { - "name": 
"pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -486,19 +451,21 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): } } } - } + }, } # Configure prefix list to bgp neighbor result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -525,23 +492,19 @@ def test_delete_prefix_lists(request): "prefix_lists": { "ipv4": { "pf_list_1": [ - { - "seqid": "10", - "network": "10.0.20.1/32", - "action": "deny" - } + {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"} ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, input_dict_2) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Delete prefix list input_dict_2 = { @@ -553,7 +516,7 @@ def test_delete_prefix_lists(request): "seqid": "10", "network": "10.0.20.1/32", "action": "deny", - "delete": True + "delete": True, } ] } @@ -561,12 +524,10 @@ def test_delete_prefix_lists(request): } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, 
"Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -590,30 +551,24 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Static Routes input_dict_1 = { "r2": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.1" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"} + ] } } result = create_static_routes(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -627,21 +582,16 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_4 = { @@ -652,7 +602,7 @@ def 
test_ip_prefix_lists_out_deny_and_permit_any(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -666,7 +616,7 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -685,7 +635,7 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -696,25 +646,26 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -737,16 +688,13 @@ def test_modify_prefix_lists_in_permit_to_deny(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = 
create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -755,19 +703,20 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_3 = { @@ -778,7 +727,7 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -792,13 +741,10 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "unicast": { "neighbor": { "r1": { - "dest_link":{ + "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -808,18 +754,16 @@ def test_modify_prefix_lists_in_permit_to_deny(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Modify prefix list input_dict_1 = { @@ 
-831,34 +775,31 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -882,16 +823,13 @@ def test_modify_prefix_lists_in_deny_to_permit(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -905,21 +843,16 @@ def test_modify_prefix_lists_in_deny_to_permit(request): "seqid": "10", "network": 
"10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_2 = { @@ -930,7 +863,7 @@ def test_modify_prefix_lists_in_deny_to_permit(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -947,10 +880,7 @@ def test_modify_prefix_lists_in_deny_to_permit(request): "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -960,51 +890,51 @@ def test_modify_prefix_lists_in_deny_to_permit(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) # Modify ip prefix list input_dict_1 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = 
create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1028,16 +958,13 @@ def test_modify_prefix_lists_out_permit_to_deny(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -1046,20 +973,20 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to 
bgp neighbor input_dict_2 = { @@ -1070,7 +997,7 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1089,7 +1016,7 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -1100,18 +1027,16 @@ def test_modify_prefix_lists_out_permit_to_deny(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Modify ip prefix list input_dict_1 = { @@ -1123,35 +1048,31 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, 
protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -1175,16 +1096,13 @@ def test_modify_prefix_lists_out_deny_to_permit(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes # Create ip prefix list @@ -1197,22 +1115,16 @@ def test_modify_prefix_lists_out_deny_to_permit(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_2 = { @@ -1223,7 +1135,7 @@ def test_modify_prefix_lists_out_deny_to_permit(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1237,12 +1149,12 @@ def test_modify_prefix_lists_out_deny_to_permit(request): "unicast": { "neighbor": { "r4": { - "dest_link":{ + "dest_link": { "r3": { "prefix_lists": [ { "name": "pf_list_1", - 
"direction": "out" + "direction": "out", } ] } @@ -1253,51 +1165,51 @@ def test_modify_prefix_lists_out_deny_to_permit(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) # Modify ip prefix list input_dict_1 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1321,30 +1233,24 @@ def 
test_ip_prefix_lists_implicit_deny(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Static Routes input_dict_1 = { "r2": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.1" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes # Create ip prefix list @@ -1352,20 +1258,20 @@ def test_ip_prefix_lists_implicit_deny(request): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_4 = { @@ -1376,7 +1282,7 @@ def test_ip_prefix_lists_implicit_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1390,7 +1296,7 @@ def test_ip_prefix_lists_implicit_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": 
"connected"} + {"redist_type": "connected"}, ] } } @@ -1409,7 +1315,7 @@ def test_ip_prefix_lists_implicit_deny(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -1420,25 +1326,26 @@ def test_ip_prefix_lists_implicit_deny(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) diff --git a/tests/topotests/bgp-route-map/test_route_map_topo1.py b/tests/topotests/bgp-route-map/test_route_map_topo1.py index 22dd3a6380..1aa951edaa 100755 --- a/tests/topotests/bgp-route-map/test_route_map_topo1.py +++ b/tests/topotests/bgp-route-map/test_route_map_topo1.py @@ -82,16 +82,29 @@ from mininet.topo import Topo # Required to instantiate the topology builder class. 
from lib.topojson import * from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_bgp_community, - verify_rib, delete_route_maps, create_bgp_community_lists, - interface_status, create_route_maps, create_prefix_lists, - verify_route_maps, check_address_types, - shutdown_bringup_interface, verify_prefix_lists, reset_config_on_routers) + start_topology, + write_test_header, + write_test_footer, + verify_bgp_community, + verify_rib, + delete_route_maps, + create_bgp_community_lists, + interface_status, + create_route_maps, + create_prefix_lists, + verify_route_maps, + check_address_types, + shutdown_bringup_interface, + verify_prefix_lists, + reset_config_on_routers, +) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify, verify_bgp_attributes) + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_attributes, +) from lib.topojson import build_topo_from_json, build_config_from_json @@ -109,15 +122,9 @@ except IOError: # Global variables bgp_convergence = False -NETWORK = { - "ipv4": ["11.0.20.1/32", "20.0.20.1/32"], - "ipv6": ["1::1/128", "2::1/128"] -} +NETWORK = {"ipv4": ["11.0.20.1/32", "20.0.20.1/32"], "ipv6": ["1::1/128", "2::1/128"]} MASK = {"ipv4": "32", "ipv6": "128"} -NEXT_HOP = { - "ipv4": "10.0.0.2", - "ipv6": "fd00::2" -} +NEXT_HOP = {"ipv4": "10.0.0.2", "ipv6": "fd00::2"} ADDR_TYPES = check_address_types() @@ -170,8 +177,9 @@ def setup_module(mod): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) logger.info("Running setup_module() done") @@ -190,8 +198,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: 
{}". - format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -230,7 +239,8 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -242,7 +252,7 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -250,18 +260,19 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r4": { @@ -277,7 +288,8 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): result = create_static_routes(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_5 = { @@ -288,7 +300,7 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -296,81 +308,94 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + 
) input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "action": "permit", - "network": NETWORK["ipv4"][0] - }], - "pf_list_2_ipv4": [{ - "seqid": 10, - "action": "permit", - "network": NETWORK["ipv4"][1] - }] + "pf_list_1_ipv4": [ + { + "seqid": 10, + "action": "permit", + "network": NETWORK["ipv4"][0], + } + ], + "pf_list_2_ipv4": [ + { + "seqid": 10, + "action": "permit", + "network": NETWORK["ipv4"][1], + } + ], }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "action": "permit", - "network": NETWORK["ipv6"][0] - }], - "pf_list_2_ipv6": [{ - "seqid": 100, - "action": "permit", - "network": NETWORK["ipv6"][1] - }] - } + "pf_list_1_ipv6": [ + { + "seqid": 100, + "action": "permit", + "network": NETWORK["ipv6"][0], + } + ], + "pf_list_2_ipv6": [ + { + "seqid": 100, + "action": "permit", + "network": NETWORK["ipv6"][1], + } + ], + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_6 = { - "r3": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + { + "action": "deny", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, } - } - }], - "rmap_match_tag_2_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_2_{}".format(addr_type) + ], + "rmap_match_tag_2_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, } - } - }] + ], + } } } - } result = create_route_maps(tgen, input_dict_6) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure 
neighbor for route map input_dict_7 = { @@ -384,12 +409,14 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_tag_1_ipv4", - "direction": "in"}, - {"name": - "rmap_match_tag_1_ipv4", - "direction": "out"} + { + "name": "rmap_match_tag_1_ipv4", + "direction": "in", + }, + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + }, ] } } @@ -404,19 +431,21 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_tag_1_ipv6", - "direction": "in"}, - {"name": - "rmap_match_tag_1_ipv6", - "direction": "out"} + { + "name": "rmap_match_tag_1_ipv6", + "direction": "in", + }, + { + "name": "rmap_match_tag_1_ipv6", + "direction": "out", + }, ] } } } } } - } + }, } } } @@ -424,7 +453,8 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): result = create_router_bgp(tgen, topo, input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for adt in ADDR_TYPES: # Verifying RIB routes @@ -436,17 +466,17 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): { "network": [NETWORK[adt][1]], "no_of_ip": 9, - "next_hop": NEXT_HOP[adt] + "next_hop": NEXT_HOP[adt], } ] } } - result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol, - expected=False) + result = verify_rib( + tgen, adt, dut, input_dict_2, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present in rib \n Error: {}".format( - tc_name, result) + "routes are not present in rib \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes @@ -457,26 +487,28 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): { "network": [NETWORK[adt][0]], "no_of_ip": 9, - "next_hop": NEXT_HOP[adt] + "next_hop": NEXT_HOP[adt], } ] } } - result = verify_rib(tgen, adt, 
dut, input_dict, protocol=protocol, - expected=False) + result = verify_rib( + tgen, adt, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n " - "routes are not present in rib \n Error: {}".format( - tc_name, result) + "routes are not present in rib \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) -@pytest.mark.parametrize("prefix_action, rmap_action", [("permit", "permit"), - ("permit", "deny"), ("deny", "permit"), - ("deny", "deny")]) +@pytest.mark.parametrize( + "prefix_action, rmap_action", + [("permit", "permit"), ("permit", "deny"), ("deny", "permit"), ("deny", "deny")], +) def test_route_map_with_action_values_combination_of_prefix_action_p0( - request, prefix_action, rmap_action): + request, prefix_action, rmap_action +): """ TC_36: Test permit/deny statements operation in route-maps with a permutation and @@ -501,7 +533,7 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( { "network": NETWORK[adt][0], "no_of_ip": 9, - "next_hop": NEXT_HOP[adt] + "next_hop": NEXT_HOP[adt], } ] } @@ -509,7 +541,8 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -521,7 +554,7 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -529,65 +562,64 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, 
"Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Permit in perfix list and route-map input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": prefix_action - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": prefix_action} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": prefix_action - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": prefix_action} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": rmap_action, - "match": { - addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) - } + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": rmap_action, + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_7 = { @@ -601,9 +633,10 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_pf_1_ipv4", - "direction": "in"} + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } ] } } @@ -618,16 +651,17 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_pf_1_ipv6", - "direction": "in"} + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } ] } } } } } - } + }, } } } @@ -635,7 +669,8 @@ def 
test_route_map_with_action_values_combination_of_prefix_action_p0( result = create_router_bgp(tgen, topo, input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) dut = "r3" protocol = "bgp" @@ -651,17 +686,18 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( } } - #tgen.mininet_cli() - result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol, - expected=False) + # tgen.mininet_cli() + result = verify_rib( + tgen, adt, dut, input_dict_2, protocol=protocol, expected=False + ) if "deny" in [prefix_action, rmap_action]: assert result is not True, "Testcase {} : Failed \n " - "Routes are still present \n Error: {}".\ - format(tc_name, result) + "Routes are still present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) else: assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) def test_route_map_multiple_seq_different_match_set_clause_p0(request): @@ -683,16 +719,19 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": NETWORK[adt][0], - "no_of_ip": 1, - "next_hop": NEXT_HOP[adt] - }] + "static_routes": [ + { + "network": NETWORK[adt][0], + "no_of_ip": 1, + "next_hop": NEXT_HOP[adt], + } + ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -703,7 +742,7 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -711,94 +750,82 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - 
{"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [ - { - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_2_{}".format(addr_type) - } + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, + "set": {"path": {"as_num": 500}}, }, - "set": { - "aspath": { - "as_num": 500 - } - } - }, - { - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_2_{}".format(addr_type) - } + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, }, - "set": { - "localpref": 150, - } - }, - { - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, }, - "set": { - "med": 50 - } - } - ] - } - } + ] + } + } } result = create_route_maps(tgen, 
input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -811,25 +838,27 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -839,65 +868,64 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for adt in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = { - "r3": { - "route_maps": { - "rmap_match_pf_list1": [{ - "set": { - "med": 50, - } - }], - } - } + "r3": {"route_maps": {"rmap_match_pf_list1": [{"set": {"metric": 50,}}],}} } static_routes = [NETWORK[adt][0]] time.sleep(2) - result = verify_bgp_attributes(tgen, adt, dut, static_routes, - "rmap_match_pf_list1", input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, dut, static_routes, "rmap_match_pf_list1", input_dict + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) dut = "r4" - result = verify_bgp_attributes(tgen, adt, dut, static_routes, - "rmap_match_pf_list1", input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, dut, static_routes, "rmap_match_pf_list1", input_dict + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) logger.info("Testcase " + tc_name + " :Passed \n") @@ -924,16 +952,19 @@ def test_route_map_set_only_no_match_p0(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": NETWORK[adt][0], - "no_of_ip": 1, - "next_hop": NEXT_HOP[adt] - }] + "static_routes": [ + { + "network": NETWORK[adt][0], + "no_of_ip": 1, + "next_hop": NEXT_HOP[adt], + } + ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -944,7 +975,7 @@ def test_route_map_set_only_no_match_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -952,17 +983,18 @@ def test_route_map_set_only_no_match_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map input_dict_3 = { @@ -971,11 +1003,7 @@ def test_route_map_set_only_no_match_p0(request): "rmap_match_pf_1": [ { "action": "permit", - "set": { - "med": 50, - "localpref": 150, - 
"weight": 4000 - } + "set": {"metric": 50, "locPrf": 150, "weight": 4000}, } ] } @@ -983,7 +1011,8 @@ def test_route_map_set_only_no_match_p0(request): } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -996,23 +1025,27 @@ def test_route_map_set_only_no_match_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "out", + } + ] } } - } + }, } } }, @@ -1022,61 +1055,63 @@ def test_route_map_set_only_no_match_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) time.sleep(2) for adt in ADDR_TYPES: input_dict_4 = { "r3": { "route_maps": { - "rmap_match_pf_1": [ - { - "action": "permit", - "set": { - "med": 50, - } - } - ] + "rmap_match_pf_1": [{"action": "permit", "set": {"metric": 50,}}] } } } # Verifying RIB routes static_routes = [NETWORK[adt][0]] - result = verify_bgp_attributes(tgen, adt, "r3", static_routes, - "rmap_match_pf_1", input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) - - result = verify_bgp_attributes(tgen, adt, "r4", static_routes, - "rmap_match_pf_1", input_dict_4) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, "r3", static_routes, "rmap_match_pf_1", input_dict_3 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_bgp_attributes( + tgen, adt, "r4", static_routes, "rmap_match_pf_1", input_dict_4 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) logger.info("Testcase " + tc_name + " :Passed \n") @@ -1103,16 +1138,19 @@ def test_route_map_match_only_no_set_p0(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": NETWORK[adt][0], - "no_of_ip": 1, - "next_hop": NEXT_HOP[adt] - }] + "static_routes": [ + { + "network": NETWORK[adt][0], + "no_of_ip": 1, + "next_hop": NEXT_HOP[adt], + } + ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -1123,7 +1161,7 @@ def test_route_map_match_only_no_set_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -1131,62 +1169,56 @@ def test_route_map_match_only_no_set_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_2 = { "r1": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, 
"network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { "r1": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "set": { - "med": 50, - "localpref": 150, - } - } + "rmap_match_pf_1_{}".format(addr_type): [ + {"action": "permit", "set": {"metric": 50, "locPrf": 150,}} ] } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -1199,11 +1231,12 @@ def test_route_map_match_only_no_set_p0(request): "r3": { "dest_link": { "r1": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "out", + } + ] } } } @@ -1216,63 +1249,62 @@ def test_route_map_match_only_no_set_p0(request): "r3": { "dest_link": { "r1": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "out", + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_5 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] 
- } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_6 = { "r3": { "route_maps": { - "rmap_match_pf_2_{}".format(addr_type): [{ + "rmap_match_pf_2_{}".format(addr_type): [ + { "action": "permit", "match": { addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) + "prefix_lists": "pf_list_1_{}".format(addr_type) } - } + }, } ] } @@ -1280,7 +1312,8 @@ def test_route_map_match_only_no_set_p0(request): } result = create_route_maps(tgen, input_dict_6) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_7 = { @@ -1293,25 +1326,27 @@ def test_route_map_match_only_no_set_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -1321,47 +1356,50 @@ def test_route_map_match_only_no_set_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_7) assert result is True, "Testcase {} : Failed \n Error: 
{}".format( - tc_name, result) + tc_name, result + ) for adt in ADDR_TYPES: # Verifying RIB routes static_routes = [NETWORK[adt][0]] - result = verify_bgp_attributes(tgen, adt, "r3", static_routes, - "rmap_match_pf_1", input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, "r3", static_routes, "rmap_match_pf_1", input_dict_3 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) - - diff --git a/tests/topotests/bgp-route-map/test_route_map_topo2.py b/tests/topotests/bgp-route-map/test_route_map_topo2.py index f2398c33ff..3056aa29f3 100755 --- a/tests/topotests/bgp-route-map/test_route_map_topo2.py +++ b/tests/topotests/bgp-route-map/test_route_map_topo2.py @@ -122,17 +122,31 @@ from mininet.topo import Topo # Required to instantiate the topology builder class. from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, create_static_routes, - verify_rib, delete_route_maps, create_bgp_community_lists, - interface_status, create_route_maps, create_prefix_lists, - verify_route_maps, check_address_types, verify_bgp_community, - shutdown_bringup_interface, verify_prefix_lists, reset_config_on_routers, - verify_create_community_list) + start_topology, + write_test_header, + write_test_footer, + create_static_routes, + verify_rib, + delete_route_maps, + create_bgp_community_lists, + interface_status, + create_route_maps, + create_prefix_lists, + verify_route_maps, + check_address_types, + verify_bgp_community, + shutdown_bringup_interface, + verify_prefix_lists, + reset_config_on_routers, + verify_create_community_list, +) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify, verify_bgp_attributes) + verify_bgp_convergence, + create_router_bgp, + 
clear_bgp_and_verify, + verify_bgp_attributes, +) from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -147,10 +161,7 @@ except IOError: # Global variables # Global variables bgp_convergence = False -NETWORK = { - "ipv4": ["11.0.20.1/32", "11.0.20.2/32"], - "ipv6": ["2::1/128", "2::2/128"] -} +NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]} bgp_convergence = False BGP_CONVERGENCE = False @@ -180,7 +191,7 @@ def setup_module(mod): """ testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -205,8 +216,9 @@ def setup_module(mod): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) logger.info("Running setup_module() done") @@ -222,9 +234,10 @@ def teardown_module(mod): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}".format( - time.asctime(time.localtime(time.time())))) - logger.info("="*40) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) ##################################################### @@ -254,134 +267,126 @@ def test_rmap_match_prefix_list_permit_in_and_outbound_prefixes_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit",} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }] - } + "pf_list_1_ipv6": 
[ + {"seqid": 10, "network": "any", "action": "permit",} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - # Create route map + # Create route map input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_" + addr_type - } + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"locPrf": 150, "weight": 100}, }, - "set": { - "localpref": 150, - "weight": 100 - } - }, ], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_" + addr_type - } + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"metric": 50}, }, - "set": { - "med": 50 - } - }, - ] + ], } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { 
- "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -390,48 +395,52 @@ def test_rmap_match_prefix_list_permit_in_and_outbound_prefixes_p0(): # dual stack changes for addr_type in ADDR_TYPES: - result4 = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } # dual stack changes for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result4 = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result4 = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], 
rmap_name, input_dict_3 + ) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying RIB routes dut = "r4" protocol = "bgp" # dual stack changes for addr_type in ADDR_TYPES: - result4 = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } # dual stack changes for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) # Uncomment next line for debugging @@ -462,267 +471,271 @@ def test_modify_set_match_clauses_in_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }], - "pf_list_2_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit",} + ], + "pf_list_2_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ], }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }], - "pf_list_2_ipv6": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit",} + ], + "pf_list_2_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit"} + ], + }, } } - 
} + } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, } - }, - "set": { - "localpref": 150, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, } - }, - "set": { - "med": 50 - } - }] + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - 
"rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } # dual stack changes for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result4 = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result4 = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, 
result4) + tc_name, result4 + ) # Verifying RIB routes dut = "r4" protocol = "bgp" # dual stack changes for addr_type in ADDR_TYPES: - result4 = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify set/match clause of in-used route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 1000, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 2000 + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 1000,}, + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 2000}, + } + ], } - }] - } 
- } + } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed 
\n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -750,37 +763,29 @@ def test_delete_route_maps_p1(): # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "tag": "4001" - } - } - }] + "r3": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + {"action": "deny", "match": {addr_type: {"tag": "4001"}}} + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Delete route maps for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "route_maps": ["rmap_match_tag_1_{}".format(addr_type)] - } - } + input_dict = {"r3": {"route_maps": ["rmap_match_tag_1_{}".format(addr_type)]}} result = delete_route_maps(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) result = verify_route_maps(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) # Uncomment next line for debugging @@ -810,226 +815,223 @@ def test_modify_prefix_list_referenced_by_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit",} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit", - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit",} + ] + }, } } - } + } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: 
{}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150, "weight": 100}, } - }, - "set": { - "localpref": 150, - "weight": 100 - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, } - }, - "set": { - "med": 50 - } - }] + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { 
+ "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = 
verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "deny" - }] - }, - "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "deny" - }] - } + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "deny"} + ] + }, + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "deny"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) sleep(5) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".format( - tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r4" 
protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" "Expected behaviour: routes are not present \n " - "Error: {}".format( - tc_name, result) + "Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1059,234 +1061,240 @@ def test_remove_prefix_list_referenced_by_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } - } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, } - }, - "set": { - "localpref": 150, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 50 - } - }] + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + 
addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map for addr_type in ADDR_TYPES: input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] 
for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Remove/Delete prefix list input_dict_3 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": 
"permit", - "delete": True - }] - }, + "pf_list_1_ipv4": [ + { + "seqid": 10, + "network": "any", + "action": "permit", + "delete": True, + } + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit", - "delete": True - }] + "pf_list_1_ipv6": [ + { + "seqid": 100, + "network": "any", + "action": "permit", + "delete": True, + } + ] + }, } } - } } result = create_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".format( - tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".\ - 
format(tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -1318,157 +1326,170 @@ def test_add_and_remove_community_list_referenced_by_rmap_p0(): # Create route map for addr_type in ADDR_TYPES: input_dict_5 = { - "r1": { - "route_maps": { - "rm_r1_out_{}".format(addr_type): [{ - "action": "permit", - "set": { - "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"} - } - }] + "r1": { + "route_maps": { + "rm_r1_out_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"} + }, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_6 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": "rm_r1_out_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": "rm_r1_out_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rm_r1_out_ipv4", + "direction": "out", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rm_r1_out_ipv6", + "direction": "out", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Create standard 
large commumity-list - input_dict_1 = { - "r3": { - "bgp_community_lists": [ - { - "community_type": "standard", - "action": "permit", - "name": "rmap_lcomm_{}".format(addr_type), - "value": "1:1:1 1:2:3 2:1:1 2:2:2", - "large": True - } - ] - } + input_dict_1 = { + "r3": { + "bgp_community_lists": [ + { + "community_type": "standard", + "action": "permit", + "name": "rmap_lcomm_{}".format(addr_type), + "value": "1:1:1 1:2:3 2:1:1 2:2:2", + "large": True, + } + ] } - result = create_bgp_community_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + } + result = create_bgp_community_lists(tgen, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) # Verify BGP large community is created result = verify_create_community_list(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - # Create route map + # Create route map input_dict_2 = { - "r3": { - "route_maps": { - "rm_r3_in_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type : { - "large-community-list": {"id": "rmap_lcomm_"+ - addr_type} - } - } - }] + "r3": { + "route_maps": { + "rm_r3_in_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "large-community-list": { + "id": "rmap_lcomm_" + addr_type + } + } + }, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_3 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": "rm_r3_in_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": 
{ - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": "rm_r3_in_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rm_r3_in_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rm_r3_in_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) sleep(5) # Verifying RIB routes @@ -1476,25 +1497,25 @@ def test_add_and_remove_community_list_referenced_by_rmap_p0(): protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verify large-community-list dut = "r3" networks = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] - } - input_dict_4 = { - "largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2" + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } + input_dict_4 = {"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2"} for addr_type in ADDR_TYPES: - result = verify_bgp_community(tgen, addr_type, dut, networks[ - addr_type],input_dict_4) + result = verify_bgp_community( + tgen, addr_type, dut, networks[addr_type], input_dict_4 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) # Uncomment next line for debugging @@ 
-1520,242 +1541,220 @@ def test_multiple_match_statement_in_route_map_logical_ORed_p0(): # Api call to advertise networks input_dict_nw1 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "advertise_networks": [ - {"network": "10.0.30.1/32"} - ] - } - }, - "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": {"advertise_networks": [{"network": "10.0.30.1/32"}]} + }, + "ipv6": { + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } } + } result = create_router_bgp(tgen, topo, input_dict_nw1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to advertise networks input_dict_nw2 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "advertise_networks": [ - {"network": "20.0.30.1/32"} - ] - } - }, - "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "2::1/128"} - ] - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": {"advertise_networks": [{"network": "20.0.30.1/32"}]} + }, + "ipv6": { + "unicast": {"advertise_networks": [{"network": "2::1/128"}]} + }, } } } + } result = create_router_bgp(tgen, topo, input_dict_nw2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result 
= create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_2_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_2_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_2_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_2_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - input_dict_3_addr_type ={} + input_dict_3_addr_type = {} # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150}, } - }, - "set": { - "localpref": 150 - } - }] + ] + } } } - } input_dict_3_addr_type[addr_type] = input_dict_3 result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 200 - } - }] + 
"r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 200}, + } + ] + } } } - } input_dict_3_addr_type[addr_type] = input_dict_3 result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_6 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut 
= "r3" - routes = { - "ipv4": ["10.0.30.1/32"], - "ipv6": ["1::1/128"] - } + routes = {"ipv4": ["10.0.30.1/32"], "ipv6": ["1::1/128"]} for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3_addr_type[addr_type]) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3_addr_type[addr_type], + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) # Verifying BGP set attributes - routes = { - "ipv4": ["20.0.30.1/32"], - "ipv6": ["2::1/128"] - } + routes = {"ipv4": ["20.0.30.1/32"], "ipv6": ["2::1/128"]} for addr_type in ADDR_TYPES: - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1785,79 +1784,80 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): input_dict_5 = { "r1": { "route_maps": { - "rm_r1_out_{}".format(addr_type): [{ - "action": "permit", - "set": { - "large_community": { - "num": "1:1:1 1:2:3 2:1:1 2:2:2"} + "rm_r1_out_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"} + }, } - }] + ] } } } result = create_route_maps(tgen, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map for addr_type in ADDR_TYPES: input_dict_6 = { - "r1": { - "bgp": { - "address_family": { - addr_type: { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - 
"rm_r1_out_{}".format(addr_type), - "direction": "out" - }] - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rm_r1_out_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } } result = create_router_bgp(tgen, topo, input_dict_6) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - # Create standard large commumity-list + # Create standard large commumity-list input_dict_1 = { "r3": { "bgp_community_lists": [ @@ -1866,98 +1866,105 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): "action": "permit", "name": "rmap_lcomm_{}".format(addr_type), "value": "1:1:1 1:2:3 2:1:1 2:2:2", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verify BGP large community is created result = verify_create_community_list(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, 
result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: - # Create route map + # Create route map input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type : { - "large_community_list": {"id": "rmap_lcomm_"+ - addr_type} - } - }, - "set": { - "localpref": 150, + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "large_community_list": { + "id": "rmap_lcomm_" + addr_type + } + } + }, + "set": {"locPrf": 150,}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map for addr_type in ADDR_TYPES: input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - addr_type: { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_{}".format(addr_type), - "direction": "in" - }] - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_{}".format( + addr_type + ), + "direction": "in", + } + ] + } + } + } + } + } + } + } + } + } } 
result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # sleep(10) # Verifying RIB routes dut = "r3" @@ -1966,20 +1973,23 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2008,50 +2018,46 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): input_dict_2 = { "r3": { "prefix_lists": { - "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "deny" - }] - }, - "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "deny" - }] - } + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "deny"} + ] + }, + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "deny"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - 
"rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2064,11 +2070,12 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } } @@ -2081,36 +2088,36 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error" - "Routes are still present: {}".format( - tc_name, result) + "Routes are still present: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # 
Remove applied rmap from neighbor @@ -2124,12 +2131,13 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", "direction": "in", - "delete": True - }] + "delete": True, + } + ] } } } @@ -2142,26 +2150,26 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", "direction": "in", - "delete": True - }] + "delete": True, + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2170,7 +2178,8 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2200,50 +2209,45 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { 
- "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - "weight": 100 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150, "weight": 100}, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2256,11 +2260,12 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } } @@ -2273,25 +2278,25 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2300,26 +2305,28 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - 
"ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2328,20 +2335,23 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Flap interface to see if route-map properties are intact # Shutdown interface @@ -2358,8 +2368,7 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): # Verify BGP convergence once interface is up result = verify_bgp_convergence(tgen, topo) - 
assert result is True, ( - "setup_module :Failed \n Error:" " {}".format(result)) + assert result is True, "setup_module :Failed \n Error:" " {}".format(result) # Verifying RIB routes dut = "r3" @@ -2368,20 +2377,23 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2409,22 +2421,21 @@ def test_rmap_without_match_and_set_clause_p0(): # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_no_match_set_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5" - }], - "rmap_no_match_set_2_{}".format(addr_type): [{ - "action": "deny", - "seq_id": "5" - }] + "r3": { + "route_maps": { + "rmap_no_match_set_1_{}".format(addr_type): [ + {"action": "permit", "seq_id": "5"} + ], + "rmap_no_match_set_2_{}".format(addr_type): [ + {"action": "deny", "seq_id": "5"} + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2437,25 +2448,27 @@ def test_rmap_without_match_and_set_clause_p0(): "r1": { 
"dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_1_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_2_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_2_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -2465,36 +2478,37 @@ def test_rmap_without_match_and_set_clause_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_1_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_2_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_2_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2503,17 +2517,18 @@ def test_rmap_without_match_and_set_clause_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".format( - tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: 
{}".format(result)) write_test_footer(tc_name) @@ -2542,74 +2557,69 @@ def test_set_localpref_weight_to_ebgp_and_med_to_ibgp_peers_p0(): input_dict_2 = { "r3": { "prefix_lists": { - "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, - "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map - input_dict_3_addr_type ={} + input_dict_3_addr_type = {} for addr_type in ADDR_TYPES: input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ + "rmap_match_pf_1_{}".format(addr_type): [ + { "action": "permit", "match": { addr_type: { - "prefix_lists": "pf_list_1_{}".format( - addr_type) + "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": { - "med": 50 + "set": {"metric": 50}, + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ + }, + "set": {"locPrf": 150}, + } + ], + "rmap_match_pf_3_{}".format(addr_type): [ + { "action": "permit", "match": { addr_type: { - "prefix_lists": "pf_list_1_{}".format( - addr_type) - }}, - "set": { - "localpref": 150 - } - }], - "rmap_match_pf_3_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format( - addr_type) - }}, - "set": { - "weight": 1000 - } - }] - } + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"weight": 1000}, } - } + ], + } + } + } 
input_dict_3_addr_type[addr_type] = input_dict_3 result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2622,36 +2632,39 @@ def test_set_localpref_weight_to_ebgp_and_med_to_ibgp_peers_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] } } }, "r5": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_3_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_3_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -2661,123 +2674,137 @@ def test_set_localpref_weight_to_ebgp_and_med_to_ibgp_peers_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] } } }, "r5": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_3_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_3_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = 
topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_1" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_2" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3_addr_type[addr_type], - expected=False) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3_addr_type[addr_type], + expected=False, + ) assert result is not True, "Testcase {} : Failed \n" - "Attributes are not set \n Error: 
{}".format( - tc_name, result) + "Attributes are not set \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r5" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r5" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_3" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_3_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3_addr_type[addr_type], - expected=False) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3_addr_type[addr_type], + expected=False, + ) assert result is not True, "Testcase {} : Failed \n" - "Attributes are not set \n Error: {}".format( - tc_name, result) + "Attributes are not set \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -2808,97 +2835,92 @@ def test_multiple_set_on_single_sequence_in_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is 
True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - "weight": 100, - "med": 50 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150, "weight": 100, "metric": 50}, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2907,22 +2929,25 
@@ def test_multiple_set_on_single_sequence_in_rmap_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_1" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2951,150 +2976,147 @@ def test_route_maps_with_continue_clause_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "10", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150 + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + 
{ + "action": "permit", + "seq_id": "10", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150}, + "continue": "30", }, - "continue": "30" - }, - { - "action": "permit", - "seq_id": "20", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "20", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - }, - { - "action": "permit", - "seq_id": "30", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "30", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert 
result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" rmap_name = "rmap_match_pf_1" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] - } - seq_id = { - "ipv4": ["10", "30"], - "ipv6": ["10", "30"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } + seq_id = {"ipv4": ["10", "30"], "ipv6": ["10", "30"]} for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3, seq_id[addr_type]) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3, + seq_id[addr_type], + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -3123,120 +3145,114 @@ def test_route_maps_with_goto_clause_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : 
Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "10", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "10", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "goto": "30", }, - "goto": "30" - }, - { - "action": "permit", - "seq_id": "20", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "20", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - }, - { - "action": "permit", - "seq_id": "30", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "30", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) # tgen.mininet_cli() assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + 
"ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3245,25 +3261,31 @@ def test_route_maps_with_goto_clause_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" rmap_name = "rmap_match_pf_1" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] - } - seq_id = { - "ipv4": ["10", "30"], - "ipv6": ["10", "30"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } + seq_id = {"ipv4": ["10", "30"], "ipv6": ["10", "30"]} for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3, seq_id[addr_type]) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3, + seq_id[addr_type], + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -3292,107 +3314,104 @@ def test_route_maps_with_call_clause_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": 
"any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150 - }, - "call": "rmap_match_pf_2_{}".format(addr_type) - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 200 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150}, + "call": "rmap_match_pf_2_{}".format(addr_type), + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 200}, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { 
- "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3401,29 +3420,34 @@ def test_route_maps_with_call_clause_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_1" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) rmap_name = "rmap_match_pf_2" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + 
result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -3453,150 +3477,149 @@ def test_create_rmap_match_prefix_list_to_deny_in_and_outbound_prefixes_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 50 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "deny", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "deny", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + 
tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) 
assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".\ - format(tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".\ - format(tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -3627,18 +3650,15 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -3650,7 +3670,7 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -3658,84 +3678,82 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map input_dict_3 = { - "r1": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - 
"action": "permit", - "match": { - addr_type: { - "tag": "4001" - } - } - }] + "r1": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + {"action": "permit", "match": {addr_type: {"tag": "4001"}}} + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv6", + "direction": "out", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3745,18 +3763,14 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, 
protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -3786,18 +3800,15 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -3809,7 +3820,7 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -3817,84 +3828,82 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map input_dict_3 = { - "r1": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "tag": "4001" - } - } - }] + "r1": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + {"action": "deny", "match": {addr_type: {"tag": "4001"}}} + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv4", - "direction": 
"out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv6", + "direction": "out", + } + ] + } + } + } + } + } + }, + } + } + } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3904,19 +3913,15 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are denied \n Error: {}".format( - tc_name, result) + "routes are denied \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -3924,6 +3929,7 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): # Uncomment next line for debugging # tgen.mininet_cli() + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-vrf-route-leak-basic/r1/bgpd.conf 
b/tests/topotests/bgp-vrf-route-leak-basic/r1/bgpd.conf index 626c268392..03dfbf9322 100644 --- a/tests/topotests/bgp-vrf-route-leak-basic/r1/bgpd.conf +++ b/tests/topotests/bgp-vrf-route-leak-basic/r1/bgpd.conf @@ -1,14 +1,16 @@ hostname r1 router bgp 99 vrf DONNA + no bgp ebgp-requires-policy address-family ipv4 unicast redistribute connected import vrf EVA ! ! router bgp 99 vrf EVA + no bgp ebgp-requires-policy address-family ipv4 unicast redistribute connected import vrf DONNA ! -!
\ No newline at end of file +! diff --git a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py index b0d60403db..5aba89e3ca 100755 --- a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py +++ b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py @@ -31,7 +31,7 @@ import sys import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -47,7 +47,8 @@ class BGPVRFTopo(Topo): tgen = get_topogen(self) for routern in range(1, 2): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + def setup_module(mod): "Sets up the pytest environment" @@ -58,17 +59,16 @@ def setup_module(mod): for rname, router in tgen.routers().iteritems(): router.run("/bin/bash {}/setup_vrfs".format(CWD)) router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # After loading the configurations, this function loads configured daemons. tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(mod): "Teardown the pytest environment" @@ -77,6 +77,7 @@ def teardown_module(mod): # This function tears down the whole topology. 
tgen.stop_topology() + def test_vrf_route_leak(): logger.info("Ensure that routes are leaked back and forth") tgen = get_topogen() @@ -84,49 +85,50 @@ def test_vrf_route_leak(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] donna = r1.vtysh_cmd("show ip route vrf DONNA json", isjson=True) route0 = donna["10.0.0.0/24"][0] - assert route0['protocol'] == "connected" + assert route0["protocol"] == "connected" route1 = donna["10.0.1.0/24"][0] - assert route1['protocol'] == "bgp" - assert route1['selected'] == True - nhop = route1['nexthops'][0] - assert nhop['fib'] == True + assert route1["protocol"] == "bgp" + assert route1["selected"] == True + nhop = route1["nexthops"][0] + assert nhop["fib"] == True route2 = donna["10.0.2.0/24"][0] - assert route2['protocol'] == "connected" + assert route2["protocol"] == "connected" route3 = donna["10.0.3.0/24"][0] - assert route3['protocol'] == "bgp" - assert route3['selected'] == True - nhop = route3['nexthops'][0] - assert nhop['fib'] == True + assert route3["protocol"] == "bgp" + assert route3["selected"] == True + nhop = route3["nexthops"][0] + assert nhop["fib"] == True eva = r1.vtysh_cmd("show ip route vrf EVA json", isjson=True) route0 = eva["10.0.0.0/24"][0] - assert route0['protocol'] == "bgp" - assert route0['selected'] == True - nhop = route0['nexthops'][0] - assert nhop['fib'] == True + assert route0["protocol"] == "bgp" + assert route0["selected"] == True + nhop = route0["nexthops"][0] + assert nhop["fib"] == True route1 = eva["10.0.1.0/24"][0] - assert route1['protocol'] == "connected" + assert route1["protocol"] == "connected" route2 = eva["10.0.2.0/24"][0] - assert route2['protocol'] == "bgp" - assert route2['selected'] == True - nhop = route2['nexthops'][0] - assert nhop['fib'] == True + assert route2["protocol"] == "bgp" + assert route2["selected"] == True + nhop = route2["nexthops"][0] + assert nhop["fib"] == True route3 = eva["10.0.3.0/24"][0] - assert 
route3['protocol'] == "connected" - #tgen.mininet_cli() + assert route3["protocol"] == "connected" + # tgen.mininet_cli() + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_aggregate-address_origin/r1/bgpd.conf b/tests/topotests/bgp_aggregate-address_origin/r1/bgpd.conf index 528d02af36..9d519fae88 100644 --- a/tests/topotests/bgp_aggregate-address_origin/r1/bgpd.conf +++ b/tests/topotests/bgp_aggregate-address_origin/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_aggregate-address_origin/r2/bgpd.conf b/tests/topotests/bgp_aggregate-address_origin/r2/bgpd.conf index 73d4d0aeea..38cf5aaca7 100644 --- a/tests/topotests/bgp_aggregate-address_origin/r2/bgpd.conf +++ b/tests/topotests/bgp_aggregate-address_origin/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 exit-address-family ! 
diff --git a/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py b/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py index be29d143dd..fa799f8256 100644 --- a/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py +++ b/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py @@ -39,7 +39,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -47,16 +47,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,51 +68,41 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_aggregate_address_origin(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = 
json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 3 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}}, } } return topotest.json_cmp(output, expected) def _bgp_aggregate_address_has_metric(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json")) - expected = { - 'paths': [ - { - 'origin': 'IGP' - } - ] - } + expected = {"paths": [{"origin": "IGP"}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -121,8 +113,11 @@ def test_bgp_aggregate_address_origin(): test_func = functools.partial(_bgp_aggregate_address_has_metric, router) success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) - assert result is None, 'Failed to see applied ORIGIN (igp) for aggregated prefix in "{}"'.format(router) + assert ( + result is None + ), 'Failed to see applied ORIGIN (igp) for aggregated prefix in "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf b/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf index ef34817bb1..292f0e967f 100644 --- a/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf +++ b/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf b/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf index 73d4d0aeea..38cf5aaca7 100644 --- a/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf +++ 
b/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 exit-address-family ! diff --git a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py index d6753e9b23..9c06c9d382 100644 --- a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py +++ b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py @@ -42,7 +42,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -50,16 +50,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -69,51 +71,41 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def 
test_bgp_maximum_prefix_invalid(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 3 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}}, } } return topotest.json_cmp(output, expected) def _bgp_aggregate_address_has_metric(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json")) - expected = { - 'paths': [ - { - 'med': 123 - } - ] - } + expected = {"paths": [{"metric": 123}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -124,8 +116,11 @@ def test_bgp_maximum_prefix_invalid(): test_func = functools.partial(_bgp_aggregate_address_has_metric, router) success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) - assert result is None, 'Failed to see applied metric for aggregated prefix in "{}"'.format(router) + assert ( + result is None + ), 'Failed to see applied metric for aggregated prefix in "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_as_allow_in/bgp_as_allow_in.json b/tests/topotests/bgp_as_allow_in/bgp_as_allow_in.json new file mode 100644 index 0000000000..943876cdac --- /dev/null +++ b/tests/topotests/bgp_as_allow_in/bgp_as_allow_in.json @@ -0,0 +1,266 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": 
"2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r5": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r5": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r5": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + 
"dest_link": { + "r4": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + } + } + } + } + } + }, + "r5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "500", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + } + } + } + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py new file mode 100755 index 0000000000..89b15c46d3 --- /dev/null +++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py @@ -0,0 +1,975 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test bgp allowas-in functionality: + +- Verify that routes coming from same AS are accepted only when + '"allowas-in" is configuerd. +- Verify that "allowas-in" feature works per address-family/VRF + 'basis and doesn't impact the other AFIs. +- Verify that the if number of occurrences of AS number in path is + 'more than the configured allowas-in value then we do not accept + 'the route. +- Verify that when we advertise a network, learned from the same AS + 'via allowas-in command, to an iBGP neighbor we see multiple + 'occurrences. +- Verify that when we advertise a network, learned from the same AS + 'via allowas-in command, to an eBGP neighbor we see multiple + 'occurrences of our own AS based on configured value+1. 
+""" + +import os +import sys +import time +import json +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + create_route_maps, + check_address_types, + step, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_rib, +) +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/bgp_as_allow_in.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"} +NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"} + + +class BGPALLOWASIN(Topo): + """ + Test BGPALLOWASIN - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the 
topology build with Topogen... + tgen = Topogen(BGPALLOWASIN, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_bgp_allowas_in_p0(request): + """ + Verify that routes coming from same AS are accepted only when + "allowas-in" is configuerd. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Advertise prefix 2.2.2.2/32 from Router-1(AS-200).") + step("Advertise an ipv6 prefix 22:22::2/128 from Router-1(AS-200).") + # configure static routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + 'Check BGP table of router R3 using "sh bgp ipv4" and "sh bgp ' + 'ipv6" command.' + ) + step( + "We should not see prefix advertised from R1 in R3's BGP " + "table without allowas-in." + ) + logger.info("Verifying %s routes on r3, route should not be present", addr_type) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=NEXT_HOP_IP[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Expected behavior: routes should not present in rib \n" + "Error: {}".format(tc_name, result) + + step("Configure allowas-in on R3 for R2.") + step("We should see the prefix advertised from R1 in R3's BGP table.") + # Api call to enable allowas-in in bgp process. 
+ input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 1} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_per_addr_family_p0(request): + """ + Verify that "allowas-in" feature works per address-family/VRF + basis and doesn't impact the other AFIs. + + """ + + # This test is applicable only for dual stack. + if "ipv4" not in ADDR_TYPES or "ipv6" not in ADDR_TYPES: + pytest.skip("NOT APPLICABLE") + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Advertise prefix 2.2.2.2/32 from Router-1(AS-200).") + step("Advertise an ipv6 prefix 22:22::2/128 from Router-1(AS-200).") + # configure static routes routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure allowas-in on R3 for R2 under IPv4 addr-family only") + # Api call to enable allowas-in in bgp process. 
+ input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {"allowas-in": {"number_occurences": 1}} + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + static_route_ipv4 = { + "r1": { + "static_routes": [ + {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]} + ] + } + } + + static_route_ipv6 = { + "r1": { + "static_routes": [ + {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]} + ] + } + } + step("We should see R1 advertised prefix only in IPv4 AFI " "not in IPv6 AFI.") + result = verify_rib(tgen, "ipv4", dut, static_route_ipv4, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + result = verify_rib( + tgen, "ipv6", dut, static_route_ipv6, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n" + "Expected behavior: routes are should not be present in ipv6 rib\n" + " Error: {}".format(tc_name, result) + + step("Repeat the same test for IPv6 AFI.") + step("Configure allowas-in on R3 for R2 under IPv6 addr-family only") + # Api call to enable allowas-in in bgp process. 
+ input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": { + "number_occurences": 2, + "delete": True, + } + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {"allowas-in": {"number_occurences": 2}} + } + } + } + } + }, + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + step("We should see R1 advertised prefix only in IPv6 AFI " "not in IPv4 AFI.") + result = verify_rib( + tgen, "ipv4", dut, static_route_ipv4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n" + "Expected behavior: routes should not be present in ipv4 rib\n" + " Error: {}".format(tc_name, result) + result = verify_rib(tgen, "ipv6", dut, static_route_ipv6, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_no_of_occurrences_p0(request): + """ + Verify that the if number of occurrences of AS number in path is + more than the configured allowas-in value then we do not accept + the route. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, static_routes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure a route-map on R1 to prepend AS 4 times.") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r1": { + "route_maps": { + "ASP_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "path": { + "as_num": "200 200 200 200", + "as_action": "prepend", + } + }, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure route map in out direction on R1") + # Configure neighbor for route map + input_dict_7 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "ASP_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_7) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + step('Configure "allowas-in 4" on R3 for R2.') + # Api call to enable allowas-in in bgp process. 
+ input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 4} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + result = verify_rib( + tgen, addr_type, dut, static_routes, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n " + "Expected behavior: routes are should not be present in rib\n" + "Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + step('Configure "allowas-in 5" on R3 for R2.') + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 5} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + result = verify_rib(tgen, addr_type, dut, static_routes, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_sameastoibgp_p1(request): + """ + Verify that when we advertise a network, learned from the same AS + via allowas-in command, to an iBGP neighbor we see multiple + occurrences. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, static_routes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure a route-map on R2 to prepend AS 2 times.") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "route_maps": { + "ASP_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "path": {"as_num": "200 200", "as_action": "prepend"} + }, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure route map in out direction on R2") + # Configure neighbor for route map + input_dict_7 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "ASP_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_7) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step('Configure "allowas-in 3" on R3 for R1.') + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + 
"allowas-in": {"number_occurences": 3} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_1 = { + "r4": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": { + "allowas-in": {"number_occurences": 3} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + dut = "r4" + path = "100 200 200 200" + result = verify_bgp_rib(tgen, addr_type, dut, static_routes, aspath=path) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_sameastoebgp_p1(request): + """ + Verify that when we advertise a network, learned from the same AS + via allowas-in command, to an eBGP neighbor we see multiple + occurrences of our own AS based on configured value+1. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, static_routes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure a route-map on R2 to prepend AS 2 times.") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "route_maps": { + "ASP_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "path": {"as_num": "200 200", "as_action": "prepend"} + }, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure route map in out direction on R2") + # Configure neighbor for route map + input_dict_7 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "ASP_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_7) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + step('Configure "allowas-in 3" on R3 for R1.') + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + 
"dest_link": { + "r3": { + "allowas-in": {"number_occurences": 3} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + dut = "r5" + path = "200 100 200 200 200" + result = verify_bgp_rib(tgen, addr_type, dut, static_routes, aspath=path) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/__init__.py b/tests/topotests/bgp_as_wide_bgp_identifier/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/__init__.py diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r1/bgpd.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r1/bgpd.conf new file mode 100644 index 0000000000..75741a3c3e --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r1/bgpd.conf @@ -0,0 +1,6 @@ +! exit1 +router bgp 65001 + bgp router-id 10.10.10.10 + no bgp ebgp-requires-policy + neighbor 192.168.255.1 remote-as 65002 +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r1/zebra.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r1/zebra.conf new file mode 100644 index 0000000000..c060e1402e --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r1/zebra.conf @@ -0,0 +1,6 @@ +! exit1 +interface r1-eth0 + ip address 192.168.255.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r2/bgpd.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r2/bgpd.conf new file mode 100644 index 0000000000..18a6c66f69 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r2/bgpd.conf @@ -0,0 +1,7 @@ +! 
spine +router bgp 65002 + bgp router-id 10.10.10.10 + no bgp ebgp-requires-policy + neighbor 192.168.255.2 remote-as 65001 + neighbor 192.168.255.3 remote-as 65002 +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r2/zebra.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r2/zebra.conf new file mode 100644 index 0000000000..a45520f97f --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r2/zebra.conf @@ -0,0 +1,6 @@ +! spine +interface r2-eth0 + ip address 192.168.255.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r3/bgpd.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r3/bgpd.conf new file mode 100644 index 0000000000..27bf126000 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r3/bgpd.conf @@ -0,0 +1,6 @@ +! exit2 +router bgp 65002 + bgp router-id 10.10.10.10 + no bgp ebgp-requires-policy + neighbor 192.168.255.1 remote-as 65002 +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r3/zebra.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r3/zebra.conf new file mode 100644 index 0000000000..2f4dbc5efd --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r3/zebra.conf @@ -0,0 +1,6 @@ +! exit2 +interface r3-eth0 + ip address 192.168.255.3/24 +! +ip forwarding +! 
diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py b/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py new file mode 100644 index 0000000000..ebd6075b52 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python + +# +# test_bgp_as_wide_bgp_identifier.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +rfc6286: Autonomous-System-Wide Unique BGP Identifier for BGP-4 +Test if 'Bad BGP Identifier' notification is sent only to +internal peers (autonomous-system-wide). eBGP peers are not +affected and should work. 
+""" + +import os +import sys +import json +import time +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + + +class TemplateTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + for routern in range(1, 4): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.iteritems(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_as_wide_bgp_identifier(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_converge(router): + output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) + expected = {"192.168.255.1": {"bgpState": "Established"}} + return topotest.json_cmp(output, expected) + + def _bgp_failed(router): + output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) + expected = { + "192.168.255.1": { + "lastNotificationReason": "OPEN Message Error/Bad BGP Identifier" + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge, tgen.gears["r1"]) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Failed to converge: 
"{}"'.format(tgen.gears["r1"]) + + test_func = functools.partial(_bgp_failed, tgen.gears["r3"]) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Bad BGP Identifier notification not sent: "{}"'.format( + tgen.gears["r3"] + ) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_comm-list_delete/r1/bgpd.conf b/tests/topotests/bgp_comm-list_delete/r1/bgpd.conf index 6e1273f464..9518894351 100644 --- a/tests/topotests/bgp_comm-list_delete/r1/bgpd.conf +++ b/tests/topotests/bgp_comm-list_delete/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 address-family ipv4 unicast redistribute connected route-map r2-out diff --git a/tests/topotests/bgp_comm-list_delete/r2/bgpd.conf b/tests/topotests/bgp_comm-list_delete/r2/bgpd.conf index 3d354d56b1..e4c1167745 100644 --- a/tests/topotests/bgp_comm-list_delete/r2/bgpd.conf +++ b/tests/topotests/bgp_comm-list_delete/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 address-family ipv4 neighbor 192.168.255.1 route-map r1-in in diff --git a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py index ed350ebfeb..314ad12a6d 100644 --- a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py +++ b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py @@ -37,7 +37,7 @@ import time import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -45,16 +45,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = 
get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -64,20 +66,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() @@ -86,20 +88,30 @@ def test_bgp_maximum_prefix_invalid(): def _bgp_converge(router): while True: - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - if output['192.168.255.1']['bgpState'] == 'Established': - if output['192.168.255.1']['addressFamilyInfo']['ipv4Unicast']['acceptedPrefixCounter'] == 2: + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + if output["192.168.255.1"]["bgpState"] == "Established": + if ( + output["192.168.255.1"]["addressFamilyInfo"]["ipv4Unicast"][ + "acceptedPrefixCounter" + ] + == 2 + ): return True def _bgp_comm_list_delete(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")) - if '333:333' in output['paths'][0]['community']['list']: + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json") + ) + if "333:333" in output["paths"][0]["community"]["list"]: return False return True - if 
_bgp_converge('r2'): - assert _bgp_comm_list_delete('r2') == True + if _bgp_converge("r2"): + assert _bgp_comm_list_delete("r2") == True + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_communities_topo1/bgp_communities.json b/tests/topotests/bgp_communities_topo1/bgp_communities.json new file mode 100644 index 0000000000..da6aec239f --- /dev/null +++ b/tests/topotests/bgp_communities_topo1/bgp_communities.json @@ -0,0 +1,175 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r0": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" 
+ }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + }, + "r0": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py new file mode 100644 index 0000000000..7d960d6916 --- /dev/null +++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py @@ -0,0 +1,635 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test bgp community functionality: +- Verify routes are not advertised when NO-ADVERTISE Community is applied + +""" + +import os +import sys +import time +import json +import pytest + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_route_maps, + create_prefix_lists, + create_route_maps, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_rib, +) +from lib.topojson import build_topo_from_json, build_config_from_json +from copy import deepcopy + +# Reading the data from JSON File for topology creation +jsonFile = "{}/bgp_communities.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"} +NEXT_HOP_IP = {} + + +class BGPCOMMUNITIES(Topo): + """ + Test BGPCOMMUNITIES - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(BGPCOMMUNITIES, mod.__name__) + # ... and here it calls Mininet initialization functions. 
+ + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_bgp_no_advertise_community_p0(request): + """ + Verify routes are not advertised when NO-ADVERTISE Community is applied + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + NEXT_HOP_IP = { + "ipv4": topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0], + } + + # configure static routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static and connected in Router BGP " "in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static"}, + {"redist_type": "connected"}, + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "BGP neighbors are up, static and connected route advertised from" + " R1 are present on R2 BGP table and RIB using show ip bgp and " + " show ip route" + ) + step( + "Static and connected route advertised from R1 are present on R3" + " BGP table and RIB using show ip bgp and show ip route" + ) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure prefix list P1 on R2 to permit route coming from R1") + # Create ip prefix list + input_dict_2 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_list_1_{}".format(addr_type): [ + {"seqid": 10, "network": "any", "action": 
"permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"community": {"num": "no-advertise"}}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + "Apply route-map RM1 on R2, R2 to R3 BGP neighbor with no" + " advertise community" + ) + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After advertising no advertise community to BGP neighbor " + "static and connected router got removed from R3 verify using " + "show ip bgp & show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n " + " Routes still present in R3 router. 
Error: {}".format(tc_name, result) + + step("Remove and Add no advertise community") + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + "delete": True, + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing no advertise community from BGP neighbor " + "static and connected router got advertised to R3 and " + "removing route-map, verify route using show ip bgp" + " and show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. 
Error: {}".format(tc_name, result) + + step("Repeat above steps when IBGP nbr configured between R1, R2 & R2, R3") + topo1 = deepcopy(topo) + + topo1["routers"]["r1"]["bgp"]["local_as"] = "100" + topo1["routers"]["r2"]["bgp"]["local_as"] = "100" + topo1["routers"]["r3"]["bgp"]["local_as"] = "100" + + for rtr in ["r1", "r2", "r3"]: + if "bgp" in topo1["routers"][rtr].keys(): + delete_bgp = {rtr: {"bgp": {"delete": True}}} + result = create_router_bgp(tgen, topo1, delete_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + config_bgp = { + rtr: {"bgp": {"local_as": topo1["routers"][rtr]["bgp"]["local_as"]}} + } + result = create_router_bgp(tgen, topo1, config_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + build_config_from_json(tgen, topo1, save_bkup=False) + + step("verify bgp convergence before starting test case") + + bgp_convergence = verify_bgp_convergence(tgen, topo1) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + # configure static routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static and connected in Router " "BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static"}, + {"redist_type": "connected"}, + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "BGP neighbors are up, static and 
connected route advertised from" + " R1 are present on R2 BGP table and RIB using show ip bgp and " + " show ip route" + ) + step( + "Static and connected route advertised from R1 are present on R3" + " BGP table and RIB using show ip bgp and show ip route" + ) + + dut = "r2" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure prefix list P1 on R2 to permit route coming from R1") + # Create ip prefix list + input_dict_2 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_list_1_{}".format(addr_type): [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"community": {"num": "no-advertise"}}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + "Apply route-map RM1 on R2, R2 to R3 BGP neighbor with no" + " advertise community" + ) + + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n 
Error: {}".format( + tc_name, result + ) + + step( + "After advertising no advertise community to BGP neighbor " + "static and connected router got removed from R3 verify using " + "show ip bgp & show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + step("Remove and Add no advertise community") + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + "delete": True, + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing no advertise community from BGP neighbor " + "static and connected router got advertised to R3 and " + "removing route verify using show ip bgp and " + " show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. 
Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_default-route_route-map/r1/bgpd.conf b/tests/topotests/bgp_default-route_route-map/r1/bgpd.conf index a9925ab661..12e56e27c4 100644 --- a/tests/topotests/bgp_default-route_route-map/r1/bgpd.conf +++ b/tests/topotests/bgp_default-route_route-map/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 address-family ipv4 unicast neighbor 192.168.255.2 default-originate route-map default diff --git a/tests/topotests/bgp_default-route_route-map/r2/bgpd.conf b/tests/topotests/bgp_default-route_route-map/r2/bgpd.conf index a8a6c49f4d..b6b560aa4d 100644 --- a/tests/topotests/bgp_default-route_route-map/r2/bgpd.conf +++ b/tests/topotests/bgp_default-route_route-map/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py b/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py index 992ee85ab1..ba9a6dffb5 100644 --- a/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py +++ b/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py @@ -42,7 +42,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -50,16 +50,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - 
tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -69,51 +71,41 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_default_originate_route_map(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 1 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 1}}, } } return topotest.json_cmp(output, expected) def _bgp_default_route_has_metric(router): output = json.loads(router.vtysh_cmd("show ip bgp 0.0.0.0/0 json")) - expected = { - 'paths': [ - { - 'med': 123 - } - ] - } + expected = {"paths": [{"metric": 123}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -124,8 +116,11 @@ def test_bgp_default_originate_route_map(): test_func = functools.partial(_bgp_default_route_has_metric, router) success, result = topotest.run_and_expect(test_func, None, count=60, 
wait=0.5) - assert result is None, 'Failed to see applied metric for default route in "{}"'.format(router) + assert ( + result is None + ), 'Failed to see applied metric for default route in "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_distance_change/r1/bgpd.conf b/tests/topotests/bgp_distance_change/r1/bgpd.conf index 67994702bc..cd2ef675fc 100644 --- a/tests/topotests/bgp_distance_change/r1/bgpd.conf +++ b/tests/topotests/bgp_distance_change/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 exit-address-family ! diff --git a/tests/topotests/bgp_distance_change/r2/bgpd.conf b/tests/topotests/bgp_distance_change/r2/bgpd.conf index 206f626da4..0faec85032 100644 --- a/tests/topotests/bgp_distance_change/r2/bgpd.conf +++ b/tests/topotests/bgp_distance_change/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 address-family ipv4 redistribute connected diff --git a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py index cf95aec098..6d09cd2e8c 100644 --- a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py +++ b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py @@ -41,7 +41,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -49,16 +49,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + 
tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -68,60 +70,51 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r1'] + router = tgen.gears["r1"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) def _bgp_distance_change(router): - router.vtysh_cmd(""" + router.vtysh_cmd( + """ configure terminal router bgp 65000 address-family ipv4 unicast distance bgp 123 123 123 - """) + """ + ) def _bgp_check_distance_change(router): output = json.loads(router.vtysh_cmd("show ip route 172.16.255.254/32 json")) - expected = { - '172.16.255.254/32': [ - { - 'protocol': 'bgp', - 'distance': 123 - } - ] - } + expected = {"172.16.255.254/32": [{"protocol": "bgp", "distance": 123}]} return topotest.json_cmp(output, expected) test_func = 
functools.partial(_bgp_converge, router) @@ -134,8 +127,11 @@ def test_bgp_maximum_prefix_invalid(): test_func = functools.partial(_bgp_check_distance_change, router) success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5) - assert result is None, 'Failed to see applied BGP distance in RIB "{}"'.format(router) + assert result is None, 'Failed to see applied BGP distance in RIB "{}"'.format( + router + ) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_ebgp_requires_policy/r1/bgpd.conf b/tests/topotests/bgp_ebgp_requires_policy/r1/bgpd.conf index e06fa08b57..aaa01ebcf9 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/r1/bgpd.conf +++ b/tests/topotests/bgp_ebgp_requires_policy/r1/bgpd.conf @@ -1,5 +1,4 @@ router bgp 65000 - bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 1000 neighbor 192.168.255.2 local-as 500 address-family ipv4 unicast diff --git a/tests/topotests/bgp_ebgp_requires_policy/r2/bgpd.conf b/tests/topotests/bgp_ebgp_requires_policy/r2/bgpd.conf index 0549697ff0..27427a9aaa 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/r2/bgpd.conf +++ b/tests/topotests/bgp_ebgp_requires_policy/r2/bgpd.conf @@ -1,2 +1,3 @@ router bgp 1000 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 500 diff --git a/tests/topotests/bgp_ebgp_requires_policy/r3/bgpd.conf b/tests/topotests/bgp_ebgp_requires_policy/r3/bgpd.conf index b4e304d82a..2deb4b663d 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/r3/bgpd.conf +++ b/tests/topotests/bgp_ebgp_requires_policy/r3/bgpd.conf @@ -1,5 +1,4 @@ router bgp 65000 - bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 1000 neighbor 192.168.255.2 local-as 500 address-family ipv4 unicast diff --git a/tests/topotests/bgp_ebgp_requires_policy/r4/bgpd.conf b/tests/topotests/bgp_ebgp_requires_policy/r4/bgpd.conf index 0549697ff0..27427a9aaa 100644 --- 
a/tests/topotests/bgp_ebgp_requires_policy/r4/bgpd.conf +++ b/tests/topotests/bgp_ebgp_requires_policy/r4/bgpd.conf @@ -1,2 +1,3 @@ router bgp 1000 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 500 diff --git a/tests/topotests/bgp_ebgp_requires_policy/r5/bgpd.conf b/tests/topotests/bgp_ebgp_requires_policy/r5/bgpd.conf index 99e6b6818d..92a2797921 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/r5/bgpd.conf +++ b/tests/topotests/bgp_ebgp_requires_policy/r5/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65000 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_ebgp_requires_policy/r6/bgpd.conf b/tests/topotests/bgp_ebgp_requires_policy/r6/bgpd.conf index 164f975cb7..342f53d4c7 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/r6/bgpd.conf +++ b/tests/topotests/bgp_ebgp_requires_policy/r6/bgpd.conf @@ -1,3 +1,2 @@ router bgp 65000 - bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 diff --git a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py index 6660b4e866..18e36311ad 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py +++ b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py @@ -27,6 +27,16 @@ bgp_ebgp_requires_policy.py: Test if eBGP sender without a filter applied to the peer is allowed to send advertisements. + +Scenario 1: + r1 has a filter applied for outgoing direction, + r2 receives 192.168.255.1/32. +Scenario 2: + r3 does not have a filter applied for outgoing direction, + r4 does not receive 192.168.255.1/32. +Scenario 3: + r5 and r6 establish iBGP session which in turn should ignore + RFC8212. All routes for both directions MUST work. 
""" import os @@ -37,7 +47,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -45,24 +55,26 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 7): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r5"]) + switch.add_link(tgen.gears["r6"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r5']) - switch.add_link(tgen.gears['r6']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -72,20 +84,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_ebgp_requires_policy(): tgen = get_topogen() @@ -93,51 +105,46 @@ def test_ebgp_requires_policy(): pytest.skip(tgen.errors) def _bgp_converge(router): - output = 
json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - expected = { - '192.168.255.1': { - 'bgpState': 'Established' - } - } + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + expected = {"192.168.255.1": {"bgpState": "Established"}} return topotest.json_cmp(output, expected) def _bgp_has_routes(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 routes json")) - expected = { - 'routes': { - '172.16.255.254/32': [ - { - 'valid': True - } - ] - } - } + output = json.loads( + tgen.gears[router].vtysh_cmd( + "show ip bgp neighbor 192.168.255.1 routes json" + ) + ) + expected = {"routes": {"172.16.255.254/32": [{"valid": True}]}} return topotest.json_cmp(output, expected) - test_func = functools.partial(_bgp_converge, 'r2') + test_func = functools.partial(_bgp_converge, "r2") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(router) + assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(tgen.gears["r2"]) - test_func = functools.partial(_bgp_has_routes, 'r2') + test_func = functools.partial(_bgp_has_routes, "r2") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(router) + assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(tgen.gears["r2"]) - test_func = functools.partial(_bgp_converge, 'r4') + test_func = functools.partial(_bgp_converge, "r4") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'Failed bgp convergence (r4) in "{}"'.format(router) + assert success is True, 'Failed bgp convergence (r4) in "{}"'.format(tgen.gears["r4"]) - test_func = functools.partial(_bgp_has_routes, 'r4') + test_func = functools.partial(_bgp_has_routes, "r4") success, 
result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(router) + assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(tgen.gears["r4"]) - test_func = functools.partial(_bgp_converge, 'r6') + test_func = functools.partial(_bgp_converge, "r6") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(router) + assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(tgen.gears["r6"]) - test_func = functools.partial(_bgp_has_routes, 'r6') + test_func = functools.partial(_bgp_has_routes, "r6") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(router) + assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(tgen.gears["r6"]) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py b/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py index 115c7793ad..47cc0eb39d 100755 --- a/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py +++ b/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py @@ -25,65 +25,72 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")) from lib.ltemplate import * + def test_check_linux_vrf(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - 
ltemplateTest('scripts/check_linux_vrf.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_vrf.py", False, CliOnFail, CheckFunc) + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def SKIP_test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 
'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) -#manual data path setup test - remove once have bgp/zebra vrf path working + +# manual data path setup test - remove once have bgp/zebra vrf path working def test_check_linux_mpls(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/check_linux_mpls.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc) + def test_del_bgp_instances(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/del_bgp_instances.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/del_bgp_instances.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_ipv6_rtadv/r1/bgpd.conf b/tests/topotests/bgp_ipv6_rtadv/r1/bgpd.conf index 1623b4578b..4d96bec2cb 100644 
--- a/tests/topotests/bgp_ipv6_rtadv/r1/bgpd.conf +++ b/tests/topotests/bgp_ipv6_rtadv/r1/bgpd.conf @@ -1,5 +1,6 @@ router bgp 101 bgp router-id 10.254.254.1 + no bgp ebgp-requires-policy neighbor r2g peer-group neighbor r2g remote-as external neighbor r2g bfd diff --git a/tests/topotests/bgp_ipv6_rtadv/r2/bgpd.conf b/tests/topotests/bgp_ipv6_rtadv/r2/bgpd.conf index bf42d21812..4d02fc4f29 100644 --- a/tests/topotests/bgp_ipv6_rtadv/r2/bgpd.conf +++ b/tests/topotests/bgp_ipv6_rtadv/r2/bgpd.conf @@ -1,5 +1,6 @@ router bgp 102 bgp router-id 10.254.254.2 + no bgp ebgp-requires-policy neighbor r2g peer-group neighbor r2g remote-as external neighbor r2g bfd diff --git a/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py b/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py index 6cf223af42..10b2f3595f 100644 --- a/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py +++ b/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -48,17 +48,19 @@ from mininet.topo import Topo class BGPIPV6RTADVTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 2 routers. 
- tgen.add_router('r1') - tgen.add_router('r2') + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): "Sets up the pytest environment" @@ -69,17 +71,16 @@ def setup_module(mod): for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. tgen.start_router() + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -99,44 +100,51 @@ def test_protocols_convergence(): # Check IPv4 routing tables. logger.info("Checking IPv4 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip route json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg # Check IPv6 routing tables. 
logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ipv6 route json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/ce1/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/ce1/bgpd.conf index bd10248d7b..4ce87f773b 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/ce1/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/ce1/bgpd.conf @@ -7,6 +7,7 @@ log monitor notifications log commands router bgp 5226 bgp router-id 99.0.0.1 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as 5226 neighbor 192.168.1.1 update-source 192.168.1.2 address-family ipv4 unicast @@ -28,6 +29,3 @@ route-map rm-nh permit 10 ! 
end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/ce2/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/ce2/bgpd.conf index ab86c5e1b8..0d5ec148b1 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/ce2/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/ce2/bgpd.conf @@ -7,6 +7,7 @@ log monitor notifications log commands router bgp 5226 bgp router-id 99.0.0.2 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as 5226 neighbor 192.168.1.1 update-source 192.168.1.2 address-family ipv4 unicast @@ -28,6 +29,3 @@ route-map rm-nh permit 10 ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/ce3/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/ce3/bgpd.conf index 7d239b0bd5..d58b9b1c90 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/ce3/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/ce3/bgpd.conf @@ -7,6 +7,7 @@ log monitor notifications log commands router bgp 5226 bgp router-id 99.0.0.3 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as 5226 neighbor 192.168.1.1 update-source 192.168.1.2 address-family ipv4 unicast @@ -28,6 +29,3 @@ route-map rm-nh permit 10 ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py index b4649059bc..05db9ab14b 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py @@ -88,12 +88,15 @@ from lib.ltemplate import ltemplateRtrCmd from mininet.topo import Topo import shutil + CWD = os.path.dirname(os.path.realpath(__file__)) # test name based on directory TEST = os.path.basename(CWD) + class ThisTestTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -102,68 +105,71 @@ class ThisTestTopo(Topo): # between routers, switches and hosts. 
# # Create P/PE routers - tgen.add_router('r1') - #check for mpls + tgen.add_router("r1") + # check for mpls if tgen.hasmpls != True: - logger.info('MPLS not available, tests will be skipped') + logger.info("MPLS not available, tests will be skipped") return for routern in range(2, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create CE routers for routern in range(1, 4): - tgen.add_router('ce{}'.format(routern)) + tgen.add_router("ce{}".format(routern)) - #CE/PE links - tgen.add_link(tgen.gears['ce1'], tgen.gears['r1'], 'ce1-eth0', 'r1-eth4') - tgen.add_link(tgen.gears['ce2'], tgen.gears['r3'], 'ce2-eth0', 'r3-eth4') - tgen.add_link(tgen.gears['ce3'], tgen.gears['r4'], 'ce3-eth0', 'r4-eth4') + # CE/PE links + tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4") + tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4") + tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4") # Create a switch with just one router connected to it to simulate a # empty network. 
switch = {} - switch[0] = tgen.add_switch('sw0') - switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0') - switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0') + switch[0] = tgen.add_switch("sw0") + switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0") + switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0") - switch[1] = tgen.add_switch('sw1') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0') - switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0') + switch[1] = tgen.add_switch("sw1") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0") + switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0") + + switch[1] = tgen.add_switch("sw2") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1") - switch[1] = tgen.add_switch('sw2') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth2') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth1') def ltemplatePreRouterStartHook(): cc = ltemplateRtrCmd() tgen = get_topogen() - logger.info('pre router-start hook') - #check for mpls + logger.info("pre router-start hook") + # check for mpls if tgen.hasmpls != True: - logger.info('MPLS not available, skipping setup') + logger.info("MPLS not available, skipping setup") return False - #check for normal init + # check for normal init if len(tgen.net) == 1: - logger.info('Topology not configured, skipping setup') + logger.info("Topology not configured, skipping setup") return False - #configure r2 mpls interfaces - intfs = ['lo', 'r2-eth0', 'r2-eth1', 'r2-eth2'] + # configure r2 mpls interfaces + intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"] for intf in intfs: - cc.doCmd(tgen, 'r2', 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - #configure MPLS - rtrs = ['r1', 'r3', 'r4'] - cmds = ['echo 1 > /proc/sys/net/mpls/conf/lo/input'] + cc.doCmd(tgen, "r2", "echo 1 > 
/proc/sys/net/mpls/conf/{}/input".format(intf)) + # configure MPLS + rtrs = ["r1", "r3", "r4"] + cmds = ["echo 1 > /proc/sys/net/mpls/conf/lo/input"] for rtr in rtrs: router = tgen.gears[rtr] for cmd in cmds: cc.doCmd(tgen, rtr, cmd) - intfs = ['lo', rtr+'-eth0', rtr+'-eth4'] + intfs = ["lo", rtr + "-eth0", rtr + "-eth4"] for intf in intfs: - cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - logger.info('setup mpls input') + cc.doCmd( + tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf) + ) + logger.info("setup mpls input") return True + def ltemplatePostRouterStartHook(): - logger.info('post router-start hook') + logger.info("post router-start hook") return True - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/r1/bgpd.conf index 7ec941ee6b..33041262f6 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/r1/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/r1/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 1.1.1.1 bgp cluster-id 1.1.1.1 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 5226 neighbor 192.168.1.2 update-source 192.168.1.1 neighbor 192.168.1.2 route-reflector-client diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/r2/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/r2/bgpd.conf index 241c2ac0ae..524051426b 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/r2/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/r2/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 2.2.2.2 bgp cluster-id 2.2.2.2 + no bgp ebgp-requires-policy neighbor 1.1.1.1 remote-as 5226 neighbor 1.1.1.1 update-source 2.2.2.2 neighbor 3.3.3.3 remote-as 5226 @@ -28,6 +29,3 @@ router bgp 5226 neighbor 4.4.4.4 route-reflector-client exit-address-family end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/r3/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/r3/bgpd.conf index 5591c633c6..29b9f0da6c 
100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/r3/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/r3/bgpd.conf @@ -8,8 +8,9 @@ log commands router bgp 5226 bgp router-id 3.3.3.3 bgp cluster-id 3.3.3.3 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 5226 - neighbor 192.168.1.2 update-source 192.168.1.2 + neighbor 192.168.1.2 update-source 192.168.1.2 neighbor 192.168.1.2 route-reflector-client neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 3.3.3.3 @@ -37,6 +38,3 @@ router bgp 5226 vnc redistribute ipv4 bgp-direct ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/r4/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_direct/r4/bgpd.conf index 145390d724..e09b505ee4 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/r4/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/r4/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 4.4.4.4 bgp cluster-id 4.4.4.4 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 5226 neighbor 192.168.1.2 update-source 192.168.1.1 neighbor 192.168.1.2 route-reflector-client @@ -37,6 +38,3 @@ router bgp 5226 vnc redistribute ipv4 bgp-direct ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py index 3a24367a56..3f1157ad72 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py @@ -1,51 +1,193 @@ from lutil import luCommand -luCommand('r1','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH') -luCommand('r3','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH') -luCommand('r4','vtysh -c "show bgp next"','99.0.0.. 
valid', 'wait', 'See CE static NH') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes') -luCommand('ce1','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('r1','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('ce2','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('r3','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('ce3','vtysh -c "show bgp ipv4 uni 5.1.2.0/24"','','none','See CE routes') -luCommand('r4','vtysh -c "show bgp ipv4 uni 5.1.2.0/24"','','none','See CE routes') -luCommand('r1','vtysh -c "add vrf cust1 prefix 99.0.0.1/32"','.','none','IP Address') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','wait','Local Registration') -luCommand('r1','vtysh -c "show vnc registrations imported"','2 out of 2 imported','wait','Imported Registrations') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"','i99.0.0.1/32','wait','See R1s static address') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"','i99.0.0.1/32','wait','See R1s static address') -luCommand('r3','vtysh -c "show bgp ipv4 vpn rd 10:1"','i5.*i5','wait','See R1s imports') -luCommand('r4','vtysh -c "show bgp ipv4 vpn rd 10:1"','i5.*i5','wait','See R1s imports') +luCommand( + "r1", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH" +) +luCommand( + "r3", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH" +) +luCommand( + "r4", 'vtysh -c "show bgp next"', "99.0.0.. 
valid", "wait", "See CE static NH" +) +luCommand("r1", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes") +luCommand("r3", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes") +luCommand("r4", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes") +luCommand("ce1", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("r1", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("ce2", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("r3", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("ce3", 'vtysh -c "show bgp ipv4 uni 5.1.2.0/24"', "", "none", "See CE routes") +luCommand("r4", 'vtysh -c "show bgp ipv4 uni 5.1.2.0/24"', "", "none", "See CE routes") -luCommand('r3','vtysh -c "add vrf cust1 prefix 99.0.0.2/32"','.','none','IP Address') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','wait','Local Registration') -have2ndImports = luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','none','Imported Registrations',2) +luCommand( + "r1", 'vtysh -c "add vrf cust1 prefix 99.0.0.1/32"', ".", "none", "IP Address" +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "wait", + "Local Registration", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "wait", + "Imported Registrations", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.1/32", + "wait", + "See R1s static address", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.1/32", + "wait", + "See R1s static address", +) +luCommand( + "r3", 'vtysh -c "show bgp ipv4 vpn rd 10:1"', "i5.*i5", "wait", "See R1s imports" +) +luCommand( + "r4", 'vtysh -c "show bgp ipv4 vpn rd 10:1"', "i5.*i5", "wait", "See R1s imports" +) + +luCommand( + "r3", 'vtysh -c "add vrf cust1 prefix 99.0.0.2/32"', ".", "none", 
"IP Address" +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "wait", + "Local Registration", +) +have2ndImports = luCommand( + "r3", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "none", + "Imported Registrations", + 2, +) if have2ndImports: - luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','pass','Imported Registrations') -luCommand('r1','vtysh -c "show bgp ipv4 vpn"','i99.0.0.2/32','wait','See R3s static address') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"','i99.0.0.2/32','wait','See R3s static address') + luCommand( + "r3", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "pass", + "Imported Registrations", + ) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.2/32", + "wait", + "See R3s static address", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.2/32", + "wait", + "See R3s static address", +) if have2ndImports: - luCommand('r1','vtysh -c "show bgp ipv4 vpn rd 10:3"','i5.*i5','none','See R3s imports') - luCommand('r4','vtysh -c "show bgp ipv4 vpn rd 10:3"','i5.*i5','none','See R3s imports') + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn rd 10:3"', + "i5.*i5", + "none", + "See R3s imports", + ) + luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn rd 10:3"', + "i5.*i5", + "none", + "See R3s imports", + ) -luCommand('r4','vtysh -c "add vrf cust1 prefix 99.0.0.3/32"','.','none','IP Address') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','wait','Local Registration') -luCommand('r4','vtysh -c "show vnc registrations imported"','2 out of 2 imported','wait','Imported Registrations') -luCommand('r1','vtysh -c "show bgp ipv4 vpn"','i99.0.0.3/32','wait','See R4s static address') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"','i99.0.0.3/32','wait','See R4s static address') -luCommand('r1','vtysh -c "show bgp ipv4 vpn rd 10:4"','i5.*i5','wait','See R4s imports') 
-luCommand('r3','vtysh -c "show bgp ipv4 vpn rd 10:4"','i5.*i5','wait','See R4s imports') +luCommand( + "r4", 'vtysh -c "add vrf cust1 prefix 99.0.0.3/32"', ".", "none", "IP Address" +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "wait", + "Local Registration", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "wait", + "Imported Registrations", +) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.3/32", + "wait", + "See R4s static address", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.3/32", + "wait", + "See R4s static address", +) +luCommand( + "r1", 'vtysh -c "show bgp ipv4 vpn rd 10:4"', "i5.*i5", "wait", "See R4s imports" +) +luCommand( + "r3", 'vtysh -c "show bgp ipv4 vpn rd 10:4"', "i5.*i5", "wait", "See R4s imports" +) -luCommand('r1','vtysh -c "show vnc registrations remote"','5.1.2.0/24 .*5.1.3.0/24','wait','R4s registrations') -luCommand('r3','vtysh -c "show vnc registrations remote"','5.1.2.0/24 .*5.1.3.0/24','wait','R4s registrations') +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "5.1.2.0/24 .*5.1.3.0/24", + "wait", + "R4s registrations", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "5.1.2.0/24 .*5.1.3.0/24", + "wait", + "R4s registrations", +) if have2ndImports: - luCommand('r1','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations') - luCommand('r3','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations') -luCommand('r4','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations') -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') + luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "5.1.0.0/24 
.*5.1.1.0/24", + "wait", + "Remote registrations", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "5.1.0.0/24 .*5.1.1.0/24", + "wait", + "Remote registrations", + ) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "5.1.0.0/24 .*5.1.1.0/24", + "wait", + "Remote registrations", +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py index 1317a510d1..ea059c576e 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py @@ -1,20 +1,64 @@ from lutil import luCommand -luCommand('ce1','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping') -luCommand('ce2','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping') -luCommand('ce3','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping') -luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r4','ping 2.2.2.2 -c 1',' 0. 
packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180) -luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180) -luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') -luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping') -luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') + +luCommand("ce1", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping") +luCommand("ce2", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping") +luCommand("ce3", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping") +luCommand("ce1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand( + "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r4", "ping 2.2.2.2 -c 1", " 0. 
packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r2", + 'vtysh -c "show bgp summary"', + " 00:0.* 00:0.* 00:0", + "wait", + "Core adjacencies up", + 180, +) +luCommand( + "r1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r4", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r1", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r3", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r4", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) +luCommand( + "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping" +) +luCommand( + "r4", "ping 3.3.3.3 -c 1", " 0. 
packet loss", "wait", "PE->PE3 (loopback) ping" +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py index 492be9e4da..96b4978261 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py @@ -1,17 +1,55 @@ from lutil import luCommand -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','7 routes and 7','wait','Local and remote routes') -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','7 routes and 9','wait','Local and remote routes') -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','7 routes and 7','wait','Local and remote routes') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI') -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI') -have2ndImports = luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','none','Imported Registrations',2) + +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "7 routes and 7", + "wait", + "Local and remote routes", +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "7 routes and 9", + "wait", + "Local and remote routes", +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "7 routes and 7", + "wait", + "Local and remote routes", +) +luCommand( + "r1", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI" +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Unicast SAFI", +) +luCommand( + "r3", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI" +) +luCommand( + "r4", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI" +) +have2ndImports = luCommand( + 
"r3", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "none", + "Imported Registrations", + 2, +) if have2ndImports: - num = '9 routes and 9' + num = "9 routes and 9" else: - num = '7 routes and 7' -luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') + num = "7 routes and 7" +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py index 3a2f037833..9f21d99913 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py @@ -1,17 +1,114 @@ from lutil import luCommand -luCommand('r1','vtysh -c "clear vrf cust1 prefix 99.0.0.1/32"','.','none','Cleared VRF route') -luCommand('r3','vtysh -c "clear vrf cust1 prefix 99.0.0.2/32"','.','none','Cleared VRF route') -luCommand('r4','vtysh -c "clear vrf cust1 prefix 99.0.0.3/32"','.','none','Cleared VRF route') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','fail','Local Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','fail','Local Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','fail','Local Registration cleared') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated') -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI') 
-luCommand('r3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated') -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes') -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes') -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes') -luCommand('r1','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') + +luCommand( + "r1", + 'vtysh -c "clear vrf cust1 prefix 99.0.0.1/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r3", + 'vtysh -c "clear vrf cust1 prefix 99.0.0.2/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r4", + 'vtysh -c "clear vrf cust1 prefix 99.0.0.3/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "fail", + "Local Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "fail", + "Local Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "fail", + "Local Registration cleared", +) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Unicast SAFI", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", +) +luCommand( + "ce1", 
+ 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py index f710c84c37..d226904102 100755 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py @@ -25,46 +25,51 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")) from lib.ltemplate import * + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def test_add_routes(): CliOnFail = None # For debugging, uncomment the 
next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) + def test_cleanup_all(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/bgpd.conf 
b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/bgpd.conf index a38fb1e9a1..3786350996 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/bgpd.conf @@ -9,6 +9,7 @@ log file bgpd.log router bgp 5227 bgp router-id 99.0.0.1 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as 5227 neighbor 192.168.1.1 update-source 192.168.1.2 address-family ipv4 unicast @@ -41,6 +42,3 @@ route-map sharp-nh permit 10 ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/bgpd.conf index 3aeb9f9c9f..ffe2d54f58 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/bgpd.conf @@ -9,6 +9,7 @@ log file bgpd.log router bgp 5227 bgp router-id 99.0.0.2 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as 5227 neighbor 192.168.1.1 update-source 192.168.1.2 address-family ipv4 unicast @@ -41,6 +42,3 @@ route-map sharp-nh permit 10 ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf index a65b36f364..31a00b8f73 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf @@ -9,6 +9,7 @@ log file bgpd.log router bgp 5227 bgp router-id 99.0.0.3 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as 5227 neighbor 192.168.1.1 update-source 192.168.1.2 address-family ipv4 unicast @@ -31,6 +32,3 @@ route-map rm-nh permit 10 ! 
end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf index cb08db5314..f57f96bec4 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf @@ -9,6 +9,7 @@ log file bgpd.log router bgp 5228 vrf ce4-cust2 bgp router-id 99.0.0.4 + no bgp ebgp-requires-policy neighbor 192.168.2.1 remote-as 5228 neighbor 192.168.2.1 update-source 192.168.2.2 address-family ipv4 unicast @@ -31,6 +32,3 @@ route-map rm-nh permit 10 ! end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py index e62d139a0c..fb919f02d0 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py @@ -89,12 +89,15 @@ from lib.ltemplate import ltemplateRtrCmd from mininet.topo import Topo import shutil + CWD = os.path.dirname(os.path.realpath(__file__)) # test name based on directory TEST = os.path.basename(CWD) + class ThisTestTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -103,125 +106,148 @@ class ThisTestTopo(Topo): # between routers, switches and hosts. 
# # Create P/PE routers - #check for mpls - tgen.add_router('r1') + # check for mpls + tgen.add_router("r1") if tgen.hasmpls != True: - logger.info('MPLS not available, tests will be skipped') + logger.info("MPLS not available, tests will be skipped") return mach = platform.machine() krel = platform.release() - if mach[:1] == 'a' and topotest.version_cmp(krel, '4.11') < 0: - logger.info('Need Kernel version 4.11 to run on arm processor') + if mach[:1] == "a" and topotest.version_cmp(krel, "4.11") < 0: + logger.info("Need Kernel version 4.11 to run on arm processor") return for routern in range(2, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create CE routers for routern in range(1, 5): - tgen.add_router('ce{}'.format(routern)) + tgen.add_router("ce{}".format(routern)) - #CE/PE links - tgen.add_link(tgen.gears['ce1'], tgen.gears['r1'], 'ce1-eth0', 'r1-eth4') - tgen.add_link(tgen.gears['ce2'], tgen.gears['r3'], 'ce2-eth0', 'r3-eth4') - tgen.add_link(tgen.gears['ce3'], tgen.gears['r4'], 'ce3-eth0', 'r4-eth4') - tgen.add_link(tgen.gears['ce4'], tgen.gears['r4'], 'ce4-eth0', 'r4-eth5') + # CE/PE links + tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4") + tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4") + tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4") + tgen.add_link(tgen.gears["ce4"], tgen.gears["r4"], "ce4-eth0", "r4-eth5") # Create a switch with just one router connected to it to simulate a # empty network. 
switch = {} - switch[0] = tgen.add_switch('sw0') - switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0') - switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0') + switch[0] = tgen.add_switch("sw0") + switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0") + switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0") - switch[1] = tgen.add_switch('sw1') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0') - switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0') + switch[1] = tgen.add_switch("sw1") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0") + switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0") + + switch[1] = tgen.add_switch("sw2") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1") - switch[1] = tgen.add_switch('sw2') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth2') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth1') l3mdev_accept = 0 + def ltemplatePreRouterStartHook(): global l3mdev_accept cc = ltemplateRtrCmd() krel = platform.release() tgen = get_topogen() - logger.info('pre router-start hook, kernel=' + krel) + logger.info("pre router-start hook, kernel=" + krel) - if topotest.version_cmp(krel, '4.15') >= 0 and \ - topotest.version_cmp(krel, '4.18') <= 0: + if ( + topotest.version_cmp(krel, "4.15") >= 0 + and topotest.version_cmp(krel, "4.18") <= 0 + ): l3mdev_accept = 1 - if topotest.version_cmp(krel, '5.0') >= 0: + if topotest.version_cmp(krel, "5.0") >= 0: l3mdev_accept = 1 - logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)) - #check for mpls + logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept)) + # check for mpls if tgen.hasmpls != True: - logger.info('MPLS not available, skipping setup') + logger.info("MPLS not available, skipping setup") return False - #check for normal init + # check for normal init if 
len(tgen.net) == 1: - logger.info('Topology not configured, skipping setup') + logger.info("Topology not configured, skipping setup") return False - #trace errors/unexpected output + # trace errors/unexpected output cc.resetCounts() - #configure r2 mpls interfaces - intfs = ['lo', 'r2-eth0', 'r2-eth1', 'r2-eth2'] + # configure r2 mpls interfaces + intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"] for intf in intfs: - cc.doCmd(tgen, 'r2', 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - - #configure cust1 VRFs & MPLS - rtrs = ['r1', 'r3', 'r4'] - cmds = ['ip link add {0}-cust1 type vrf table 10', - 'ip ru add oif {0}-cust1 table 10', - 'ip ru add iif {0}-cust1 table 10', - 'ip link set dev {0}-cust1 up', - 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)] + cc.doCmd(tgen, "r2", "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)) + + # configure cust1 VRFs & MPLS + rtrs = ["r1", "r3", "r4"] + cmds = [ + "ip link add {0}-cust1 type vrf table 10", + "ip ru add oif {0}-cust1 table 10", + "ip ru add iif {0}-cust1 table 10", + "ip link set dev {0}-cust1 up", + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept), + ] for rtr in rtrs: router = tgen.gears[rtr] for cmd in cmds: cc.doCmd(tgen, rtr, cmd.format(rtr)) - cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth4 master {0}-cust1'.format(rtr)) - intfs = [rtr+'-cust1', 'lo', rtr+'-eth0', rtr+'-eth4'] + cc.doCmd(tgen, rtr, "ip link set dev {0}-eth4 master {0}-cust1".format(rtr)) + intfs = [rtr + "-cust1", "lo", rtr + "-eth0", rtr + "-eth4"] for intf in intfs: - cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - logger.info('setup {0} vrf {0}-cust1, {0}-eth4. 
enabled mpls input.'.format(rtr)) - #configure cust2 VRFs & MPLS - rtrs = ['r4'] - cmds = ['ip link add {0}-cust2 type vrf table 20', - 'ip ru add oif {0}-cust2 table 20', - 'ip ru add iif {0}-cust2 table 20', - 'ip link set dev {0}-cust2 up'] + cc.doCmd( + tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf) + ) + logger.info( + "setup {0} vrf {0}-cust1, {0}-eth4. enabled mpls input.".format(rtr) + ) + # configure cust2 VRFs & MPLS + rtrs = ["r4"] + cmds = [ + "ip link add {0}-cust2 type vrf table 20", + "ip ru add oif {0}-cust2 table 20", + "ip ru add iif {0}-cust2 table 20", + "ip link set dev {0}-cust2 up", + ] for rtr in rtrs: for cmd in cmds: cc.doCmd(tgen, rtr, cmd.format(rtr)) - cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth5 master {0}-cust2'.format(rtr)) - intfs = [rtr+'-cust2', rtr+'-eth5'] + cc.doCmd(tgen, rtr, "ip link set dev {0}-eth5 master {0}-cust2".format(rtr)) + intfs = [rtr + "-cust2", rtr + "-eth5"] for intf in intfs: - cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - logger.info('setup {0} vrf {0}-cust2, {0}-eth5. enabled mpls input.'.format(rtr)) - #put ce4-eth0 into a VRF (no default instance!) - rtrs = ['ce4'] - cmds = ['ip link add {0}-cust2 type vrf table 20', - 'ip ru add oif {0}-cust2 table 20', - 'ip ru add iif {0}-cust2 table 20', - 'ip link set dev {0}-cust2 up', - 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)] + cc.doCmd( + tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf) + ) + logger.info( + "setup {0} vrf {0}-cust2, {0}-eth5. enabled mpls input.".format(rtr) + ) + # put ce4-eth0 into a VRF (no default instance!) 
+ rtrs = ["ce4"] + cmds = [ + "ip link add {0}-cust2 type vrf table 20", + "ip ru add oif {0}-cust2 table 20", + "ip ru add iif {0}-cust2 table 20", + "ip link set dev {0}-cust2 up", + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept), + ] for rtr in rtrs: for cmd in cmds: cc.doCmd(tgen, rtr, cmd.format(rtr)) - cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth0 master {0}-cust2'.format(rtr)) + cc.doCmd(tgen, rtr, "ip link set dev {0}-eth0 master {0}-cust2".format(rtr)) if cc.getOutput() != 4: InitSuccess = False - logger.info('Unexpected output seen ({} times, tests will be skipped'.format(cc.getOutput())) + logger.info( + "Unexpected output seen ({} times, tests will be skipped".format( + cc.getOutput() + ) + ) else: InitSuccess = True - logger.info('VRF config successful!') + logger.info("VRF config successful!") return InitSuccess + def ltemplatePostRouterStartHook(): - logger.info('post router-start hook') + logger.info("post router-start hook") return True diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf index c1bad0b7c6..a9549e8fee 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf @@ -16,6 +16,7 @@ log file bgpd.log debugging router bgp 5226 bgp router-id 1.1.1.1 bgp cluster-id 1.1.1.1 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 1.1.1.1 @@ -31,6 +32,7 @@ router bgp 5226 router bgp 5227 vrf r1-cust1 bgp router-id 192.168.1.1 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 5227 neighbor 192.168.1.2 update-source 192.168.1.1 diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r2/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r2/bgpd.conf index 4ccb0ca5c0..cda6d9429a 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r2/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r2/bgpd.conf @@ -10,6 +10,7 @@ log file bgpd.log debugging router bgp 5226 bgp router-id 
2.2.2.2 bgp cluster-id 2.2.2.2 + no bgp ebgp-requires-policy neighbor 1.1.1.1 remote-as 5226 neighbor 1.1.1.1 update-source 2.2.2.2 neighbor 3.3.3.3 remote-as 5226 @@ -30,6 +31,3 @@ router bgp 5226 neighbor 4.4.4.4 route-reflector-client exit-address-family end - - - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r3/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r3/bgpd.conf index c4b6ac9bb4..e2a8de7db7 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r3/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r3/bgpd.conf @@ -11,6 +11,7 @@ log file bgpd.log router bgp 5226 bgp router-id 3.3.3.3 bgp cluster-id 3.3.3.3 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 3.3.3.3 @@ -25,9 +26,10 @@ router bgp 5226 router bgp 5227 vrf r3-cust1 bgp router-id 192.168.1.1 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 5227 - neighbor 192.168.1.2 update-source 192.168.1.1 + neighbor 192.168.1.2 update-source 192.168.1.1 address-family ipv4 unicast neighbor 192.168.1.2 activate diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r4/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r4/bgpd.conf index 6295406e69..7b267a6ee1 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r4/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r4/bgpd.conf @@ -14,6 +14,7 @@ log file bgpd.log debug router bgp 5226 bgp router-id 4.4.4.4 bgp cluster-id 4.4.4.4 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 4.4.4.4 @@ -28,9 +29,10 @@ router bgp 5226 router bgp 5227 vrf r4-cust1 bgp router-id 192.168.1.1 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as 5227 - neighbor 192.168.1.2 update-source 192.168.1.1 + neighbor 192.168.1.2 update-source 192.168.1.1 address-family ipv4 unicast neighbor 192.168.1.2 activate @@ -47,6 +49,7 @@ router bgp 5227 vrf r4-cust1 router bgp 5228 vrf r4-cust2 bgp router-id 192.168.2.1 + no bgp ebgp-requires-policy neighbor 192.168.2.2 remote-as 5228 neighbor 
192.168.2.2 update-source 192.168.2.1 diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py index 19b73d2057..5c7427763d 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py @@ -1,13 +1,59 @@ from lutil import luCommand -luCommand('r1','vtysh -c "add vrf r1-cust1 prefix 99.0.0.1/32"','.','none','IP Address') -luCommand('r3','vtysh -c "add vrf r3-cust1 prefix 99.0.0.2/32"','.','none','IP Address') -luCommand('r4','vtysh -c "add vrf r4-cust1 prefix 99.0.0.3/32"','.','none','IP Address') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','pass','Local Registration') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','pass','Local Registration') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','pass','Local Registration') -luCommand('r1','vtysh -c "show vnc registrations remote"','4 out of 4','wait','Remote Registration', 10) -luCommand('r3','vtysh -c "show vnc registrations remote"','6 out of 6','wait','Remote Registration', 10) -luCommand('r4','vtysh -c "show vnc registrations remote"','4 out of 4','wait','Remote Registration', 10) -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') + +luCommand( + "r1", 'vtysh -c "add vrf r1-cust1 prefix 99.0.0.1/32"', ".", "none", "IP Address" +) +luCommand( + "r3", 'vtysh -c "add vrf r3-cust1 prefix 99.0.0.2/32"', ".", "none", "IP Address" +) +luCommand( + "r4", 'vtysh -c "add vrf r4-cust1 prefix 99.0.0.3/32"', ".", "none", "IP Address" +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "pass", + "Local Registration", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "pass", + "Local Registration", +) +luCommand( 
+ "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "pass", + "Local Registration", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "4 out of 4", + "wait", + "Remote Registration", + 10, +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "6 out of 6", + "wait", + "Remote Registration", + 10, +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "4 out of 4", + "wait", + "Remote Registration", + 10, +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py index c2b0cf9e7a..53cf353fa0 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py @@ -1,18 +1,64 @@ from lutil import luCommand -luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','Adjacencies up',180) -luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r4','ping 2.2.2.2 -c 1',' 0. 
packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180) -luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180) -luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180) -luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0.* 00:0','pass','All adjacencies up') -luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') -luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping') -luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') + +luCommand("ce1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand( + "ce4", 'vtysh -c "show bgp vrf all summary"', " 00:0", "wait", "Adjacencies up", 180 +) +luCommand( + "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r4", "ping 2.2.2.2 -c 1", " 0. 
packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r2", + 'vtysh -c "show bgp summary"', + " 00:0.* 00:0.* 00:0", + "wait", + "Core adjacencies up", + 180, +) +luCommand( + "r1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r4", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r1", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r3", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r4", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) +luCommand( + "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping" +) +luCommand( + "r4", "ping 3.3.3.3 -c 1", " 0. 
packet loss", "wait", "PE->PE3 (loopback) ping" +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py index 9827a9e2c1..20113b1058 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py @@ -1,47 +1,83 @@ from lutil import luCommand, luLast from lib import topotest -ret = luCommand('r2', 'ip -M route show', - '\d*(?= via inet 10.0.2.4 dev r2-eth1)','wait','See mpls route to r4') +ret = luCommand( + "r2", + "ip -M route show", + "\d*(?= via inet 10.0.2.4 dev r2-eth1)", + "wait", + "See mpls route to r4", +) found = luLast() if ret != False and found != None: label4r4 = found.group(0) - luCommand('r2', 'ip -M route show', - '.', 'pass', - 'See %s as label to r4' % label4r4) - ret = luCommand('r2', 'ip -M route show', - '\d*(?= via inet 10.0.1.1 dev r2-eth0)', 'wait', - 'See mpls route to r1') + luCommand("r2", "ip -M route show", ".", "pass", "See %s as label to r4" % label4r4) + ret = luCommand( + "r2", + "ip -M route show", + "\d*(?= via inet 10.0.1.1 dev r2-eth0)", + "wait", + "See mpls route to r1", + ) found = luLast() if ret != False and found != None: label4r1 = found.group(0) - luCommand('r2', 'ip -M route show', - '.', 'pass', 'See %s as label to r1' % label4r1) - - luCommand('r1', 'ip route show vrf r1-cust1', - '99.0.0.4', 'pass', 'VRF->MPLS PHP route installed') - luCommand('r4', 'ip route show vrf r4-cust2', - '99.0.0.1','pass', 'VRF->MPLS PHP route installed') - - luCommand('r1', 'ip -M route show', '101', 'pass', 'MPLS->VRF route installed') - luCommand('r4', 'ip -M route show', '1041', 'pass', 'MPLS->VRF1 route installed') - luCommand('r4', 'ip -M route show', '1042', 'pass', 'MPLS->VRF2 route installed') - - luCommand('ce1', 'ping 99.0.0.4 -I 99.0.0.1 -c 1', - ' 0. 
packet loss','wait','CE->CE (loopback) ping - l3vpn+zebra case') - #skip due to VRF weirdness - #luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', + luCommand("r2", "ip -M route show", ".", "pass", "See %s as label to r1" % label4r1) + + luCommand( + "r1", + "ip route show vrf r1-cust1", + "99.0.0.4", + "pass", + "VRF->MPLS PHP route installed", + ) + luCommand( + "r4", + "ip route show vrf r4-cust2", + "99.0.0.1", + "pass", + "VRF->MPLS PHP route installed", + ) + + luCommand("r1", "ip -M route show", "101", "pass", "MPLS->VRF route installed") + luCommand("r4", "ip -M route show", "1041", "pass", "MPLS->VRF1 route installed") + luCommand("r4", "ip -M route show", "1042", "pass", "MPLS->VRF2 route installed") + + luCommand( + "ce1", + "ping 99.0.0.4 -I 99.0.0.1 -c 1", + " 0. packet loss", + "wait", + "CE->CE (loopback) ping - l3vpn+zebra case", + ) + # skip due to VRF weirdness + # luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', # ' 0. packet loss','wait','CE->CE (loopback) ping - l3vpn+zebra case') - luCommand('ce1', 'ping 99.0.0.4 -I 99.0.0.1 -c 1', - ' 0. packet loss','wait','CE->CE (loopback) ping') - #luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', + luCommand( + "ce1", + "ping 99.0.0.4 -I 99.0.0.1 -c 1", + " 0. packet loss", + "wait", + "CE->CE (loopback) ping", + ) + # luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', # ' 0. packet loss','wait','CE->CE (loopback) ping') - luCommand('r3', 'ip -M route show', '103', 'pass', 'MPLS->VRF route installed') - luCommand('ce2', 'ping 99.0.0.3 -I 99.0.0.2 -c 1', - ' 0. packet loss','wait','CE2->CE3 (loopback) ping') - luCommand('ce3', 'ping 99.0.0.4 -I 99.0.0.3 -c 1', - ' 0. packet loss','wait','CE3->CE4 (loopback) ping') + luCommand("r3", "ip -M route show", "103", "pass", "MPLS->VRF route installed") + luCommand( + "ce2", + "ping 99.0.0.3 -I 99.0.0.2 -c 1", + " 0. packet loss", + "wait", + "CE2->CE3 (loopback) ping", + ) + luCommand( + "ce3", + "ping 99.0.0.4 -I 99.0.0.3 -c 1", + " 0. 
packet loss", + "wait", + "CE3->CE4 (loopback) ping", + ) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py index 547a5949a3..b552ea0406 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py @@ -1,27 +1,93 @@ from lutil import luCommand from customize import l3mdev_accept -l3mdev_rtrs = ['r1', 'r3', 'r4', 'ce4'] +l3mdev_rtrs = ["r1", "r3", "r4", "ce4"] for rtr in l3mdev_rtrs: - luCommand(rtr,'sysctl net.ipv4.tcp_l3mdev_accept',' = \d*','none','') + luCommand(rtr, "sysctl net.ipv4.tcp_l3mdev_accept", " = \d*", "none", "") found = luLast() - luCommand(rtr,'ss -naep',':179','pass','IPv4:bgp, l3mdev{}'.format(found.group(0))) - luCommand(rtr,'ss -naep',':.*:179','pass','IPv6:bgp') - luCommand(rtr,'sysctl net.ipv4.tcp_l3mdev_accept',' = {}'.format(l3mdev_accept),'pass','l3mdev matches expected (real/expected{}/{})'.format(found.group(0),l3mdev_accept)) + luCommand( + rtr, "ss -naep", ":179", "pass", "IPv4:bgp, l3mdev{}".format(found.group(0)) + ) + luCommand(rtr, "ss -naep", ":.*:179", "pass", "IPv6:bgp") + luCommand( + rtr, + "sysctl net.ipv4.tcp_l3mdev_accept", + " = {}".format(l3mdev_accept), + "pass", + "l3mdev matches expected (real/expected{}/{})".format( + found.group(0), l3mdev_accept + ), + ) -rtrs = ['r1', 'r3', 'r4'] +rtrs = ["r1", "r3", "r4"] for rtr in rtrs: - luCommand(rtr, 'ip link show type vrf {}-cust1'.format(rtr),'cust1: .*UP','pass','VRF cust1 intf up') - luCommand(rtr, 'ip add show vrf {}-cust1'.format(rtr),'r..eth4.*UP','pass','VRF cust1 IP intf up') - luCommand(rtr, 'ip add show vrf {}-cust1'.format(rtr),'192.168','pass','VRF cust1 IP config') - luCommand(rtr, 'ip route show vrf {}-cust1'.format(rtr),'192.168...0/24 dev r.-eth','pass','VRF cust1 interface route') -luCommand('r4', 'ip link show type vrf r4-cust2','cust2: .*UP','pass','VRF cust2 up') -luCommand('r4', 
'ip add show vrf r4-cust2','r..eth5.*UP.* 192.168','pass','VRF cust1 IP config') -luCommand(rtr, 'ip route show vrf r4-cust2'.format(rtr),'192.168...0/24 dev r.-eth','pass','VRF cust2 interface route') -rtrs = ['ce1', 'ce2', 'ce3'] + luCommand( + rtr, + "ip link show type vrf {}-cust1".format(rtr), + "cust1: .*UP", + "pass", + "VRF cust1 intf up", + ) + luCommand( + rtr, + "ip add show vrf {}-cust1".format(rtr), + "r..eth4.*UP", + "pass", + "VRF cust1 IP intf up", + ) + luCommand( + rtr, + "ip add show vrf {}-cust1".format(rtr), + "192.168", + "pass", + "VRF cust1 IP config", + ) + luCommand( + rtr, + "ip route show vrf {}-cust1".format(rtr), + "192.168...0/24 dev r.-eth", + "pass", + "VRF cust1 interface route", + ) +luCommand("r4", "ip link show type vrf r4-cust2", "cust2: .*UP", "pass", "VRF cust2 up") +luCommand( + "r4", + "ip add show vrf r4-cust2", + "r..eth5.*UP.* 192.168", + "pass", + "VRF cust1 IP config", +) +luCommand( + rtr, + "ip route show vrf r4-cust2".format(rtr), + "192.168...0/24 dev r.-eth", + "pass", + "VRF cust2 interface route", +) +rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'ip route show','192.168...0/24 dev ce.-eth0','pass','CE interface route') - luCommand(rtr,'ping 192.168.1.1 -c 1',' 0. packet loss','wait','CE->PE ping') -luCommand('ce4', 'ip link show type vrf ce4-cust2','cust2: .*UP','pass','VRF cust2 up') -luCommand('ce4', 'ip route show vrf ce4-cust2','192.168...0/24 dev ce.-eth0','pass','CE interface route') -luCommand('ce4','ping 192.168.2.1 -c 1 -I ce4-cust2',' 0. packet loss','wait','CE4->PE4 ping') + luCommand( + rtr, + "ip route show", + "192.168...0/24 dev ce.-eth0", + "pass", + "CE interface route", + ) + luCommand(rtr, "ping 192.168.1.1 -c 1", " 0. 
packet loss", "wait", "CE->PE ping") +luCommand( + "ce4", "ip link show type vrf ce4-cust2", "cust2: .*UP", "pass", "VRF cust2 up" +) +luCommand( + "ce4", + "ip route show vrf ce4-cust2", + "192.168...0/24 dev ce.-eth0", + "pass", + "CE interface route", +) +luCommand( + "ce4", + "ping 192.168.2.1 -c 1 -I ce4-cust2", + " 0. packet loss", + "wait", + "CE4->PE4 ping", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py index e47ea5f2cd..f5a29b95c9 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py @@ -1,5 +1,5 @@ from lutil import luCommand -from bgprib import bgpribRequireVpnRoutes,bgpribRequireUnicastRoutes +from bgprib import bgpribRequireVpnRoutes, bgpribRequireUnicastRoutes ######################################################################## # CE routers: contain routes they originate @@ -12,32 +12,32 @@ from bgprib import bgpribRequireVpnRoutes,bgpribRequireUnicastRoutes # ce4 vtysh -c "show bgp ipv4 uni" want = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.1'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.1'}, - {'p':'99.0.0.1/32', 'n':'0.0.0.0'}, + {"p": "5.1.0.0/24", "n": "99.0.0.1"}, + {"p": "5.1.1.0/24", "n": "99.0.0.1"}, + {"p": "99.0.0.1/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce1','ipv4','','Cust 1 routes in ce1',want) +bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes in ce1", want) want = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.2'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.2'}, - {'p':'99.0.0.2/32', 'n':'0.0.0.0'}, + {"p": "5.1.0.0/24", "n": "99.0.0.2"}, + {"p": "5.1.1.0/24", "n": "99.0.0.2"}, + {"p": "99.0.0.2/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce2','ipv4','','Cust 2 routes in ce1',want) +bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 2 routes in ce1", want) want = [ - {'p':'5.1.2.0/24', 'n':'99.0.0.3'}, - {'p':'5.1.3.0/24', 'n':'99.0.0.3'}, - 
{'p':'99.0.0.3/32', 'n':'0.0.0.0'}, + {"p": "5.1.2.0/24", "n": "99.0.0.3"}, + {"p": "5.1.3.0/24", "n": "99.0.0.3"}, + {"p": "99.0.0.3/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce3','ipv4','','Cust 3 routes in ce1',want) +bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 3 routes in ce1", want) want = [ - {'p':'5.4.2.0/24', 'n':'99.0.0.4'}, - {'p':'5.4.3.0/24', 'n':'99.0.0.4'}, - {'p':'99.0.0.4/32', 'n':'0.0.0.0'}, + {"p": "5.4.2.0/24", "n": "99.0.0.4"}, + {"p": "5.4.3.0/24", "n": "99.0.0.4"}, + {"p": "99.0.0.4/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 4 routes in ce1',want) +bgpribRequireUnicastRoutes("ce4", "ipv4", "ce4-cust2", "Cust 4 routes in ce1", want) ######################################################################## @@ -47,116 +47,169 @@ bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 4 routes in ce1',want) # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" # want_r1_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.1'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.1'}, - {'p':'99.0.0.1/32', 'n':'192.168.1.2'}, + {"p": "5.1.0.0/24", "n": "99.0.0.1"}, + {"p": "5.1.1.0/24", "n": "99.0.0.1"}, + {"p": "99.0.0.1/32", "n": "192.168.1.2"}, ] -bgpribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer 1 routes in r1 vrf',want_r1_cust1_routes) +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_routes +) want_r3_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.2'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.2'}, - {'p':'99.0.0.2/32', 'n':'192.168.1.2'}, + {"p": "5.1.0.0/24", "n": "99.0.0.2"}, + {"p": "5.1.1.0/24", "n": "99.0.0.2"}, + {"p": "99.0.0.2/32", "n": "192.168.1.2"}, ] -bgpribRequireUnicastRoutes('r3','ipv4','r3-cust1','Customer 1 routes in r3 vrf',want_r3_cust1_routes) +bgpribRequireUnicastRoutes( + "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_cust1_routes +) want_r4_cust1_routes = [ - {'p':'5.1.2.0/24', 'n':'99.0.0.3'}, - {'p':'5.1.3.0/24', 
'n':'99.0.0.3'}, - {'p':'99.0.0.3/32', 'n':'192.168.1.2'}, + {"p": "5.1.2.0/24", "n": "99.0.0.3"}, + {"p": "5.1.3.0/24", "n": "99.0.0.3"}, + {"p": "99.0.0.3/32", "n": "192.168.1.2"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust1','Customer 1 routes in r4 vrf',want_r4_cust1_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_cust1_routes +) want_r4_cust2_routes = [ - {'p':'5.4.2.0/24', 'n':'99.0.0.4'}, - {'p':'5.4.3.0/24', 'n':'99.0.0.4'}, - {'p':'99.0.0.4/32', 'n':'192.168.2.2'}, + {"p": "5.4.2.0/24", "n": "99.0.0.4"}, + {"p": "5.4.3.0/24", "n": "99.0.0.4"}, + {"p": "99.0.0.4/32", "n": "192.168.2.2"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf',want_r4_cust2_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_cust2_routes +) ######################################################################## # PE routers: core unicast routes are empty ######################################################################## -luCommand('r1','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core 
Unicast SAFI clean", +) ######################################################################## # PE routers: local ce-originated routes are leaked to vpn ######################################################################## # nhzero is for the new code that sets nh of locally-leaked routes to 0 -#nhzero = 1 +# nhzero = 1 nhzero = 0 if nhzero: - luCommand('r1','vtysh -c "show bgp ipv4 vpn"', - 'Distinguisher: *10:1.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.1/32 *0.0.0.0 ', - 'pass','vrf->vpn routes') - luCommand('r3','vtysh -c "show bgp ipv4 vpn"', - 'Distinguisher: *10:3.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.2/32 *0.0.0.0 ', - 'pass','vrf->vpn routes') + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + "Distinguisher: *10:1.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.1/32 *0.0.0.0 ", + "pass", + "vrf->vpn routes", + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + "Distinguisher: *10:3.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.2/32 *0.0.0.0 ", + "pass", + "vrf->vpn routes", + ) want = [ - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'0.0.0.0'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'0.0.0.0'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'0.0.0.0'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'0.0.0.0'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'0.0.0.0'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'0.0.0.0'}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "0.0.0.0"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "0.0.0.0"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "0.0.0.0"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "0.0.0.0"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "0.0.0.0"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "0.0.0.0"}, ] - bgpribRequireVpnRoutes('r4','vrf->vpn routes',want) + bgpribRequireVpnRoutes("r4", "vrf->vpn routes", want) else: - luCommand('r1','vtysh -c "show bgp ipv4 vpn"', - r'Distinguisher: *10:1.*5.1.0.0/24 *99.0.0.1\b.*5.1.1.0/24 *99.0.0.1\b.*99.0.0.1/32 *192.168.1.2\b', - 'pass','vrf->vpn routes') 
- luCommand('r3','vtysh -c "show bgp ipv4 vpn"', - r'Distinguisher: *10:3.*5.1.0.0/24 *99.0.0.2\b.*5.1.1.0/24 *99.0.0.2\b.*99.0.0.2/32 *192.168.1.2\b', - 'pass','vrf->vpn routes') + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + r"Distinguisher: *10:1.*5.1.0.0/24 *99.0.0.1\b.*5.1.1.0/24 *99.0.0.1\b.*99.0.0.1/32 *192.168.1.2\b", + "pass", + "vrf->vpn routes", + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + r"Distinguisher: *10:3.*5.1.0.0/24 *99.0.0.2\b.*5.1.1.0/24 *99.0.0.2\b.*99.0.0.2/32 *192.168.1.2\b", + "pass", + "vrf->vpn routes", + ) want = [ - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'99.0.0.3'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'99.0.0.3'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'192.168.1.2'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'99.0.0.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'99.0.0.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'192.168.2.2'}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "99.0.0.3"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "99.0.0.3"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "192.168.1.2"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "99.0.0.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "99.0.0.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "192.168.2.2"}, ] - bgpribRequireVpnRoutes('r4','vrf->vpn routes',want) + bgpribRequireVpnRoutes("r4", "vrf->vpn routes", want) ######################################################################## # PE routers: exporting vrfs set MPLS vrf labels in kernel ######################################################################## -luCommand('r1','vtysh -c "show mpls table"',' 101 *BGP *r1-cust1','pass','vrf labels') -luCommand('r3','vtysh -c "show mpls table"',' 103 *BGP *r3-cust1','pass','vrf labels') -luCommand('r4','vtysh -c "show mpls table"',' 1041 *BGP *r4-cust1 .*1042 *BGP *r4-cust2','pass','vrf labels') +luCommand( + "r1", 'vtysh -c "show mpls table"', " 101 *BGP *r1-cust1", "pass", "vrf labels" +) +luCommand( + "r3", 'vtysh -c "show mpls table"', " 103 *BGP 
*r3-cust1", "pass", "vrf labels" +) +luCommand( + "r4", + 'vtysh -c "show mpls table"', + " 1041 *BGP *r4-cust1 .*1042 *BGP *r4-cust2", + "pass", + "vrf labels", +) ######################################################################## # Core VPN router: all customer routes ######################################################################## want_rd_routes = [ - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'}, - - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"}, ] -bgpribRequireVpnRoutes('r2','Customer routes in provider vpn core',want_rd_routes) +bgpribRequireVpnRoutes("r2", "Customer routes in provider vpn core", want_rd_routes) ######################################################################## # PE routers: VPN routes from remote customers @@ -165,46 +218,46 @@ bgpribRequireVpnRoutes('r2','Customer routes in 
provider vpn core',want_rd_route # r1 vtysh -c "show bgp ipv4 vpn" # want_r1_remote_vpn_routes = [ - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'}, - - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"}, ] -bgpribRequireVpnRoutes('r1','Remote Customer routes in R1 vpn',want_r1_remote_vpn_routes) +bgpribRequireVpnRoutes( + "r1", "Remote Customer routes in R1 vpn", want_r1_remote_vpn_routes +) want_r3_remote_vpn_routes = [ - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"}, 
+ {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"}, ] -bgpribRequireVpnRoutes('r3','Remote Customer routes in R3 vpn',want_r3_remote_vpn_routes) +bgpribRequireVpnRoutes( + "r3", "Remote Customer routes in R3 vpn", want_r3_remote_vpn_routes +) want_r4_remote_vpn_routes = [ - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"}, ] -bgpribRequireVpnRoutes('r4','Remote Customer routes in R4 vpn',want_r4_remote_vpn_routes) - +bgpribRequireVpnRoutes( + "r4", "Remote Customer routes in R4 vpn", want_r4_remote_vpn_routes +) # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" @@ -213,54 +266,58 @@ bgpribRequireVpnRoutes('r4','Remote Customer routes in R4 vpn',want_r4_remote_vp # PE routers: VRFs contain routes from remote customer nets ######################################################################## want_r1_remote_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'p':'99.0.0.2/32', 'n':'3.3.3.3'}, - - {'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, + {"p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"p": 
"99.0.0.2/32", "n": "3.3.3.3"}, + {"p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, ] -bgpribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer 1 routes in r1 vrf',want_r1_remote_cust1_routes) +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_remote_cust1_routes +) want_r3_remote_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, + {"p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, ] -bgpribRequireUnicastRoutes('r3','ipv4','r3-cust1','Customer 1 routes in r3 vrf',want_r3_remote_cust1_routes) +bgpribRequireUnicastRoutes( + "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_remote_cust1_routes +) want_r4_remote_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - {'p':'99.0.0.2/32', 'n':'3.3.3.3'}, + {"p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"p": "99.0.0.2/32", "n": "3.3.3.3"}, ] 
-bgpribRequireUnicastRoutes('r4','ipv4','r4-cust1','Customer 1 routes in r4 vrf',want_r4_remote_cust1_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_remote_cust1_routes +) want_r4_remote_cust2_routes = [ - {'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - {'p':'99.0.0.2/32', 'n':'3.3.3.3'}, + {"p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"p": "99.0.0.2/32", "n": "3.3.3.3"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf',want_r4_remote_cust2_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_remote_cust2_routes +) ######################################################################### @@ -270,49 +327,78 @@ bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf', # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" # r1 vtysh -c "show bgp vrf r1-cust1 ipv4 5.1.2.0/24" -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10) +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "10 routes and 10", + "wait", + "Local and remote routes", + 10, +) want = [ - {'p':'5.1.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.3.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.3.0/24', 'n':'192.168.1.1'}, + {"p": "5.1.2.0/24", "n": "192.168.1.1"}, + {"p": "5.1.3.0/24", "n": "192.168.1.1"}, + {"p": "5.4.2.0/24", "n": "192.168.1.1"}, + {"p": "5.4.3.0/24", "n": "192.168.1.1"}, ] -bgpribRequireUnicastRoutes('ce1','ipv4','','Cust 1 routes from remote',want) - -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','10 routes and 12','wait','Local and remote routes', 10) 
+bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes from remote", want) + +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "10 routes and 12", + "wait", + "Local and remote routes", + 10, +) want = [ - {'p':'5.1.0.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.1.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.3.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.3.0/24', 'n':'192.168.1.1'}, + {"p": "5.1.0.0/24", "n": "192.168.1.1"}, + {"p": "5.1.1.0/24", "n": "192.168.1.1"}, + {"p": "5.1.2.0/24", "n": "192.168.1.1"}, + {"p": "5.1.3.0/24", "n": "192.168.1.1"}, + {"p": "5.4.2.0/24", "n": "192.168.1.1"}, + {"p": "5.4.3.0/24", "n": "192.168.1.1"}, ] -bgpribRequireUnicastRoutes('ce2','ipv4','','Cust 1 routes from remote',want) +bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 1 routes from remote", want) # human readable output for debugging -luCommand('r4','vtysh -c "show bgp vrf r4-cust1 ipv4 uni"') -luCommand('r4','vtysh -c "show bgp vrf r4-cust2 ipv4 uni"') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"') -luCommand('r4','vtysh -c "show ip route vrf r4-cust1"') -luCommand('r4','vtysh -c "show ip route vrf r4-cust2"') - -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10) +luCommand("r4", 'vtysh -c "show bgp vrf r4-cust1 ipv4 uni"') +luCommand("r4", 'vtysh -c "show bgp vrf r4-cust2 ipv4 uni"') +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"') +luCommand("r4", 'vtysh -c "show ip route vrf r4-cust1"') +luCommand("r4", 'vtysh -c "show ip route vrf r4-cust2"') + +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "10 routes and 10", + "wait", + "Local and remote routes", + 10, +) # Requires bvl-bug-degenerate-no-label fix (FRR PR #2053) want = [ - {'p':'5.1.0.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.1.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.3.0/24', 'n':'192.168.1.1'}, + {"p": "5.1.0.0/24", "n": 
"192.168.1.1"}, + {"p": "5.1.1.0/24", "n": "192.168.1.1"}, + {"p": "5.4.2.0/24", "n": "192.168.1.1"}, + {"p": "5.4.3.0/24", "n": "192.168.1.1"}, ] -bgpribRequireUnicastRoutes('ce3','ipv4','','Cust 1 routes from remote',want) - -luCommand('ce4','vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10) +bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 1 routes from remote", want) + +luCommand( + "ce4", + 'vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"', + "10 routes and 10", + "wait", + "Local and remote routes", + 10, +) want = [ - {'p':'5.1.0.0/24', 'n':'192.168.2.1'}, - {'p':'5.1.1.0/24', 'n':'192.168.2.1'}, - {'p':'5.1.2.0/24', 'n':'192.168.2.1'}, - {'p':'5.1.3.0/24', 'n':'192.168.2.1'}, + {"p": "5.1.0.0/24", "n": "192.168.2.1"}, + {"p": "5.1.1.0/24", "n": "192.168.2.1"}, + {"p": "5.1.2.0/24", "n": "192.168.2.1"}, + {"p": "5.1.3.0/24", "n": "192.168.2.1"}, ] -bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 2 routes from remote',want) - +bgpribRequireUnicastRoutes( + "ce4", "ipv4", "ce4-cust2", "Cust 2 routes from remote", want +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py index a721cf21bd..af77ab01c1 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py @@ -1,17 +1,120 @@ from lutil import luCommand -luCommand('r1','vtysh -c "clear vrf r1-cust1 prefix 99.0.0.1/32"','.','none','Cleared VRF route') -luCommand('r3','vtysh -c "clear vrf r3-cust1 prefix 99.0.0.2/32"','.','none','Cleared VRF route') -luCommand('r4','vtysh -c "clear vrf r3-cust1 prefix 99.0.0.3/32"','.','none','Cleared VRF route') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','fail','Local Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','fail','Local Registration cleared') -luCommand('r4','vtysh -c "show vnc 
registrations local"','99.0.0.3','fail','Local Registration cleared') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10) -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10) -luCommand('r4','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10) -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10) -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10) -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10) -luCommand('r1','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') + +luCommand( + "r1", + 'vtysh -c "clear vrf r1-cust1 prefix 99.0.0.1/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r3", + 'vtysh -c "clear vrf r3-cust1 prefix 99.0.0.2/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r4", + 'vtysh -c "clear vrf r3-cust1 prefix 99.0.0.3/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "fail", + "Local Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "fail", + "Local Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "fail", + "Local Registration cleared", +) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", + 10, +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No 
BGP prefixes displayed", + "pass", + "Unicast SAFI", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", + 10, +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", + 10, +) +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", + 10, +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", + 10, +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", + 10, +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py index c25c2d9ec5..477578bdbd 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py @@ -1,7 +1,30 @@ from lutil import luCommand -luCommand('r1','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r1-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances') -luCommand('r2','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5226"','.','none','Cleared bgp instances') -luCommand('r3','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r3-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances') -luCommand('r4','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5228 vrf r4-cust2" -c "no router bgp 5227 vrf r4-cust1" -c "no router bgp 5226"','.','none','Cleared bgp 
instances') - +luCommand( + "r1", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r1-cust1" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) +luCommand( + "r2", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) +luCommand( + "r3", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r3-cust1" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) +luCommand( + "r4", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5228 vrf r4-cust2" -c "no router bgp 5227 vrf r4-cust1" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py index d447548783..2b0a85a91a 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py @@ -1,9 +1,22 @@ from lutil import luCommand -rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4'] + +rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"] for rtr in rtrs: - ret = luCommand(rtr, 'vtysh -c "show bgp neigh"', 'Notification received .([A-Za-z0-9/ ]*)', 'none', 'collect neighbor stats') + ret = luCommand( + rtr, + 'vtysh -c "show bgp neigh"', + "Notification received .([A-Za-z0-9/ ]*)", + "none", + "collect neighbor stats", + ) found = luLast() if ret != False and found != None: val = found.group(1) - ret = luCommand(rtr, 'vtysh -c "show bgp neigh"', 'Notification received', 'fail', 'Notify RXed! {}'.format(val)) -#done + ret = luCommand( + rtr, + 'vtysh -c "show bgp neigh"', + "Notification received", + "fail", + "Notify RXed! 
{}".format(val), + ) +# done diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py index 0279e482ff..b4fa240495 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py @@ -1,25 +1,87 @@ from lutil import luCommand -ret = luCommand('ce1', 'vtysh -c "show ip route" | grep -c \\ 10\\.\\*/32','(.*)','pass', 'Looking for sharp routes') + +ret = luCommand( + "ce1", + 'vtysh -c "show ip route" | grep -c \\ 10\\.\\*/32', + "(.*)", + "pass", + "Looking for sharp routes", +) found = luLast() if ret != False and found != None: num = int(found.group()) - luCommand('ce3', 'vtysh -c "show bgp sum"', - '.', 'pass', 'See %s sharp routes' % num) + luCommand( + "ce3", 'vtysh -c "show bgp sum"', ".", "pass", "See %s sharp routes" % num + ) if num > 0: - rtrs = ['ce1', 'ce2', 'ce3'] + rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep Display','.', 'none', 'BGP routes pre remove') - luCommand(rtr, 'ip route show | cat -n | tail','.', 'none', 'Linux routes pre remove') - wait = 2*num/500 - luCommand('ce1', 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),'.','none','Removing {} routes'.format(num)) - luCommand('ce2', 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),'.','none','Removing {} routes'.format(num)) + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni" | grep Display', + ".", + "none", + "BGP routes pre remove", + ) + luCommand( + rtr, + "ip route show | cat -n | tail", + ".", + "none", + "Linux routes pre remove", + ) + wait = 2 * num / 500 + luCommand( + "ce1", + 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num), + ".", + "none", + "Removing {} routes".format(num), + ) + luCommand( + "ce2", + 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num), + ".", + "none", + "Removing {} routes".format(num), + ) for rtr in rtrs: - luCommand(rtr, 'vtysh -c 
"show bgp ipv4 uni" | grep Display',' 10 route', 'wait', 'BGP routes removed', wait, wait_time=10) - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni"','.', 'none', 'BGP routes post remove') + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni" | grep Display', + " 10 route", + "wait", + "BGP routes removed", + wait, + wait_time=10, + ) + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni"', + ".", + "none", + "BGP routes post remove", + ) for rtr in rtrs: - luCommand(rtr, 'ip route show | grep -c \\^10\\.','^0$', 'wait', 'Linux routes removed', wait, wait_time=10) - luCommand(rtr, 'ip route show','.', 'none', 'Linux routes post remove') - rtrs = ['r1', 'r3', 'r4'] + luCommand( + rtr, + "ip route show | grep -c \\^10\\.", + "^0$", + "wait", + "Linux routes removed", + wait, + wait_time=10, + ) + luCommand(rtr, "ip route show", ".", "none", "Linux routes post remove") + rtrs = ["r1", "r3", "r4"] for rtr in rtrs: - luCommand(rtr, 'ip route show vrf {}-cust1 | grep -c \\^10\\.'.format(rtr),'^0$','wait','VRF route removed',wait, wait_time=10) -#done + luCommand( + rtr, + "ip route show vrf {}-cust1 | grep -c \\^10\\.".format(rtr), + "^0$", + "wait", + "VRF route removed", + wait, + wait_time=10, + ) +# done diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py index 4ecaa4c026..3c768640a1 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py @@ -1,29 +1,38 @@ from lutil import luCommand + num = 50000 -b = int(num/(256*256)) +b = int(num / (256 * 256)) if b > 0: - r = num - b * (256*256) + r = num - b * (256 * 256) else: r = num -c = int(r/256) +c = int(r / 256) if c > 0: - d = r - c * 256 - 1 + d = r - c * 256 - 1 else: d = r -wait = 2*num/1000 +wait = 2 * num / 1000 mem_z = {} mem_b = {} -rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4'] +rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"] for rtr in rtrs: - mem_z[rtr] = 
{'value': 0, 'units': 'unknown'} - mem_b[rtr] = {'value': 0, 'units': 'unknown'} - ret = luCommand(rtr, 'vtysh -c "show memory"', 'zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)', 'none', 'collect bgpd memory stats') + mem_z[rtr] = {"value": 0, "units": "unknown"} + mem_b[rtr] = {"value": 0, "units": "unknown"} + ret = luCommand( + rtr, + 'vtysh -c "show memory"', + "zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)", + "none", + "collect bgpd memory stats", + ) found = luLast() if ret != False and found != None: - mem_z[rtr] = {'value': int(found.group(1)), 'units': found.group(2)} - mem_b[rtr] = {'value': int(found.group(3)), 'units': found.group(4)} + mem_z[rtr] = {"value": int(found.group(1)), "units": found.group(2)} + mem_b[rtr] = {"value": int(found.group(3)), "units": found.group(4)} -luCommand('ce1', 'vtysh -c "show mem"', 'qmem sharpd', 'none','check if sharpd running') +luCommand( + "ce1", 'vtysh -c "show mem"', "qmem sharpd", "none", "check if sharpd running" +) doSharp = False found = luLast() if ret != False and found != None: @@ -31,47 +40,195 @@ if ret != False and found != None: doSharp = True if doSharp != True: - luCommand('ce1', 'vtysh -c "sharp data nexthop"', '.', 'pass','sharpd NOT running, skipping test') + luCommand( + "ce1", + 'vtysh -c "sharp data nexthop"', + ".", + "pass", + "sharpd NOT running, skipping test", + ) else: - luCommand('ce1', 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.1 {}"'.format(num),'','pass','Adding {} routes'.format(num)) - luCommand('ce2', 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.2 {}"'.format(num),'','pass','Adding {} routes'.format(num)) - rtrs = ['ce1', 'ce2', 'ce3'] + luCommand( + "ce1", + 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.1 {}"'.format(num), + "", + 
"pass", + "Adding {} routes".format(num), + ) + luCommand( + "ce2", + 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.2 {}"'.format(num), + "", + "pass", + "Adding {} routes".format(num), + ) + rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni 10.{}.{}.{}"'.format(b,c,d), 'Last update:', 'wait', 'RXed last route, 10.{}.{}.{}'.format(b,c,d), wait, wait_time=10) - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep -c 10\\.\\*/32', str(num), 'wait', 'See all sharp routes in BGP', wait, wait_time=10) - luCommand('r1', 'vtysh -c "show bgp vrf r1-cust1 ipv4 uni 10.{}.{}.{}"'.format(b,c,d),'99.0.0.1','wait','RXed -> 10.{}.{}.{} from CE1'.format(b,c,d), wait, wait_time=10) - luCommand('r3', 'vtysh -c "show bgp vrf r3-cust1 ipv4 uni 10.{}.{}.{}"'.format(b,c,d),'99.0.0.2','wait','RXed -> 10.{}.{}.{} from CE2'.format(b,c,d), wait, wait_time=10) - luCommand('r1', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'99.0.0.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d)) - luCommand('r3', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'99.0.0.2','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d)) - luCommand('r3', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'1.1.1.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d)) - luCommand('r1', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'3.3.3.3','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d)) - luCommand('r4', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'1.1.1.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d)) - luCommand('r4', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'3.3.3.3','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d)) - rtrs = ['ce1', 'ce2', 'ce3'] + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni 10.{}.{}.{}"'.format(b, c, d), + "Last update:", + "wait", + "RXed last route, 10.{}.{}.{}".format(b, c, d), + wait, + wait_time=10, + ) + 
luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni" | grep -c 10\\.\\*/32', + str(num), + "wait", + "See all sharp routes in BGP", + wait, + wait_time=10, + ) + luCommand( + "r1", + 'vtysh -c "show bgp vrf r1-cust1 ipv4 uni 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.1", + "wait", + "RXed -> 10.{}.{}.{} from CE1".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + "r3", + 'vtysh -c "show bgp vrf r3-cust1 ipv4 uni 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.2", + "wait", + "RXed -> 10.{}.{}.{} from CE2".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.1", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d), + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.2", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d), + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "1.1.1.1", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d), + ) + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "3.3.3.3", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d), + ) + luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "1.1.1.1", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d), + ) + luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "3.3.3.3", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d), + ) + rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'ip route get 10.{}.{}.{}'.format(b,c,d),'dev','wait','Route to 10.{}.{}.{} available'.format(b,c,d), wait, wait_time=10) - luCommand(rtr, 'ip route show | grep -c \\^10\\.', str(num), 'wait', 'See {} linux routes'.format(num), wait, wait_time=10) + luCommand( + rtr, + "ip route get 10.{}.{}.{}".format(b, c, d), + "dev", + "wait", + "Route to 10.{}.{}.{} 
available".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + rtr, + "ip route show | grep -c \\^10\\.", + str(num), + "wait", + "See {} linux routes".format(num), + wait, + wait_time=10, + ) - rtrs = ['r1', 'r3', 'r4'] + rtrs = ["r1", "r3", "r4"] for rtr in rtrs: - luCommand(rtr, 'ip route get vrf {}-cust1 10.{}.{}.{}'.format(rtr,b,c,d),'dev','wait','VRF route available',wait, wait_time=10) - luCommand(rtr, 'ip route show vrf {}-cust1 | grep -c \\^10\\.'.format(rtr), str(num), 'wait','See {} linux routes'.format(num), wait, wait_time=10) - rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4'] + luCommand( + rtr, + "ip route get vrf {}-cust1 10.{}.{}.{}".format(rtr, b, c, d), + "dev", + "wait", + "VRF route available", + wait, + wait_time=10, + ) + luCommand( + rtr, + "ip route show vrf {}-cust1 | grep -c \\^10\\.".format(rtr), + str(num), + "wait", + "See {} linux routes".format(num), + wait, + wait_time=10, + ) + rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"] for rtr in rtrs: - ret = luCommand(rtr, 'vtysh -c "show memory"', 'zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)', 'none', 'collect bgpd memory stats') + ret = luCommand( + rtr, + 'vtysh -c "show memory"', + "zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)", + "none", + "collect bgpd memory stats", + ) found = luLast() if ret != False and found != None: val_z = int(found.group(1)) - if mem_z[rtr]['units'] != found.group(2): + if mem_z[rtr]["units"] != found.group(2): val_z *= 1000 - delta_z = val_z - int(mem_z[rtr]['value']) - ave_z = float(delta_z)/float(num) + delta_z = val_z - int(mem_z[rtr]["value"]) + ave_z = float(delta_z) / float(num) val_b = int(found.group(3)) - if mem_b[rtr]['units'] != found.group(4): + if mem_b[rtr]["units"] != found.group(4): val_b *= 1000 - delta_b = 
val_b - int(mem_b[rtr]['value']) - ave_b = float(delta_b)/float(num) - luCommand(rtr, 'vtysh -c "show thread cpu"', '.', 'pass', 'BGPd heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)'.format(mem_b[rtr]['value'], mem_b[rtr]['units'], found.group(3), found.group(4), round(ave_b,4))) - luCommand(rtr, 'vtysh -c "show thread cpu"', '.', 'pass', 'Zebra heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)'.format(mem_z[rtr]['value'], mem_z[rtr]['units'], found.group(1), found.group(2), round(ave_z,4))) -#done + delta_b = val_b - int(mem_b[rtr]["value"]) + ave_b = float(delta_b) / float(num) + luCommand( + rtr, + 'vtysh -c "show thread cpu"', + ".", + "pass", + "BGPd heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)".format( + mem_b[rtr]["value"], + mem_b[rtr]["units"], + found.group(3), + found.group(4), + round(ave_b, 4), + ), + ) + luCommand( + rtr, + 'vtysh -c "show thread cpu"', + ".", + "pass", + "Zebra heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)".format( + mem_z[rtr]["value"], + mem_z[rtr]["units"], + found.group(1), + found.group(2), + round(ave_z, 4), + ), + ) +# done diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py index 7e36398298..b537735c65 100755 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py @@ -25,119 +25,132 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")) from lib.ltemplate import * + def test_check_linux_vrf(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - 
ltemplateTest('scripts/check_linux_vrf.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_vrf.py", False, CliOnFail, CheckFunc) + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def SKIP_test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', 
cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) + -#manual data path setup test - remove once have bgp/zebra vrf path working +# manual data path setup test - remove once have bgp/zebra vrf path working def test_check_linux_mpls(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/check_linux_mpls.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 
'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def test_check_scale_up(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/scale_up.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/scale_up.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def 
test_check_scale_down(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/scale_down.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/scale_down.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def SKIP_test_cleanup_all(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + 
ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py index 83ec1e784d..334aaebb4b 100755 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py @@ -57,17 +57,19 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - create_route_maps, create_bgp_community_lists, - create_prefix_lists, verify_bgp_community, step, - check_address_types + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_route_maps, + create_bgp_community_lists, + create_prefix_lists, + verify_bgp_community, + step, + check_address_types, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify -) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Save the Current Working Directory to find configuration files. 
@@ -87,7 +89,7 @@ except IOError: bgp_convergence = False NETWORK = { "ipv4": ["200.50.2.0", "200.50.2.1", "200.50.2.0"], - "ipv6": ["1::1", "1::2", "1::0"] + "ipv6": ["1::1", "1::2", "1::0"], } MASK = {"ipv4": "32", "ipv6": "128"} NET_MASK = {"ipv4": "24", "ipv6": "120"} @@ -104,9 +106,8 @@ LARGE_COMM = { "pf_list_1": "0:0:1 0:0:10 0:0:100", "pf_list_2": "0:0:2 0:0:20 0:0:200", "agg_1": "0:0:1 0:0:2 0:0:10 0:0:20 0:0:100 0:0:200 2:1:1 " - "2:2:1 2:3:1 2:4:1 2:5:1", - "agg_2": "0:0:2 0:0:20 0:0:200 2:1:1 " - "2:2:1 2:3:1 2:4:1 2:5:1" + "2:2:1 2:3:1 2:4:1 2:5:1", + "agg_2": "0:0:2 0:0:20 0:0:200 2:1:1 " "2:2:1 2:3:1 2:4:1 2:5:1", } STANDARD_COMM = { "r1": "1:1 1:2 1:3 1:4 1:5", @@ -115,7 +116,7 @@ STANDARD_COMM = { "pf_list_1": "0:1 0:10 0:100", "pf_list_2": "0:2 0:20 0:200", "agg_1": "0:1 0:2 0:10 0:20 0:100 0:200 2:1 2:2 2:3 2:4 2:5", - "agg_2": "0:2 0:20 0:200 2:1 2:2 2:3 2:4 2:5" + "agg_2": "0:2 0:20 0:200 2:1 2:2 2:3 2:4 2:5", } @@ -169,8 +170,9 @@ def setup_module(mod): ##tgen.mininet_cli() # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) ADDR_TYPES = check_address_types() logger.info("Running setup_module() done") @@ -190,8 +192,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". 
- format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -206,13 +209,9 @@ def config_router_r1(tgen, topo, tc_name): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": LARGE_COMM["r1"] - }, - "community": { - "num": STANDARD_COMM["r1"] - } - } + "large_community": {"num": LARGE_COMM["r1"]}, + "community": {"num": STANDARD_COMM["r1"]}, + }, } ] } @@ -221,8 +220,7 @@ def config_router_r1(tgen, topo, tc_name): step("Configuring LC1 on r1") result = create_route_maps(tgen, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_2 = { @@ -233,68 +231,64 @@ def config_router_r1(tgen, topo, tc_name): "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv4"][0], MASK["ipv4"]), - "no_of_network": 4 + "network": "%s/%s" + % (NETWORK["ipv4"][0], MASK["ipv4"]), + "no_of_network": 4, } ], "neighbor": { "r2": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } }, "r3": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } - } - } + }, + }, } }, "ipv6": { "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv6"][0], MASK["ipv6"]), - "no_of_network": 4 + "network": "%s/%s" + % (NETWORK["ipv6"][0], MASK["ipv6"]), + "no_of_network": 4, } ], "neighbor": { "r2": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } }, "r3": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] 
+ "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } - } - } + }, + }, } - } + }, } } } @@ -302,8 +296,7 @@ def config_router_r1(tgen, topo, tc_name): step("Applying LC1 on r1 neighbors and advertising networks") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) CONFIG_ROUTER_R1 = True @@ -319,13 +312,9 @@ def config_router_r2(tgen, topo, tc_name): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": LARGE_COMM["r2"] - }, - "community": { - "num": STANDARD_COMM["r2"] - } - } + "large_community": {"num": LARGE_COMM["r2"]}, + "community": {"num": STANDARD_COMM["r2"]}, + }, } ] } @@ -334,8 +323,7 @@ def config_router_r2(tgen, topo, tc_name): step("Configuring route-maps LC2 on r2") result = create_route_maps(tgen, input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_1 = { "r2": { @@ -347,10 +335,9 @@ def config_router_r2(tgen, topo, tc_name): "r4": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "LC2", - "direction": "out" - }] + "route_maps": [ + {"name": "LC2", "direction": "out"} + ] } } } @@ -363,16 +350,15 @@ def config_router_r2(tgen, topo, tc_name): "r4": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "LC2", - "direction": "out" - }] + "route_maps": [ + {"name": "LC2", "direction": "out"} + ] } } } } } - } + }, } } } @@ -380,8 +366,7 @@ def config_router_r2(tgen, topo, tc_name): step("Applying LC2 on r2 neighbors in out direction") result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) CONFIG_ROUTER_R2 = True @@ -399,13 +384,13 @@ def config_router_additive(tgen, topo, tc_name): "set": { "large_community": { "num": LARGE_COMM["r2"], - "action": "additive" + "action": "additive", }, "community": { "num": STANDARD_COMM["r2"], - "action": "additive" - } - } + "action": "additive", + }, + }, } ] } @@ -414,8 +399,7 @@ def config_router_additive(tgen, topo, tc_name): step("Configuring LC2 with community attributes as additive") result = create_route_maps(tgen, input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) # tgen.mininet_cli() CONFIG_ROUTER_ADDITIVE = True @@ -434,47 +418,41 @@ def config_for_as_path(tgen, topo, tc_name): "pf_list_1": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv4"][0], - MASK["ipv4"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv4"][0], MASK["ipv4"]), + "action": "permit", } ], "pf_list_2": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv4"][1], - MASK["ipv4"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv4"][1], MASK["ipv4"]), + "action": "permit", } - ] + ], }, "ipv6": { "pf_list_3": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv6"][0], - MASK["ipv6"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv6"][0], MASK["ipv6"]), + "action": "permit", } ], "pf_list_4": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv6"][1], - MASK["ipv6"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv6"][1], MASK["ipv6"]), + "action": "permit", } - ] - } - + ], + }, } } } step("Configuring prefix-lists on r1 to filter networks") result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_2 = { "r1": { @@ -483,81 +461,50 @@ def config_for_as_path(tgen, topo, tc_name): { "action": "permit", "seq_id": 10, - "match": { - "ipv4": { - "prefix_lists": "pf_list_1" - } - }, + "match": {"ipv4": {"prefix_lists": "pf_list_1"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_1"] - }, - "community": { - "num": STANDARD_COMM["pf_list_1"] - } - } + "large_community": {"num": LARGE_COMM["pf_list_1"]}, + "community": {"num": STANDARD_COMM["pf_list_1"]}, + }, }, { "action": "permit", "seq_id": 20, - "match": { - "ipv6": { - "prefix_lists": "pf_list_3" - } - }, + "match": {"ipv6": {"prefix_lists": "pf_list_3"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_1"] - }, - "community": { - "num": STANDARD_COMM["pf_list_1"] - } - } + "large_community": {"num": LARGE_COMM["pf_list_1"]}, + "community": {"num": STANDARD_COMM["pf_list_1"]}, + }, }, { "action": "permit", "seq_id": 30, - "match": { - "ipv4": { - "prefix_lists": "pf_list_2" - } - }, + "match": {"ipv4": {"prefix_lists": "pf_list_2"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_2"] - }, - "community": { - "num": STANDARD_COMM["pf_list_2"] - } - } + "large_community": {"num": LARGE_COMM["pf_list_2"]}, + "community": {"num": STANDARD_COMM["pf_list_2"]}, + }, }, { "action": "permit", "seq_id": 40, - "match": { - "ipv6": { - "prefix_lists": "pf_list_4" - } - }, + "match": {"ipv6": {"prefix_lists": "pf_list_4"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_2"] - }, - "community": { - "num": STANDARD_COMM["pf_list_2"] - } - } - } + "large_community": {"num": LARGE_COMM["pf_list_2"]}, + "community": {"num": STANDARD_COMM["pf_list_2"]}, + }, + }, ] } } } - step("Applying prefix-lists match in route-map LC1 on r1. Setting" - " community attritbute for filtered networks") + step( + "Applying prefix-lists match in route-map LC1 on r1. 
Setting" + " community attritbute for filtered networks" + ) result = create_route_maps(tgen, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) config_router_additive(tgen, topo, tc_name) @@ -569,22 +516,21 @@ def config_for_as_path(tgen, topo, tc_name): "action": "permit", "name": "ANY", "value": LARGE_COMM["pf_list_1"], - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": STANDARD_COMM["pf_list_1"], - } + }, ] } } step("Configuring bgp community lists on r4") result = create_bgp_community_lists(tgen, input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_4 = { "r4": { @@ -595,14 +541,9 @@ def config_for_as_path(tgen, topo, tc_name): "seq_id": "10", "match": { "large_community_list": {"id": "ANY"}, - "community_list": {"id": "ANY"} + "community_list": {"id": "ANY"}, }, - "set": { - "aspath": { - "as_num": "4000000", - "as_action": "prepend" - } - } + "set": {"path": {"as_num": "4000000", "as_action": "prepend"}}, } ] } @@ -611,8 +552,7 @@ def config_for_as_path(tgen, topo, tc_name): step("Applying community list on route-map on r4") result = create_route_maps(tgen, input_dict_4) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_5 = { "r4": { @@ -624,10 +564,9 @@ def config_for_as_path(tgen, topo, tc_name): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "out" - }] + "route_maps": [ + {"name": "LC4", "direction": "out"} + ] } } } @@ -640,16 +579,15 @@ def config_for_as_path(tgen, topo, tc_name): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "out" - }] + "route_maps": [ + {"name": "LC4", "direction": "out"} + ] } } } } } - } + }, } } } @@ -657,8 +595,7 @@ def config_for_as_path(tgen, topo, tc_name): step("Applying route-map LC4 out from r4 to r5 ") result = create_router_bgp(tgen, topo, input_dict_5) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) ##################################################### @@ -690,8 +627,8 @@ def test_large_community_set(request): "seq_id": "10", "set": { "large_community": {"num": LARGE_COMM["r1"]}, - "community": {"num": STANDARD_COMM["r1"]} - } + "community": {"num": STANDARD_COMM["r1"]}, + }, } ] } @@ -700,8 +637,7 @@ def test_large_community_set(request): step("Trying to set bgp communities") result = create_route_maps(tgen, input_dict) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -728,15 +664,15 @@ def test_large_community_advertise(request): } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], - input_dict) + result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict) assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - result = verify_bgp_community(tgen, 
adt, "r3", [NETWORK[adt][0]], - input_dict) + result = verify_bgp_community(tgen, adt, "r3", [NETWORK[adt][0]], input_dict) assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -759,14 +695,14 @@ def test_large_community_transitive(request): input_dict_1 = { "largeCommunity": LARGE_COMM["r1"], - "community": STANDARD_COMM["r1"] + "community": STANDARD_COMM["r1"], } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], - input_dict_1) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_1) assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -790,14 +726,14 @@ def test_large_community_override(request): input_dict_3 = { "largeCommunity": LARGE_COMM["r2"], - "community": STANDARD_COMM["r2"] + "community": STANDARD_COMM["r2"], } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][1]], - input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][1]], input_dict_3) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -823,14 +759,14 @@ def test_large_community_additive(request): input_dict_1 = { "largeCommunity": "%s %s" % (LARGE_COMM["r1"], LARGE_COMM["r2"]), - "community": "%s %s" % (STANDARD_COMM["r1"], STANDARD_COMM["r2"]) + "community": "%s %s" % (STANDARD_COMM["r1"], STANDARD_COMM["r2"]), } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], - input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_1) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -853,30 +789,28 @@ def test_large_community_match_as_path(request): config_for_as_path(tgen, topo, tc_name) input_dict = { - "largeCommunity": "%s %s" % ( - LARGE_COMM["pf_list_1"], LARGE_COMM["r2"]), - "community": "%s %s" % ( - STANDARD_COMM["pf_list_1"], STANDARD_COMM["r2"]), + "largeCommunity": "%s %s" % (LARGE_COMM["pf_list_1"], LARGE_COMM["r2"]), + "community": "%s %s" % (STANDARD_COMM["pf_list_1"], STANDARD_COMM["r2"]), } input_dict_1 = { - "largeCommunity": "%s %s" % ( - LARGE_COMM["pf_list_2"], LARGE_COMM["r2"]), - "community": "%s %s" % ( - STANDARD_COMM["pf_list_2"], STANDARD_COMM["r2"]), + "largeCommunity": "%s %s" % (LARGE_COMM["pf_list_2"], LARGE_COMM["r2"]), + "community": "%s %s" % (STANDARD_COMM["pf_list_2"], STANDARD_COMM["r2"]), } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][0]], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][0]], input_dict) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) - result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][1]], - input_dict_1, expected=False) + result = verify_bgp_community( + tgen, adt, "r5", [NETWORK[adt][1]], input_dict_1, expected=False + ) - assert result is not True, "Test case {} : Should fail \n Error: {}". 
\ - format(tc_name, result) + assert result is not True, "Test case {} : Should fail \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -909,22 +843,22 @@ def test_large_community_match_all(request): "action": "permit", "name": "ANY", "value": "1:1:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ALL", "value": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:1:1 2:2:1", - "large": True + "large": True, }, { "community_type": "expanded", "action": "permit", "name": "EXP_ALL", "value": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:[1-5]:1", - "large": True - } + "large": True, + }, ] } } @@ -932,8 +866,7 @@ def test_large_community_match_all(request): step("Create bgp community lists for ANY, EXACT and EXP_ALL match") result = create_bgp_community_lists(tgen, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_2 = { "r4": { @@ -942,18 +875,18 @@ def test_large_community_match_all(request): { "action": "permit", "seq_id": "10", - "match": {"large-community-list": {"id": "ANY"}} + "match": {"large-community-list": {"id": "ANY"}}, }, { "action": "permit", "seq_id": "20", - "match": {"large-community-list": {"id": "EXACT"}} + "match": {"large-community-list": {"id": "EXACT"}}, }, { "action": "permit", "seq_id": "30", - "match": {"large-community-list": {"id": "EXP_ALL"}} - } + "match": {"large-community-list": {"id": "EXP_ALL"}}, + }, ] } } @@ -961,8 +894,7 @@ def test_large_community_match_all(request): step("Applying bgp community lits on LC4 route-map") result = create_route_maps(tgen, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_3 = { "r4": { @@ -974,10 +906,9 @@ def test_large_community_match_all(request): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "in" - }] + "route_maps": [ + {"name": "LC4", "direction": "in"} + ] } } } @@ -990,16 +921,15 @@ def test_large_community_match_all(request): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "in" - }] + "route_maps": [ + {"name": "LC4", "direction": "in"} + ] } } } } } - } + }, } } } @@ -1008,24 +938,23 @@ def test_large_community_match_all(request): step("Apply route-mpa LC4 on r4 for r2 neighbor, direction 'in'") result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_4 = { "largeCommunity": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:1:1 2:2:1 2:3:1 " - "2:4:1 2:5:1" + "2:4:1 2:5:1" } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], - input_dict_4) - assert result is True, "Test case {} : Should fail \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_4) + assert result is True, "Test case {} : Should fail \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) -#@pytest.mark.skip(reason="as-set not working for ipv6") +# @pytest.mark.skip(reason="as-set not working for ipv6") def test_large_community_aggregate_network(request): """ Restart router and check if large community and community @@ -1047,7 +976,7 @@ def test_large_community_aggregate_network(request): input_dict = { "community": STANDARD_COMM["agg_1"], - "largeCommunity": LARGE_COMM["agg_1"] + "largeCommunity": LARGE_COMM["agg_1"], } input_dict_1 = { @@ -1058,9 +987,9 @@ def test_large_community_aggregate_network(request): "unicast": { "aggregate_address": [ { - "network": "%s/%s" % ( - NETWORK["ipv4"][2], NET_MASK["ipv4"]), - "as_set": True + "network": "%s/%s" + % (NETWORK["ipv4"][2], NET_MASK["ipv4"]), + "as_set": True, } ] } @@ -1069,13 +998,13 @@ def test_large_community_aggregate_network(request): "unicast": { "aggregate_address": [ { - "network": "%s/%s" % ( - NETWORK["ipv6"][2], NET_MASK["ipv6"]), - "as_set": True + "network": "%s/%s" + % (NETWORK["ipv6"][2], NET_MASK["ipv6"]), + "as_set": True, } ] } - } + }, } } } @@ -1083,16 +1012,15 @@ def test_large_community_aggregate_network(request): step("Configuring aggregate address as-set on r2") result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", - ["%s/%s" % (NETWORK[adt][2], - NET_MASK[adt])], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community( + tgen, adt, "r4", ["%s/%s" % (NETWORK[adt][2], NET_MASK[adt])], input_dict + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) input_dict_2 = { "r1": { @@ -1102,10 +1030,10 @@ def test_large_community_aggregate_network(request): "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv4"][0], MASK["ipv4"]), + "network": "%s/%s" + % (NETWORK["ipv4"][0], MASK["ipv4"]), "no_of_network": 1, - "delete": True + "delete": True, } ] } @@ -1114,14 +1042,14 @@ def test_large_community_aggregate_network(request): "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv6"][0], MASK["ipv6"]), + "network": "%s/%s" + % (NETWORK["ipv6"][0], MASK["ipv6"]), "no_of_network": 1, - "delete": True + "delete": True, } ] } - } + }, } } } @@ -1129,22 +1057,21 @@ def test_large_community_aggregate_network(request): step("Stop advertising one of the networks") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_3 = { "community": STANDARD_COMM["agg_2"], - "largeCommunity": LARGE_COMM["agg_2"] + "largeCommunity": LARGE_COMM["agg_2"], } for adt in ADDR_TYPES: step("Verifying bgp community values on r5 is also modified") - result = verify_bgp_community(tgen, adt, "r4", - ["%s/%s" % (NETWORK[adt][2], - NET_MASK[adt])], - input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community( + tgen, adt, "r4", ["%s/%s" % (NETWORK[adt][2], NET_MASK[adt])], input_dict_3 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -1168,7 +1095,7 @@ def test_large_community_boundary_values(request): "community_type": "standard", "action": "permit", "name": "ANY", - "value": "0:-1" + "value": "0:-1", } ] } @@ -1176,8 +1103,9 @@ def test_large_community_boundary_values(request): step("Checking boundary value for community 0:-1") result = create_bgp_community_lists(tgen, input_dict) - assert result is not True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Checking community attribute 0:65536") input_dict_2 = { @@ -1187,7 +1115,7 @@ def test_large_community_boundary_values(request): "community_type": "standard", "action": "permit", "name": "ANY", - "value": "0:65536" + "value": "0:65536", } ] } @@ -1195,8 +1123,9 @@ def test_large_community_boundary_values(request): step("Checking boundary value for community 0:65536") result = create_bgp_community_lists(tgen, input_dict_2) - assert result is not True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Checking boundary value for community 0:4294967296") input_dict_3 = { @@ -1207,15 +1136,16 @@ def test_large_community_boundary_values(request): "action": "permit", "name": "ANY", "value": "0:4294967296", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_3) - assert result is not True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Checking boundary value for community 0:-1:1") input_dict_4 = { @@ -1226,15 +1156,16 @@ def test_large_community_boundary_values(request): "action": "permit", "name": "ANY", "value": "0:-1:1", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is not True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) def test_large_community_after_clear_bgp(request): @@ -1253,25 +1184,22 @@ def test_large_community_after_clear_bgp(request): reset_config_on_routers(tgen) config_router_r1(tgen, topo, tc_name) - input_dict = { - "largeCommunity": LARGE_COMM["r1"], - "community": STANDARD_COMM["r1"] - } + input_dict = {"largeCommunity": LARGE_COMM["r1"], "community": STANDARD_COMM["r1"]} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Clearing BGP on r1") clear_bgp_and_verify(tgen, topo, "r1") for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py index cba20551cd..502a9a9ec4 100755 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py @@ -3,7 +3,7 @@ # # Copyright (c) 2019 by VMware, Inc. ("VMware") # Used Copyright (c) 2018 by Network Device Education Foundation, -#Inc. ("NetDEF") in this file. +# Inc. ("NetDEF") in this file. # # Permission to use, copy, modify, and/or distribute this software # for any purpose with or without fee is hereby granted, provided @@ -77,19 +77,23 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - create_route_maps, create_bgp_community_lists, - create_prefix_lists, verify_bgp_community, step, - verify_create_community_list, delete_route_maps, - verify_route_maps, create_static_routes, - check_address_types + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_route_maps, + create_bgp_community_lists, + create_prefix_lists, + verify_bgp_community, + step, + verify_create_community_list, + delete_route_maps, + verify_route_maps, + create_static_routes, + check_address_types, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify -) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration 
creation @@ -121,6 +125,7 @@ class GenerateTopo(Topo): # Building topology from json file build_topo_from_json(tgen, topo) + def setup_module(mod): """ Sets up the pytest environment @@ -130,7 +135,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -155,8 +160,9 @@ def setup_module(mod): # Api call verify whether BGP is converged # Ipv4 bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) ADDR_TYPES = check_address_types() logger.info("Running setup_module() done") @@ -176,9 +182,11 @@ def teardown_module(mod): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}".\ - format(time.asctime(time.localtime(time.time())))) - logger.info("="*40) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + ##################################################### # @@ -213,26 +221,24 @@ def test_create_bgp_standard_large_community_list(request): "action": "permit", "name": "LC_1_STD", "value": "2:1:1 2:1:2 1:2:3", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "LC_2_STD", "value": "3:1:1 3:1:2", - "large": True - } + "large": True, + }, ] } } result = create_bgp_community_lists(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n 
Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create srtandard large community list with in-correct values") input_dict = { @@ -243,20 +249,18 @@ def test_create_bgp_standard_large_community_list(request): "action": "permit", "name": "LC_1_STD_ERR", "value": "0:0:0", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) ## TODO should fail step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -288,19 +292,17 @@ def test_create_bgp_expanded_large_community_list(request): "action": "permit", "name": "LC_1_EXP", "value": "1:1:200 1:2:* 3:2:1", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -331,14 +333,13 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "action": "permit", "name": "LC_DEL", "value": "1:2:1 1:3:1 2:1:1 2:2:2 3:3:3", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( 
- tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_2 = { @@ -351,9 +352,9 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "set": { "large_community": { "num": "1:2:1 1:3:1 2:10:1 3:3:3 4:4:4 5:5:5", - "action": "additive" + "action": "additive", } - } + }, } ] } @@ -364,20 +365,14 @@ def test_modify_large_community_lists_referenced_by_rmap(request): { "action": "permit", "seq_id": "10", - "set": { - "large_comm_list": { - "id": "LC_DEL", - "delete": True - } - } + "set": {"large_comm_list": {"id": "LC_DEL", "delete": True}}, } ] } - } + }, } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_3 = { @@ -386,42 +381,42 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ], + "advertise_networks": [{"network": "200.50.2.0/32"}], "neighbor": { "r2": { "dest_link": { "r1": { - "route_maps": [{ - "name": "RM_R2_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R2_OUT", + "direction": "out", + } + ] } } } - } + }, } }, "ipv6": { "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ], + "advertise_networks": [{"network": "1::1/128"}], "neighbor": { "r2": { "dest_link": { "r1": { - "route_maps": [{ - "name": "RM_R2_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R2_OUT", + "direction": "out", + } + ] } } } - } + }, } - } + }, } } }, @@ -434,10 +429,9 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", 
"direction": "in"} + ] } } } @@ -450,35 +444,31 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify Community-list") dut = "r4" - input_dict_4 = { - "largeCommunity": "2:10:1 4:4:4 5:5:5" - } + input_dict_4 = {"largeCommunity": "2:10:1 4:4:4 5:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -511,17 +501,16 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "set": { "large_community": { "num": "200:200:1 200:200:10 200:200:20000", - "action": "additive" + "action": "additive", } - } + }, } ] } } } result = create_route_maps(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_2 = { @@ -530,18 +519,12 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -554,10 +537,9 @@ def 
test_large_community_lists_with_rmap_apply_and_remove(request): "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "RM_LC1", "direction": "out"} + ] } } } @@ -570,57 +552,49 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "RM_LC1", "direction": "out"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" - input_dict_4 = { - "largeCommunity": "200:200:1 200:200:10 200:200:20000" - } + input_dict_4 = {"largeCommunity": "200:200:1 200:200:10 200:200:20000"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Delete route map reference by community-list") - input_dict_3 = { - "r4": { - "route_maps": ["RM_LC1"] - } - } + input_dict_3 = {"r4": {"route_maps": ["RM_LC1"]}} result = delete_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify route map is deleted") result = verify_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - 
input_dict_4, expected=False) + result = verify_bgp_community( + tgen, adt, dut, NETWORKS[adt], input_dict_4, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -653,10 +627,10 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "set": { "large_community": { "num": "0:0:1 0:0:10 0:0:100 2:0:1 2:0:2 2:0:3" - " 2:0:4 2:0:5", - "action": "additive" + " 2:0:4 2:0:5", + "action": "additive", } - } + }, } ], "RM_R4_OUT": [ @@ -666,17 +640,16 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "set": { "large_community": { "num": "0:0:1 0:0:10 0:0:10000 2:0:1 2:0:2", - "action": "additive" + "action": "additive", } - } + }, } - ] + ], } } } result = create_route_maps(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_2 = { @@ -685,18 +658,12 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -709,23 +676,24 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } }, "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } - } + }, } } }, @@ 
-735,45 +703,44 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } }, "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" input_dict_4 = { - "largeCommunity": - "0:0:1 0:0:10 0:0:100 0:0:10000 2:0:1 2:0:2 2:0:3 2:0:4 2:0:5" + "largeCommunity": "0:0:1 0:0:10 0:0:100 0:0:10000 2:0:1 2:0:2 2:0:3 2:0:4 2:0:5" } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -806,10 +773,10 @@ def test_large_community_lists_with_rmap_set_none(request): "set": { "large_community": { "num": "0:0:1 0:0:10 0:0:100 2:0:1 2:0:2 2:0:3" - " 2:0:4", - "action": "additive" + " 2:0:4", + "action": "additive", } - } + }, } ] } @@ -820,19 +787,14 @@ def test_large_community_lists_with_rmap_set_none(request): { "action": "permit", "seq_id": "10", - "set": { - "large_community": { - "num": "none" - } - } + "set": {"large_community": {"num": "none"}}, } ] } - } + }, } result = create_route_maps(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route 
map") input_dict_2 = { @@ -841,18 +803,12 @@ def test_large_community_lists_with_rmap_set_none(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -865,10 +821,9 @@ def test_large_community_lists_with_rmap_set_none(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -881,16 +836,15 @@ def test_large_community_lists_with_rmap_set_none(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } }, @@ -903,10 +857,9 @@ def test_large_community_lists_with_rmap_set_none(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } @@ -919,31 +872,29 @@ def test_large_community_lists_with_rmap_set_none(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify Community-list") dut = "r6" for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - expected=False) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], expected=False) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - 
tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -969,24 +920,17 @@ def test_lcomm_lists_with_redistribute_static_connected_rmap(request): input_dict = { "r1": { "static_routes": [ - { - "network": "200.50.2.0/32", - "next_hop": "10.0.0.6" - }, - { - "network": "1::1/128", - "next_hop": "fd00:0:0:1::2" - } + {"network": "200.50.2.0/32", "next_hop": "10.0.0.6"}, + {"network": "1::1/128", "next_hop": "fd00:0:0:1::2"}, ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("redistribute static routes") input_dict_1 = { - "r1":{ + "r1": { "bgp": { "address_family": { "ipv4": { @@ -994,12 +938,12 @@ def test_lcomm_lists_with_redistribute_static_connected_rmap(request): "redistribute": [ { "redist_type": "static", - "attribute": "route-map RM_R2_OUT" + "attribute": "route-map RM_R2_OUT", }, { "redist_type": "connected", - "attribute": "route-map RM_R2_OUT" - } + "attribute": "route-map RM_R2_OUT", + }, ] } }, @@ -1008,82 +952,74 @@ def test_lcomm_lists_with_redistribute_static_connected_rmap(request): "redistribute": [ { "redist_type": "static", - "attribute": "route-map RM_R2_OUT" + "attribute": "route-map RM_R2_OUT", }, { "redist_type": "connected", - "attribute": "route-map RM_R2_OUT" - } + "attribute": "route-map RM_R2_OUT", + }, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_3 = { "r1": { "route_maps": { - "RM_R2_OUT": [{ - "action": "permit", - "set": { - "large_community": {"num":"55:55:55 555:555:555"} - } - }] + "RM_R2_OUT": [ + { + "action": "permit", + "set": {"large_community": {"num": "55:55:55 555:555:555"}}, + } + ] } - } + } } 
result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - step("Verify large-community-list for static and connected ipv4 route on" - " r2") + step("Verify large-community-list for static and connected ipv4 route on" " r2") - input_dict_5 = { - "largeCommunity": "55:55:55 555:555:555" - } + input_dict_5 = {"largeCommunity": "55:55:55 555:555:555"} if "ipv4" in ADDR_TYPES: dut = "r2" networks = ["200.50.2.0/32", "1.0.1.17/32"] - result = verify_bgp_community(tgen, "ipv4", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - step("Verify large-community-list for static and connected ipv4 route" - " on r4") + step("Verify large-community-list for static and connected ipv4 route" " on r4") dut = "r4" networks = ["200.50.2.0/32", "1.0.1.17/32"] - result = verify_bgp_community(tgen, "ipv4", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) if "ipv6" in ADDR_TYPES: - step("Verify large-community-list for static and connected ipv6 route" - " on r2") + step("Verify large-community-list for static and connected ipv6 route" " on r2") dut = "r2" networks = ["1::1/128", "2001:db8:f::1:17/128"] - result = verify_bgp_community(tgen, "ipv6", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv6", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - step("Verify large-community-list for static and connected ipv6 route" - " on r4") + step("Verify large-community-list for static and connected ipv6 
route" " on r4") dut = "r4" networks = ["1::1/128", "2001:db8:f::1:17/128"] - result = verify_bgp_community(tgen, "ipv6", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv6", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1115,14 +1051,13 @@ def test_large_community_lists_with_rmap_set_delete(request): "action": "permit", "name": "Test", "value": "1:2:1 1:1:10 1:3:100", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_3 = { @@ -1132,12 +1067,7 @@ def test_large_community_lists_with_rmap_set_delete(request): { "action": "permit", "seq_id": "10", - "set": { - "large_comm_list": { - "id": "Test", - "delete": True - } - } + "set": {"large_comm_list": {"id": "Test", "delete": True}}, } ] } @@ -1151,18 +1081,17 @@ def test_large_community_lists_with_rmap_set_delete(request): "set": { "large_community": { "num": "1:2:1 1:1:10 1:3:100 2:1:1 2:2:2 2:3:3" - " 2:4:4 2:5:5", - "action": "additive" + " 2:4:4 2:5:5", + "action": "additive", } - } + }, } ] } - } + }, } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_4 = { @@ -1171,18 +1100,12 @@ def test_large_community_lists_with_rmap_set_delete(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} 
- ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1195,10 +1118,9 @@ def test_large_community_lists_with_rmap_set_delete(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -1211,16 +1133,15 @@ def test_large_community_lists_with_rmap_set_delete(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } }, @@ -1233,10 +1154,9 @@ def test_large_community_lists_with_rmap_set_delete(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } @@ -1249,34 +1169,30 @@ def test_large_community_lists_with_rmap_set_delete(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" - input_dict_5 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_5 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_5) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1308,18 +1224,15 @@ def test_large_community_lists_with_no_send_community(request): "action": "permit", "seq_id": "10", 
"set": { - "large_community": { - "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } - } + "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_3 = { @@ -1328,18 +1241,12 @@ def test_large_community_lists_with_no_send_community(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1352,10 +1259,12 @@ def test_large_community_lists_with_no_send_community(request): "r6": { "dest_link": { "r5": { - "route_maps": [{ - "name": "RM_R6_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R6_OUT", + "direction": "out", + } + ] } } } @@ -1368,34 +1277,33 @@ def test_large_community_lists_with_no_send_community(request): "r6": { "dest_link": { "r5": { - "route_maps": [{ - "name": "RM_R6_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R6_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" - input_dict_4 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_4 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, 
adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Configure neighbor for no-send-community") input_dict_5 = { @@ -1406,11 +1314,7 @@ def test_large_community_lists_with_no_send_community(request): "unicast": { "neighbor": { "r6": { - "dest_link": { - "r5": { - "no_send_community": "large" - } - } + "dest_link": {"r5": {"no_send_community": "large"}} } } } @@ -1419,29 +1323,26 @@ def test_large_community_lists_with_no_send_community(request): "unicast": { "neighbor": { "r6": { - "dest_link": { - "r5": { - "no_send_community": "large" - } - } + "dest_link": {"r5": {"no_send_community": "large"}} } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify Community-list") for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4, expected=False) + result = verify_bgp_community( + tgen, adt, dut, NETWORKS[adt], input_dict_4, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1472,14 +1373,15 @@ def test_create_large_community_lists_with_no_attribute_values(request): "community_type": "standard", "action": "permit", "name": "Test1", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_1) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1512,18 +1414,15 @@ def test_large_community_lists_with_rmap_match_exact(request): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } - } + "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 
2:5:5"} + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_3 = { @@ -1532,18 +1431,12 @@ def test_large_community_lists_with_rmap_match_exact(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1556,10 +1449,12 @@ def test_large_community_lists_with_rmap_match_exact(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -1572,24 +1467,25 @@ def test_large_community_lists_with_rmap_match_exact(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -1600,19 +1496,17 @@ def test_large_community_lists_with_rmap_match_exact(request): "action": "permit", "name": "EXACT", "value": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: 
{}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -1624,16 +1518,15 @@ def test_large_community_lists_with_rmap_match_exact(request): "seq_id": "10", "match": { "large-community-list": ["EXACT"], - "match_exact": True - } + "match_exact": True, + }, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_6 = { @@ -1646,10 +1539,9 @@ def test_large_community_lists_with_rmap_match_exact(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -1662,34 +1554,30 @@ def test_large_community_lists_with_rmap_match_exact(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_4 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_4 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : 
Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1718,20 +1606,21 @@ def test_large_community_lists_with_rmap_match_all(request): input_dict_2 = { "r2": { "route_maps": { - "RM_R4_OUT": [{ - "action": "permit", - "set": { - "large_community": { - "num": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + "RM_R4_OUT": [ + { + "action": "permit", + "set": { + "large_community": { + "num": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" + } + }, } - }] + ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_3 = { @@ -1740,18 +1629,12 @@ def test_large_community_lists_with_rmap_match_all(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1764,10 +1647,12 @@ def test_large_community_lists_with_rmap_match_all(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -1780,23 +1665,24 @@ def test_large_community_lists_with_rmap_match_all(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create 
standard large commumity-list") input_dict_4 = { @@ -1807,19 +1693,17 @@ def test_large_community_lists_with_rmap_match_all(request): "action": "permit", "name": "ALL", "value": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -1829,19 +1713,14 @@ def test_large_community_lists_with_rmap_match_all(request): { "action": "permit", "seq_id": "10", - "match": { - "large-community-list": { - "id": "ALL" - } - } + "match": {"large-community-list": {"id": "ALL"}}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_6 = { @@ -1854,10 +1733,9 @@ def test_large_community_lists_with_rmap_match_all(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -1870,34 +1748,30 @@ def test_large_community_lists_with_rmap_match_all(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert 
result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_4 = { - "largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_4 = {"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1929,18 +1803,15 @@ def test_large_community_lists_with_rmap_match_any(request): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } - } + "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_3 = { @@ -1949,18 +1820,12 @@ def test_large_community_lists_with_rmap_match_any(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1973,10 +1838,12 @@ def test_large_community_lists_with_rmap_match_any(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -1989,23 +1856,24 @@ def test_large_community_lists_with_rmap_match_any(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - 
"direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -2016,40 +1884,38 @@ def test_large_community_lists_with_rmap_match_any(request): "action": "permit", "name": "ANY", "value": "2:1:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": "2:2:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": "2:3:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": "2:4:1", - "large": True - } + "large": True, + }, ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -2059,19 +1925,14 @@ def test_large_community_lists_with_rmap_match_any(request): { "action": "permit", "seq_id": "10", - "match": { - "large-community-list": { - "id": "ANY" - } - } + "match": {"large-community-list": {"id": "ANY"}}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) 
step("Configure neighbor for route map") input_dict_6 = { @@ -2084,10 +1945,9 @@ def test_large_community_lists_with_rmap_match_any(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -2100,34 +1960,30 @@ def test_large_community_lists_with_rmap_match_any(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_7 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_7 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_7) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2162,18 +2018,15 @@ def test_large_community_lists_with_rmap_match_regex(request): "large_community": { "num": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5", }, - "community": { - "num": "1:1 1:2 1:3 1:4 1:5" - } - } + "community": {"num": "1:1 1:2 1:3 1:4 1:5"}, + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_3 = { @@ -2182,18 +2035,12 @@ def test_large_community_lists_with_rmap_match_regex(request): "address_family": { "ipv4": { "unicast": { - 
"advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -2206,10 +2053,12 @@ def test_large_community_lists_with_rmap_match_regex(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -2222,23 +2071,24 @@ def test_large_community_lists_with_rmap_match_regex(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } - result = create_router_bgp(tgen, topo,input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = create_router_bgp(tgen, topo, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -2249,26 +2099,24 @@ def test_large_community_lists_with_rmap_match_regex(request): "action": "permit", "name": "ALL", "value": "1:1:1 2:1:3 2:1:4 2:1:5", - "large": True + "large": True, }, { "community_type": "expanded", "action": "permit", "name": "EXP_ALL", "value": "1:1:1 2:1:[3-5]", - "large": True - } + "large": True, + }, ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed 
\n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -2278,19 +2126,14 @@ def test_large_community_lists_with_rmap_match_regex(request): { "action": "permit", "seq_id": "10", - "match": { - "large_community_list": { - "id": "ALL", - }, - }, + "match": {"large_community_list": {"id": "ALL",},}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_6 = { @@ -2303,10 +2146,9 @@ def test_large_community_lists_with_rmap_match_regex(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -2319,48 +2161,38 @@ def test_large_community_lists_with_rmap_match_regex(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_7 = { - "largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5" - } + input_dict_7 = {"largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_7) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Delete route map reference by community-list") - input_dict_3 = { - "r4": { - "route_maps": ["RM_R4_IN"] - } - } + input_dict_3 = {"r4": 
{"route_maps": ["RM_R4_IN"]}} result = delete_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -2370,35 +2202,29 @@ def test_large_community_lists_with_rmap_match_regex(request): { "action": "permit", "seq_id": "20", - "match": { - "large_community_list": { - "id": "EXP_ALL", - }, - }, + "match": {"large_community_list": {"id": "EXP_ALL",},}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("clear ip bgp") - result = clear_bgp_and_verify(tgen, topo, 'r4') - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + result = clear_bgp_and_verify(tgen, topo, "r4") + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_7 = { - "largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5" - } + input_dict_7 = {"largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_7, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: {}".\ - format(tc_name, result) + result = verify_bgp_community( + tgen, adt, dut, NETWORKS[adt], input_dict_7, expected=False + ) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp_link_bw_ip/__init__.py b/tests/topotests/bgp_link_bw_ip/__init__.py new file mode 100755 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/__init__.py diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-1.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-1.json new file mode 100644 index 0000000000..3e3c35ee08 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-1.json @@ -0,0 +1,19 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-2.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-2.json new file mode 100644 index 0000000000..f07e89b495 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-2.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65303:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + 
"overall":true + }, + "extendedCommunity":{ + "string":"LB:65201:375000 (3.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-3.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-3.json new file mode 100644 index 0000000000..3501d12e70 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-3.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65303:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-4.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-4.json new file mode 100644 index 0000000000..b1ed004490 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-4.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.11\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65303:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65201:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-5.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-5.json new file mode 100644 index 0000000000..89469b8ace --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-5.json @@ -0,0 +1,29 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65201:375000 
(3.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r1/bgpd.conf new file mode 100644 index 0000000000..40c19062a2 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgpd.conf @@ -0,0 +1,9 @@ +hostname r1 +! +router bgp 65101 + bgp router-id 11.1.1.1 + no bgp ebgp-requires-policy + bgp bestpath as-path multipath-relax + neighbor 11.1.1.2 remote-as external + neighbor 11.1.1.6 remote-as external +! diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-1.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-1.json new file mode 100644 index 0000000000..3c02e2675d --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-1.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":25 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":75 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-2.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-2.json new file mode 100644 index 0000000000..3c2d42caac --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-2.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":33 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":66 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-3.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-3.json new file mode 100644 index 0000000000..3d80018cea --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-3.json @@ -0,0 +1,20 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":33 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":66 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-4.json 
b/tests/topotests/bgp_link_bw_ip/r1/ip-route-4.json new file mode 100644 index 0000000000..6b757ef9ed --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-4.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-5.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-5.json new file mode 100644 index 0000000000..641ecabf47 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-5.json @@ -0,0 +1,20 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-6.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-6.json new file mode 100644 index 0000000000..6ed3f8ef55 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-6.json @@ -0,0 +1,15 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-7.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-7.json new file mode 100644 index 0000000000..95531d99be --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-7.json @@ -0,0 +1,15 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-8.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-8.json new file mode 100644 index 0000000000..beac501360 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-8.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + 
"prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-9.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-9.json new file mode 100644 index 0000000000..eb27ce2633 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-9.json @@ -0,0 +1,20 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/v4_route.json b/tests/topotests/bgp_link_bw_ip/r1/v4_route.json new file mode 100644 index 0000000000..d40a06d872 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/v4_route.json @@ -0,0 +1,104 @@ +{ + "10.0.1.1\/32":[ + { + "prefix":"10.0.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "table":254, + "internalStatus":0, + "internalFlags":0, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":9, + "ip":"0.0.0.0", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true, + "onLink":true + } + ] + }, + { + "prefix":"10.0.1.1\/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "table":254, + "internalStatus":16, + "internalFlags":8, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":3, + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.3.4\/32":[ + { + "prefix":"10.0.3.4\/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "table":254, + "internalStatus":16, + "internalFlags":8, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, 
+ "nexthops":[ + { + "flags":3, + "fib":true, + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r1-eth1", + "active":true + } + ] + } + ], + "10.0.20.1\/32":[ + { + "prefix":"10.0.20.1\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "table":254, + "internalStatus":16, + "internalFlags":8, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":11, + "fib":true, + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r1-eth1", + "active":true, + "onLink":true + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/zebra.conf b/tests/topotests/bgp_link_bw_ip/r1/zebra.conf new file mode 100644 index 0000000000..0fc81f9bac --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/zebra.conf @@ -0,0 +1,7 @@ +! +interface r1-eth0 + ip address 11.1.1.1/30 +! +interface r1-eth1 + ip address 11.1.1.5/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r10/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r10/bgpd.conf new file mode 100644 index 0000000000..80588e7961 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r10/bgpd.conf @@ -0,0 +1,16 @@ +hostname r10 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65354 + bgp router-id 11.1.6.2 + no bgp ebgp-requires-policy + neighbor 11.1.6.1 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r10/zebra.conf b/tests/topotests/bgp_link_bw_ip/r10/zebra.conf new file mode 100644 index 0000000000..1a24fdaea7 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r10/zebra.conf @@ -0,0 +1,6 @@ +interface r10-eth0 + ip address 11.1.6.2/30 +! +interface r10-eth1 + ip address 50.1.1.10/32 +! 
diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgp-route-1.json b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-1.json new file mode 100644 index 0000000000..3c38689a37 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-1.json @@ -0,0 +1,19 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgp-route-2.json b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-2.json new file mode 100644 index 0000000000..1895cd822e --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-2.json @@ -0,0 +1,19 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgp-route-3.json b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-3.json new file mode 100644 index 0000000000..dfc4171bad --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-3.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65302:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r2/bgpd.conf new file mode 100644 index 0000000000..6fec1913c8 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgpd.conf @@ -0,0 +1,10 @@ +hostname r2 +! 
+router bgp 65201 + bgp router-id 11.1.2.1 + bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy + neighbor 11.1.1.1 remote-as external + neighbor 11.1.2.2 remote-as external + neighbor 11.1.2.6 remote-as external +! diff --git a/tests/topotests/bgp_link_bw_ip/r2/ip-route-1.json b/tests/topotests/bgp_link_bw_ip/r2/ip-route-1.json new file mode 100644 index 0000000000..131100a684 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/ip-route-1.json @@ -0,0 +1,19 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "protocol":"bgp", + "selected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.2.2", + "interfaceName":"r2-eth1", + "active":true, + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/ip-route-2.json b/tests/topotests/bgp_link_bw_ip/r2/ip-route-2.json new file mode 100644 index 0000000000..7e2fa6be25 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/ip-route-2.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.2.6", + "weight":33 + }, + { + "fib":true, + "ip":"11.1.2.2", + "weight":66 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/ip-route-3.json b/tests/topotests/bgp_link_bw_ip/r2/ip-route-3.json new file mode 100644 index 0000000000..d0509bbd29 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/ip-route-3.json @@ -0,0 +1,15 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.2.2", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/zebra.conf b/tests/topotests/bgp_link_bw_ip/r2/zebra.conf new file mode 100644 index 0000000000..23573a108d --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/zebra.conf @@ -0,0 +1,10 @@ +! +interface r2-eth0 + ip address 11.1.1.2/30 +! +interface r2-eth1 + ip address 11.1.2.1/30 +! +interface r2-eth2 + ip address 11.1.2.5/30 +! 
diff --git a/tests/topotests/bgp_link_bw_ip/r3/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r3/bgpd.conf new file mode 100644 index 0000000000..1c2ca88306 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r3/bgpd.conf @@ -0,0 +1,9 @@ +hostname r3 +! +router bgp 65202 + bgp router-id 11.1.3.1 + bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy + neighbor 11.1.1.5 remote-as external + neighbor 11.1.3.2 remote-as external +! diff --git a/tests/topotests/bgp_link_bw_ip/r3/zebra.conf b/tests/topotests/bgp_link_bw_ip/r3/zebra.conf new file mode 100644 index 0000000000..d667669821 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r3/zebra.conf @@ -0,0 +1,7 @@ +! +interface r3-eth0 + ip address 11.1.1.6/30 +! +interface r3-eth1 + ip address 11.1.3.1/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r4/bgp-route-1.json b/tests/topotests/bgp_link_bw_ip/r4/bgp-route-1.json new file mode 100644 index 0000000000..87d1ae0b44 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/bgp-route-1.json @@ -0,0 +1,23 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "nexthops":[ + { + "ip":"11.1.4.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "nexthops":[ + { + "ip":"11.1.4.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r4/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r4/bgpd.conf new file mode 100644 index 0000000000..022a230643 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/bgpd.conf @@ -0,0 +1,29 @@ +! +log file bgpd.log +! +debug bgp updates +debug bgp zebra +debug bgp bestpath 198.10.1.1/32 +! +hostname r4 +! +ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32 +! +route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths +! +route-map anycast_ip permit 20 +! 
+router bgp 65301 + bgp router-id 11.1.4.1 + bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy + neighbor 11.1.2.1 remote-as external + neighbor 11.1.4.2 remote-as external + neighbor 11.1.4.6 remote-as external + ! + address-family ipv4 unicast + neighbor 11.1.2.1 route-map anycast_ip out + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r4/ip-route-1.json b/tests/topotests/bgp_link_bw_ip/r4/ip-route-1.json new file mode 100644 index 0000000000..a9ccf07c82 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/ip-route-1.json @@ -0,0 +1,21 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "protocol":"bgp", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.4.2", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.4.6", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r4/zebra.conf b/tests/topotests/bgp_link_bw_ip/r4/zebra.conf new file mode 100644 index 0000000000..ef61f7eb1b --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/zebra.conf @@ -0,0 +1,10 @@ +! +interface r4-eth0 + ip address 11.1.2.2/30 +! +interface r4-eth1 + ip address 11.1.4.1/30 +! +interface r4-eth2 + ip address 11.1.4.5/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r5/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r5/bgpd.conf new file mode 100644 index 0000000000..fc4e3888d8 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r5/bgpd.conf @@ -0,0 +1,21 @@ +hostname r5 +! +ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32 +! +route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths +! +route-map anycast_ip permit 20 +! +router bgp 65302 + bgp router-id 11.1.5.1 + bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy + neighbor 11.1.2.5 remote-as external + neighbor 11.1.5.2 remote-as external + ! + address-family ipv4 unicast + neighbor 11.1.2.5 route-map anycast_ip out + ! +! 
diff --git a/tests/topotests/bgp_link_bw_ip/r5/zebra.conf b/tests/topotests/bgp_link_bw_ip/r5/zebra.conf new file mode 100644 index 0000000000..66c65964e2 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r5/zebra.conf @@ -0,0 +1,7 @@ +! +interface r5-eth0 + ip address 11.1.2.6/30 +! +interface r5-eth1 + ip address 11.1.5.1/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r6/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r6/bgpd.conf new file mode 100644 index 0000000000..f08f6337f4 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r6/bgpd.conf @@ -0,0 +1,21 @@ +hostname r6 +! +ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32 +! +route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths +! +route-map anycast_ip permit 20 +! +router bgp 65303 + bgp router-id 11.1.6.1 + bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy + neighbor 11.1.3.1 remote-as external + neighbor 11.1.6.2 remote-as external + ! + address-family ipv4 unicast + neighbor 11.1.3.1 route-map anycast_ip out + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r6/zebra.conf b/tests/topotests/bgp_link_bw_ip/r6/zebra.conf new file mode 100644 index 0000000000..66ff563269 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r6/zebra.conf @@ -0,0 +1,7 @@ +! +interface r6-eth0 + ip address 11.1.3.2/30 +! +interface r6-eth1 + ip address 11.1.6.1/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r7/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r7/bgpd.conf new file mode 100644 index 0000000000..98dfe2471a --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r7/bgpd.conf @@ -0,0 +1,16 @@ +hostname r7 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65351 + bgp router-id 11.1.4.2 + no bgp ebgp-requires-policy + neighbor 11.1.4.1 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! 
diff --git a/tests/topotests/bgp_link_bw_ip/r7/zebra.conf b/tests/topotests/bgp_link_bw_ip/r7/zebra.conf new file mode 100644 index 0000000000..38e36cac30 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r7/zebra.conf @@ -0,0 +1,6 @@ +interface r7-eth0 + ip address 11.1.4.2/30 +! +interface r7-eth1 + ip address 50.1.1.7/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/r8/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r8/bgpd.conf new file mode 100644 index 0000000000..b4ba8e8a72 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r8/bgpd.conf @@ -0,0 +1,16 @@ +hostname r8 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65352 + bgp router-id 11.1.4.6 + no bgp ebgp-requires-policy + neighbor 11.1.4.5 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r8/zebra.conf b/tests/topotests/bgp_link_bw_ip/r8/zebra.conf new file mode 100644 index 0000000000..1369e19c06 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r8/zebra.conf @@ -0,0 +1,6 @@ +interface r8-eth0 + ip address 11.1.4.6/30 +! +interface r8-eth1 + ip address 50.1.1.8/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/r9/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r9/bgpd.conf new file mode 100644 index 0000000000..31f971dd08 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r9/bgpd.conf @@ -0,0 +1,16 @@ +hostname r9 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65353 + bgp router-id 11.1.5.2 + no bgp ebgp-requires-policy + neighbor 11.1.5.1 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! 
diff --git a/tests/topotests/bgp_link_bw_ip/r9/zebra.conf b/tests/topotests/bgp_link_bw_ip/r9/zebra.conf new file mode 100644 index 0000000000..c73caf3bfc --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r9/zebra.conf @@ -0,0 +1,6 @@ +interface r9-eth0 + ip address 11.1.5.2/30 +! +interface r9-eth1 + ip address 50.1.1.9/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py new file mode 100755 index 0000000000..86eb2969ce --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python + +# +# test_bgp_linkbw_ip.py +# +# Copyright (c) 2020 by +# Cumulus Networks, Inc +# Vivek Venkatraman +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_linkbw_ip.py: Test weighted ECMP using BGP link-bandwidth +""" + +import os +import re +import sys +from functools import partial +import pytest +import json + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, '../')) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +""" +This topology is for validating one of the primary use cases for +weighted ECMP (a.k.a. Unequal cost multipath) using BGP link-bandwidth: +https://tools.ietf.org/html/draft-mohanty-bess-ebgp-dmz + +The topology consists of two PODs. Pod-1 consists of a spine switch +and two leaf switches, with two servers attached to the first leaf and +one to the second leaf. Pod-2 consists of one spine and one leaf, with +one server connected to the leaf. The PODs are connected by a super-spine +switch. + +Note that the use of the term "switch" above is in keeping with common +data-center terminology. These devices are all regular routers; for +this scenario, the servers are also routers as they have to announce +anycast IP (VIP) addresses via BGP. 
+""" + +class BgpLinkBwTopo(Topo): + "Test topology builder" + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # Create 10 routers - 1 super-spine, 2 spines, 3 leafs + # and 4 servers + routers = {} + for i in range(1, 11): + routers[i] = tgen.add_router('r{}'.format(i)) + + # Create 13 "switches" - to interconnect the above routers + switches = {} + for i in range(1, 14): + switches[i] = tgen.add_switch('s{}'.format(i)) + + # Interconnect R1 (super-spine) to R2 and R3 (the two spines) + switches[1].add_link(tgen.gears['r1']) + switches[1].add_link(tgen.gears['r2']) + switches[2].add_link(tgen.gears['r1']) + switches[2].add_link(tgen.gears['r3']) + + # Interconnect R2 (spine in pod-1) to R4 and R5 (the associated + # leaf switches) + switches[3].add_link(tgen.gears['r2']) + switches[3].add_link(tgen.gears['r4']) + switches[4].add_link(tgen.gears['r2']) + switches[4].add_link(tgen.gears['r5']) + + # Interconnect R3 (spine in pod-2) to R6 (associated leaf) + switches[5].add_link(tgen.gears['r3']) + switches[5].add_link(tgen.gears['r6']) + + # Interconnect leaf switches to servers + switches[6].add_link(tgen.gears['r4']) + switches[6].add_link(tgen.gears['r7']) + switches[7].add_link(tgen.gears['r4']) + switches[7].add_link(tgen.gears['r8']) + switches[8].add_link(tgen.gears['r5']) + switches[8].add_link(tgen.gears['r9']) + switches[9].add_link(tgen.gears['r6']) + switches[9].add_link(tgen.gears['r10']) + + # Create empty networks for the servers + switches[10].add_link(tgen.gears['r7']) + switches[11].add_link(tgen.gears['r8']) + switches[12].add_link(tgen.gears['r9']) + switches[13].add_link(tgen.gears['r10']) + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(BgpLinkBwTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, + os.path.join(CWD, '{}/zebra.conf'.format(rname)) + ) + 
router.load_config( + TopoRouter.RD_BGP, + os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + ) + + # Initialize all routers. + tgen.start_router() + + #tgen.mininet_cli() + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + tgen.stop_topology() + +def test_bgp_linkbw_adv(): + "Test #1: Test BGP link-bandwidth advertisement based on number of multipaths" + logger.info('\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Configure anycast IP on server r7 + logger.info('Configure anycast IP on server r7') + + tgen.net['r7'].cmd('ip addr add 198.10.1.1/32 dev r7-eth1') + + # Check on spine router r2 for link-bw advertisement by leaf router r4 + logger.info('Check on spine router r2 for link-bw advertisement by leaf router r4') + + json_file = '{}/r2/bgp-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check on spine router r2 that default weight is used as there is no multipath + logger.info('Check on spine router r2 that default weight is used as there is no multipath') + + json_file = '{}/r2/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check on super-spine router r1 that link-bw has been propagated by spine router r2 + logger.info('Check on 
super-spine router r1 that link-bw has been propagated by spine router r2') + + json_file = '{}/r1/bgp-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_bgp_cumul_linkbw(): + "Test #2: Test cumulative link-bandwidth propagation" + logger.info('\nTest #2: Test cumulative link-bandwidth propagation') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + r4 = tgen.gears['r4'] + + # Configure anycast IP on additional server r8 + logger.info('Configure anycast IP on server r8') + + tgen.net['r8'].cmd('ip addr add 198.10.1.1/32 dev r8-eth1') + + # Check multipath on leaf router r4 + logger.info('Check multipath on leaf router r4') + + json_file = '{}/r4/bgp-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r4, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on leaf router r4' + assert result is None, assertmsg + + # Check regular ECMP is in effect on leaf router r4 + logger.info('Check regular ECMP is in effect on leaf router r4') + + json_file = '{}/r4/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r4, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on leaf router r4' + assert result is None, assertmsg + + # Check on spine router r2 that leaf has propagated the cumulative link-bw 
based on num-multipaths + logger.info('Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths') + + json_file = '{}/r2/bgp-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + +def test_weighted_ecmp(): + "Test #3: Test weighted ECMP - multipath with next hop weights" + logger.info('\nTest #3: Test weighted ECMP - multipath with next hop weights') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Configure anycast IP on additional server r9 + logger.info('Configure anycast IP on server r9') + + tgen.net['r9'].cmd('ip addr add 198.10.1.1/32 dev r9-eth1') + + # Check multipath on spine router r2 + logger.info('Check multipath on spine router r2') + json_file = '{}/r2/bgp-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check weighted ECMP is in effect on the spine router r2 + logger.info('Check weighted ECMP is in effect on the spine router r2') + + json_file = '{}/r2/ip-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Configure 
anycast IP on additional server r10 + logger.info('Configure anycast IP on server r10') + + tgen.net['r10'].cmd('ip addr add 198.10.1.1/32 dev r10-eth1') + + # Check multipath on super-spine router r1 + logger.info('Check multipath on super-spine router r1') + json_file = '{}/r1/bgp-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Check weighted ECMP is in effect on the super-spine router r1 + logger.info('Check weighted ECMP is in effect on the super-spine router r1') + json_file = '{}/r1/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_weighted_ecmp_link_flap(): + "Test #4: Test weighted ECMP rebalancing upon change (link flap)" + logger.info('\nTest #4: Test weighted ECMP rebalancing upon change (link flap)') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Bring down link on server r9 + logger.info('Bring down link on server r9') + + tgen.net['r9'].cmd('ip link set dev r9-eth1 down') + + # Check spine router r2 has only one path + logger.info('Check spine router r2 has only one path') + + json_file = '{}/r2/ip-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, 
count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1 + logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1') + + json_file = '{}/r1/bgp-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Bring up link on server r9 + logger.info('Bring up link on server r9') + + tgen.net['r9'].cmd('ip link set dev r9-eth1 up') + + # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1 + logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1') + + json_file = '{}/r1/bgp-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, 
count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_weighted_ecmp_second_anycast_ip(): + "Test #5: Test weighted ECMP for a second anycast IP" + logger.info('\nTest #5: Test weighted ECMP for a second anycast IP') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Configure anycast IP on additional server r7, r9 and r10 + logger.info('Configure anycast IP on server r7, r9 and r10') + + tgen.net['r7'].cmd('ip addr add 198.10.1.11/32 dev r7-eth1') + tgen.net['r9'].cmd('ip addr add 198.10.1.11/32 dev r9-eth1') + tgen.net['r10'].cmd('ip addr add 198.10.1.11/32 dev r10-eth1') + + # Check link-bandwidth and weighted ECMP on super-spine router r1 + logger.info('Check link-bandwidth and weighted ECMP on super-spine router r1') + + json_file = '{}/r1/bgp-route-4.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_paths_with_and_without_linkbw(): + "Test #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP" + logger.info('\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP') + + tgen = get_topogen() + if tgen.routers_have_failure(): + 
pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + + # Configure leaf router r6 to not advertise any link-bandwidth + logger.info('Configure leaf router r6 to not advertise any link-bandwidth') + + tgen.net['r6'].cmd('vtysh -c \"conf t\" -c \"router bgp 65303\" -c \"address-family ipv4 unicast\" -c \"no neighbor 11.1.3.1 route-map anycast_ip out\"') + + # Check link-bandwidth change on super-spine router r1 + logger.info('Check link-bandwidth change on super-spine router r1') + + json_file = '{}/r1/bgp-route-5.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Check super-spine router r1 resorts to regular ECMP + logger.info('Check super-spine router r1 resorts to regular ECMP') + + json_file = '{}/r1/ip-route-4.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-5.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_linkbw_handling_options(): + "Test #7: Test different options for processing link-bandwidth on the receiver" + logger.info('\nTest #7: Test different options for processing link-bandwidth on the receiver') + + tgen = 
get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + + # Configure super-spine r1 to skip multipaths without link-bandwidth + logger.info('Configure super-spine r1 to skip multipaths without link-bandwidth') + + tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth skip-missing\"') + + # Check super-spine router r1 resorts to only one path as other path is skipped + logger.info('Check super-spine router r1 resorts to only one path as other path is skipped') + + json_file = '{}/r1/ip-route-6.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-7.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Configure super-spine r1 to use default-weight for multipaths without link-bandwidth + logger.info('Configure super-spine r1 to use default-weight for multipaths without link-bandwidth') + + tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth default-weight-for-missing\"') + + # Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth + logger.info('Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth') + + json_file = '{}/r1/ip-route-8.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip 
route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-9.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +if __name__ == '__main__': + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_local_as_private_remove/r1/bgpd.conf b/tests/topotests/bgp_local_as_private_remove/r1/bgpd.conf index e2f034453f..6f8fcd753d 100644 --- a/tests/topotests/bgp_local_as_private_remove/r1/bgpd.conf +++ b/tests/topotests/bgp_local_as_private_remove/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 1000 neighbor 192.168.255.2 local-as 500 address-family ipv4 unicast diff --git a/tests/topotests/bgp_local_as_private_remove/r2/bgpd.conf b/tests/topotests/bgp_local_as_private_remove/r2/bgpd.conf index 0549697ff0..27427a9aaa 100644 --- a/tests/topotests/bgp_local_as_private_remove/r2/bgpd.conf +++ b/tests/topotests/bgp_local_as_private_remove/r2/bgpd.conf @@ -1,2 +1,3 @@ router bgp 1000 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 500 diff --git a/tests/topotests/bgp_local_as_private_remove/r3/bgpd.conf b/tests/topotests/bgp_local_as_private_remove/r3/bgpd.conf index 4e57f71c48..f2050ddfdb 100644 --- a/tests/topotests/bgp_local_as_private_remove/r3/bgpd.conf +++ b/tests/topotests/bgp_local_as_private_remove/r3/bgpd.conf @@ -1,4 +1,5 @@ router bgp 3000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 1000 neighbor 192.168.255.2 local-as 500 address-family ipv4 unicast diff --git 
a/tests/topotests/bgp_local_as_private_remove/r4/bgpd.conf b/tests/topotests/bgp_local_as_private_remove/r4/bgpd.conf index 0549697ff0..27427a9aaa 100644 --- a/tests/topotests/bgp_local_as_private_remove/r4/bgpd.conf +++ b/tests/topotests/bgp_local_as_private_remove/r4/bgpd.conf @@ -1,2 +1,3 @@ router bgp 1000 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 500 diff --git a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py index da4b67b087..56bb14411a 100644 --- a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py +++ b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py @@ -35,7 +35,7 @@ import time import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -43,20 +43,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,20 +68,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, 
'{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_remove_private_as(): tgen = get_topogen() @@ -88,24 +90,29 @@ def test_bgp_remove_private_as(): def _bgp_converge(router): while True: - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - if output['192.168.255.1']['bgpState'] == 'Established': + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + if output["192.168.255.1"]["bgpState"] == "Established": time.sleep(1) return True def _bgp_as_path(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")) - if output['prefix'] == '172.16.255.254/32': - return output['paths'][0]['aspath']['segments'][0]['list'] + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json") + ) + if output["prefix"] == "172.16.255.254/32": + return output["paths"][0]["aspath"]["segments"][0]["list"] + + if _bgp_converge("r2"): + assert len(_bgp_as_path("r2")) == 1 + assert 65000 not in _bgp_as_path("r2") - if _bgp_converge('r2'): - assert len(_bgp_as_path('r2')) == 1 - assert 65000 not in _bgp_as_path('r2') + if _bgp_converge("r4"): + assert len(_bgp_as_path("r4")) == 2 + assert 3000 in _bgp_as_path("r4") - if _bgp_converge('r4'): - assert len(_bgp_as_path('r4')) == 2 - assert 3000 in _bgp_as_path('r4') -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_maximum_prefix_invalid_update/r1/bgpd.conf b/tests/topotests/bgp_maximum_prefix_invalid_update/r1/bgpd.conf index 235b42b3d5..f0df56e947 100644 --- 
a/tests/topotests/bgp_maximum_prefix_invalid_update/r1/bgpd.conf +++ b/tests/topotests/bgp_maximum_prefix_invalid_update/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_maximum_prefix_invalid_update/r2/bgpd.conf b/tests/topotests/bgp_maximum_prefix_invalid_update/r2/bgpd.conf index e016284159..ef50dd0d7f 100644 --- a/tests/topotests/bgp_maximum_prefix_invalid_update/r2/bgpd.conf +++ b/tests/topotests/bgp_maximum_prefix_invalid_update/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 address-family ipv4 neighbor 192.168.255.1 maximum-prefix 1 diff --git a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py index e7f4f40f06..5e7c6d4b63 100644 --- a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py +++ b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py @@ -39,7 +39,7 @@ import time import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -47,16 +47,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen 
= Topogen(TemplateTopo, mod.__name__) @@ -66,20 +68,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() @@ -88,12 +90,16 @@ def test_bgp_maximum_prefix_invalid(): def _bgp_converge(router): while True: - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - if output['192.168.255.1']['connectionsEstablished'] > 0: + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + if output["192.168.255.1"]["connectionsEstablished"] > 0: return True def _bgp_parsing_nlri(router): - cmd_max_exceeded = 'grep "%MAXPFXEXCEED: No. of IPv4 Unicast prefix received" bgpd.log' + cmd_max_exceeded = ( + 'grep "%MAXPFXEXCEED: No. 
of IPv4 Unicast prefix received" bgpd.log' + ) cmdt_error_parsing_nlri = 'grep "Error parsing NLRI" bgpd.log' output_max_exceeded = tgen.gears[router].run(cmd_max_exceeded) output_error_parsing_nlri = tgen.gears[router].run(cmdt_error_parsing_nlri) @@ -103,10 +109,10 @@ def test_bgp_maximum_prefix_invalid(): return False return True + if _bgp_converge("r2"): + assert _bgp_parsing_nlri("r2") == True - if _bgp_converge('r2'): - assert _bgp_parsing_nlri('r2') == True -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_maximum_prefix_out/r1/bgpd.conf b/tests/topotests/bgp_maximum_prefix_out/r1/bgpd.conf index 9a68809631..03e3eb6e08 100644 --- a/tests/topotests/bgp_maximum_prefix_out/r1/bgpd.conf +++ b/tests/topotests/bgp_maximum_prefix_out/r1/bgpd.conf @@ -1,5 +1,6 @@ ! router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65002 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_maximum_prefix_out/r2/bgpd.conf b/tests/topotests/bgp_maximum_prefix_out/r2/bgpd.conf index 1659c4bec4..2d47b2f661 100644 --- a/tests/topotests/bgp_maximum_prefix_out/r2/bgpd.conf +++ b/tests/topotests/bgp_maximum_prefix_out/r2/bgpd.conf @@ -1,5 +1,6 @@ ! router bgp 65002 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 exit-address-family ! 
diff --git a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py index d77aa5aff2..708684f696 100644 --- a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py +++ b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py @@ -35,7 +35,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -43,16 +43,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -62,38 +64,34 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_out(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) 
expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) @@ -103,6 +101,7 @@ def test_bgp_maximum_prefix_out(): assert result is None, 'Failed bgp convergence in "{}"'.format(router) -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_multiview_topo1/README.md b/tests/topotests/bgp_multiview_topo1/README.md index 2a2747344a..c1a1445894 100644 --- a/tests/topotests/bgp_multiview_topo1/README.md +++ b/tests/topotests/bgp_multiview_topo1/README.md @@ -1,4 +1,4 @@ -# Simple FreeRangeRouting Route-Server Test +# Simple FRRouting Route-Server Test ## Topology +----------+ +----------+ +----------+ +----------+ +----------+ diff --git a/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git 
a/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = 
datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = 
open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = 
int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ 
-from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py +++ 
b/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py 
b/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop 
endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) 
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf b/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf index 71397a9942..5f65a54d7f 100644 --- a/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf +++ b/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf @@ -13,6 +13,7 @@ log file bgpd.log ! router bgp 100 view 1 bgp router-id 172.30.1.1 + no bgp ebgp-requires-policy network 172.20.0.0/28 route-map local1 timers bgp 60 180 neighbor 172.16.1.1 remote-as 65001 @@ -21,6 +22,7 @@ router bgp 100 view 1 ! router bgp 100 view 2 bgp router-id 172.30.1.1 + no bgp ebgp-requires-policy network 172.20.0.0/28 route-map local2 timers bgp 60 180 neighbor 172.16.1.3 remote-as 65003 @@ -28,6 +30,7 @@ router bgp 100 view 2 ! router bgp 100 view 3 bgp router-id 172.30.1.1 + no bgp ebgp-requires-policy network 172.20.0.0/28 timers bgp 60 180 neighbor 172.16.1.6 remote-as 65006 diff --git a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py index c851567dda..c342b17dd2 100755 --- a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py +++ b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py @@ -60,7 +60,7 @@ test_bgp_multiview_topo1.py: Simple Quagga/FRR Route-Server Test ~~ 172.20.0.1/28 ~~ attributes (using route-map) ~~ Stub Switch ~~ ~~~~~~~~~~~~~ -""" +""" import os import re @@ -90,37 +90,39 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "BGP Multiview Topology 1" def build(self, **_opts): - exabgpPrivateDirs = ['/etc/exabgp', - '/var/run/exabgp', - '/var/log'] + exabgpPrivateDirs = ["/etc/exabgp", "/var/run/exabgp", "/var/log"] # Setup Routers router = {} for i in range(1, 2): - router[i] = 
topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # Setup Provider BGP peers peer = {} for i in range(1, 9): - peer[i] = self.addHost('peer%s' % i, ip='172.16.1.%s/24' % i, - defaultRoute='via 172.16.1.254', - privateDirs=exabgpPrivateDirs) + peer[i] = self.addHost( + "peer%s" % i, + ip="172.16.1.%s/24" % i, + defaultRoute="via 172.16.1.254", + privateDirs=exabgpPrivateDirs, + ) # Setup Switches switch = {} # First switch is for a dummy interface (for local network) - switch[0] = self.addSwitch('sw0', cls=topotest.LegacySwitch) - self.addLink(switch[0], router[1], intfName2='r1-stub') + switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch) + self.addLink(switch[0], router[1], intfName2="r1-stub") # Second switch is for connection to all peering routers - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[1], intfName2='r1-eth0') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink(switch[1], router[1], intfName2="r1-eth0") for j in range(1, 9): - self.addLink(switch[1], peer[j], intfName2='peer%s-eth0' % j) + self.addLink(switch[1], peer[j], intfName2="peer%s-eth0" % j) ##################################################### @@ -129,6 +131,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net @@ -136,7 +139,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -146,25 +149,26 @@ def setup_module(module): # Starting Routers for i in range(1, 2): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('bgpd', '%s/r%s/bgpd.conf' % (thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", 
"%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("bgpd", "%s/r%s/bgpd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # Starting PE Hosts and init ExaBGP on each of them - print('*** Starting BGP on all 8 Peers') + print("*** Starting BGP on all 8 Peers") for i in range(1, 9): - net['peer%s' % i].cmd('cp %s/exabgp.env /etc/exabgp/exabgp.env' % thisDir) - net['peer%s' % i].cmd('cp %s/peer%s/* /etc/exabgp/' % (thisDir, i)) - net['peer%s' % i].cmd('chmod 644 /etc/exabgp/*') - net['peer%s' % i].cmd('chmod 755 /etc/exabgp/*.py') - net['peer%s' % i].cmd('chown -R exabgp:exabgp /etc/exabgp') - net['peer%s' % i].cmd('exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg') - print('peer%s' % i), - print('') + net["peer%s" % i].cmd("cp %s/exabgp.env /etc/exabgp/exabgp.env" % thisDir) + net["peer%s" % i].cmd("cp %s/peer%s/* /etc/exabgp/" % (thisDir, i)) + net["peer%s" % i].cmd("chmod 644 /etc/exabgp/*") + net["peer%s" % i].cmd("chmod 755 /etc/exabgp/*.py") + net["peer%s" % i].cmd("chown -R exabgp:exabgp /etc/exabgp") + net["peer%s" % i].cmd("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg") + print("peer%s" % i), + print("") # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) + def teardown_module(module): global net @@ -172,20 +176,21 @@ def teardown_module(module): print("******************************************\n") # Shutdown - clean up everything - print('*** Killing BGP on Peer routers') + print("*** Killing BGP on Peer routers") # Killing ExaBGP for i in range(1, 9): - net['peer%s' % i].cmd('kill `cat /var/run/exabgp/exabgp.pid`') + net["peer%s" % i].cmd("kill `cat /var/run/exabgp/exabgp.pid`") # End - Shutdown network net.stop() + def test_router_running(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -193,7 +198,7 @@ def 
test_router_running(): # Starting Routers for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -207,7 +212,7 @@ def test_bgp_converge(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) # Wait for BGP to converge (All Neighbors in either Full or TwoWay State) @@ -220,9 +225,12 @@ def test_bgp_converge(): # Look for any node not yet converged for i in range(1, 2): for view in range(1, 4): - notConverged = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -v " 11$"' % view) + notConverged = net["r%s" % i].cmd( + 'vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -v " 11$"' + % view + ) if notConverged: - print('Waiting for r%s, view %s' % (i, view)) + print("Waiting for r%s, view %s" % (i, view)) sys.stdout.flush() break if notConverged: @@ -231,17 +239,17 @@ def test_bgp_converge(): sleep(5) timeout -= 5 else: - print('Done') + print("Done") break else: # Bail out with error if a router fails to converge - bgpStatus = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s summary"' % view) + bgpStatus = net["r%s" % i].cmd('vtysh -c "show ip bgp view %s summary"' % view) assert False, "BGP did not converge:\n%s" % bgpStatus # Wait for an extra 5s to announce all routes - print('Waiting 5s for routes to be announced'); + print("Waiting 5s for routes to be announced") sleep(5) - + print("BGP converged.") # if timeout < 60: @@ -251,18 +259,19 @@ def test_bgp_converge(): # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting Quagga/FRR daemons, uncomment the 
next line # CLI(net) + def test_bgp_routingTable(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -274,56 +283,68 @@ def test_bgp_routingTable(): for view in range(1, 4): success = 0 # This glob pattern should work as long as number of views < 10 - for refTableFile in (glob.glob( - '%s/r%s/show_ip_bgp_view_%s*.ref' % (thisDir, i, view))): + for refTableFile in glob.glob( + "%s/r%s/show_ip_bgp_view_%s*.ref" % (thisDir, i, view) + ): if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view).rstrip() - + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view) + .rstrip() + ) + # Fix inconsitent spaces between 0.99.24 and newer versions of Quagga... 
- actual = re.sub('0 0', '0 0', actual) - actual = re.sub(r'([0-9]) 32768', r'\1 32768', actual) + actual = re.sub("0 0", "0 0", actual) + actual = re.sub( + r"([0-9]) 32768", r"\1 32768", actual + ) # Remove summary line (changed recently) - actual = re.sub(r'Total number.*', '', actual) - actual = re.sub(r'Displayed.*', '', actual) + actual = re.sub(r"Total number.*", "", actual) + actual = re.sub(r"Displayed.*", "", actual) actual = actual.rstrip() # Fix table version (ignore it) - actual = re.sub(r'(BGP table version is )[0-9]+', r'\1XXX', actual) + actual = re.sub(r"(BGP table version is )[0-9]+", r"\1XXX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual BGP routing table", - title2="expected BGP routing table") + title2="expected BGP routing table", + ) if diff: diffresult[refTableFile] = diff else: success = 1 print("template %s matched: r%s ok" % (refTableFile, i)) - break; + break if not success: - resultstr = 'No template matched.\n' + resultstr = "No template matched.\n" for f in diffresult.iterkeys(): resultstr += ( - 'template %s: r%s failed Routing Table Check for view %s:\n%s\n' - % (f, i, view, diffresult[f])) + "template %s: r%s failed Routing Table Check for view %s:\n%s\n" + % (f, i, view, diffresult[f]) + ) raise AssertionError( - "Routing Table verification failed for router r%s, view %s:\n%s" % (i, view, resultstr)) - + "Routing Table verification failed for router r%s, view %s:\n%s" + % (i, view, resultstr) + ) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next 
line @@ -335,24 +356,26 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") thisDir = os.path.dirname(os.path.realpath(__file__)) print("\n\n** Verifying unexpected STDERR output from daemons") print("******************************************\n") - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('bgpd') + log = net["r1"].getStdErr("bgpd") if log: print("\nBGPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) @@ -362,22 +385,26 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) - net['r1'].stopRouter() - net['r1'].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r1"].stopRouter() + net["r1"].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == 
'__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py b/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py index eaa6a67872..f1ec9fa5ba 100755 --- a/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py +++ b/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,7 +13,7 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: diff --git a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf index 7a38cc307f..2f8759f960 100644 --- a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf +++ b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf @@ -5,6 +5,7 @@ log commands router bgp 1 bgp router-id 10.0.0.1 no bgp default ipv4-unicast + no bgp ebgp-requires-policy neighbor 10.0.0.101 remote-as 2 neighbor 10.0.0.102 remote-as 3 ! 
diff --git a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py index dc203cabc5..3a6aefe7ee 100755 --- a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py +++ b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py @@ -33,7 +33,7 @@ import functools import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -45,13 +45,17 @@ from mininet.topo import Topo class TemplateTopo(Topo): def build(self, **_opts): tgen = get_topogen(self) - router = tgen.add_router('r1') - switch = tgen.add_switch('s1') + router = tgen.add_router("r1") + switch = tgen.add_switch("s1") switch.add_link(router) - switch = tgen.gears['s1'] - peer1 = tgen.add_exabgp_peer('peer1', ip='10.0.0.101', defaultRoute='via 10.0.0.1') - peer2 = tgen.add_exabgp_peer('peer2', ip='10.0.0.102', defaultRoute='via 10.0.0.1') + switch = tgen.gears["s1"] + peer1 = tgen.add_exabgp_peer( + "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1" + ) + peer2 = tgen.add_exabgp_peer( + "peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1" + ) switch.add_link(peer1) switch.add_link(peer2) @@ -60,17 +64,21 @@ def setup_module(module): tgen = Topogen(TemplateTopo, module.__name__) tgen.start_topology() - router = tgen.gears['r1'] - router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format('r1'))) - router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format('r1'))) + router = tgen.gears["r1"] + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format("r1")) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1")) + ) router.start() - logger.info('starting exaBGP on peer1') + logger.info("starting exaBGP on peer1") peer_list = tgen.exabgp_peers() for pname, peer in peer_list.iteritems(): peer_dir = os.path.join(CWD, 
pname) - env_file = os.path.join(CWD, 'exabgp.env') - logger.info('Running ExaBGP peer') + env_file = os.path.join(CWD, "exabgp.env") + logger.info("Running ExaBGP peer") peer.start(peer_dir, env_file) logger.info(pname) @@ -82,45 +90,45 @@ def teardown_module(module): def test_r1_receive_and_advertise_prefix_sid_type1(): tgen = get_topogen() - router = tgen.gears['r1'] + router = tgen.gears["r1"] def _check_type1_r1(router, prefix, remoteLabel, labelIndex): - output = router.vtysh_cmd('show bgp ipv4 labeled-unicast {} json'.format(prefix)) + output = router.vtysh_cmd( + "show bgp ipv4 labeled-unicast {} json".format(prefix) + ) output = json.loads(output) expected = { - 'prefix': prefix, - 'advertisedTo': { '10.0.0.101':{}, '10.0.0.102':{} }, - 'paths': [{ - 'valid':True, - 'remoteLabel': remoteLabel, - 'labelIndex': labelIndex, - }] + "prefix": prefix, + "advertisedTo": {"10.0.0.101": {}, "10.0.0.102": {}}, + "paths": [ + {"valid": True, "remoteLabel": remoteLabel, "labelIndex": labelIndex,} + ], } return topotest.json_cmp(output, expected) - test_func = functools.partial(_check_type1_r1, router, '3.0.0.1/32', 800001, 1) + test_func = functools.partial(_check_type1_r1, router, "3.0.0.1/32", 800001, 1) success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assert result is None, 'Failed _check_type1_r1 in "{}"'.format(router) - test_func = functools.partial(_check_type1_r1, router, '3.0.0.2/32', 800002, 2) + test_func = functools.partial(_check_type1_r1, router, "3.0.0.2/32", 800002, 2) success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assert result is None, 'Failed _check_type1_r1 in "{}"'.format(router) def exabgp_get_update_prefix(filename, afi, nexthop, prefix): - with open('/tmp/peer2-received.log') as f: + with open("/tmp/peer2-received.log") as f: for line in f.readlines(): output = json.loads(line) - ret = output.get('neighbor') + ret = output.get("neighbor") if ret is None: continue - ret = 
ret.get('message') + ret = ret.get("message") if ret is None: continue - ret = ret.get('update') + ret = ret.get("update") if ret is None: continue - ret = ret.get('announce') + ret = ret.get("announce") if ret is None: continue ret = ret.get(afi) @@ -138,36 +146,40 @@ def exabgp_get_update_prefix(filename, afi, nexthop, prefix): def test_peer2_receive_prefix_sid_type1(): tgen = get_topogen() - peer2 = tgen.gears['peer2'] + peer2 = tgen.gears["peer2"] def _check_type1_peer2(prefix, labelindex): - output = exabgp_get_update_prefix('/tmp/peer2-received.log', 'ipv4 nlri-mpls', '10.0.0.101', prefix) + output = exabgp_get_update_prefix( + "/tmp/peer2-received.log", "ipv4 nlri-mpls", "10.0.0.101", prefix + ) expected = { - 'type': 'update', - 'neighbor': { - 'ip': '10.0.0.1', - 'message': { - 'update': { - 'attribute': { - 'attribute-0x28-0xE0': '0x010007000000{:08x}'.format(labelindex) + "type": "update", + "neighbor": { + "ip": "10.0.0.1", + "message": { + "update": { + "attribute": { + "attribute-0x28-0xE0": "0x010007000000{:08x}".format( + labelindex + ) }, - 'announce': { 'ipv4 nlri-mpls': { '10.0.0.101': {} } } + "announce": {"ipv4 nlri-mpls": {"10.0.0.101": {}}}, } - } - } + }, + }, } return topotest.json_cmp(output, expected) - test_func = functools.partial(_check_type1_peer2, '3.0.0.1/32', labelindex=1) + test_func = functools.partial(_check_type1_peer2, "3.0.0.1/32", labelindex=1) success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) - assert result is None, 'Failed _check_type1_peer2 in "{}"'.format('peer2') + assert result is None, 'Failed _check_type1_peer2 in "{}"'.format("peer2") - test_func = functools.partial(_check_type1_peer2, '3.0.0.2/32', labelindex=2) + test_func = functools.partial(_check_type1_peer2, "3.0.0.2/32", labelindex=2) success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) - assert result is None, 'Failed _check_type1_peer2 in "{}"'.format('peer2') + assert result is None, 'Failed 
_check_type1_peer2 in "{}"'.format("peer2") -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] ret = pytest.main(args) sys.exit(ret) diff --git a/tests/topotests/bgp_reject_as_sets/r1/bgpd.conf b/tests/topotests/bgp_reject_as_sets/r1/bgpd.conf index 7b24c4bbf9..94bfc5e561 100644 --- a/tests/topotests/bgp_reject_as_sets/r1/bgpd.conf +++ b/tests/topotests/bgp_reject_as_sets/r1/bgpd.conf @@ -1,5 +1,6 @@ ! exit1 router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65002 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_reject_as_sets/r2/bgpd.conf b/tests/topotests/bgp_reject_as_sets/r2/bgpd.conf index c991b5bcd8..f217b7f794 100644 --- a/tests/topotests/bgp_reject_as_sets/r2/bgpd.conf +++ b/tests/topotests/bgp_reject_as_sets/r2/bgpd.conf @@ -1,6 +1,7 @@ ! spine router bgp 65002 bgp reject-as-sets + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 neighbor 192.168.254.2 remote-as 65003 address-family ipv4 unicast diff --git a/tests/topotests/bgp_reject_as_sets/r3/bgpd.conf b/tests/topotests/bgp_reject_as_sets/r3/bgpd.conf index bee518c84b..8d085a0e4b 100644 --- a/tests/topotests/bgp_reject_as_sets/r3/bgpd.conf +++ b/tests/topotests/bgp_reject_as_sets/r3/bgpd.conf @@ -1,5 +1,6 @@ ! 
exit2 router bgp 65003 + no bgp ebgp-requires-policy neighbor 192.168.254.1 remote-as 65002 address-family ipv4 unicast neighbor 192.168.254.1 allowas-in diff --git a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py index f307edc678..b49a57b308 100644 --- a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py +++ b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py @@ -42,7 +42,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -50,20 +50,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -73,38 +75,34 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def 
teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_reject_as_sets(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) @@ -112,34 +110,23 @@ def test_bgp_reject_as_sets(): def _bgp_has_aggregated_route_with_stripped_as_set(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.0.0/16 json")) expected = { - 'paths': [ - { - 'aspath': { - 'string': 'Local', - 'segments': [ - ], - 'length': 0 - } - } - ] + "paths": [{"aspath": {"string": "Local", "segments": [], "length": 0}}] } return topotest.json_cmp(output, expected) def _bgp_announce_route_without_as_sets(router): - output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json")) + output = json.loads( + router.vtysh_cmd( + "show ip bgp neighbor 192.168.254.2 advertised-routes json" + ) + ) expected = { - 'advertisedRoutes': { - '172.16.0.0/16': { - 'asPath': '' - }, - '192.168.254.0/30': { - 'asPath': '65003' - }, - '192.168.255.0/30': { - 'asPath': '65001' - } + "advertisedRoutes": { + "172.16.0.0/16": {"path": ""}, + "192.168.254.0/30": {"path": "65003"}, + "192.168.255.0/30": {"path": "65001"}, }, - 'totalPrefixCounter': 3 + "totalPrefixCounter": 3, } return topotest.json_cmp(output, expected) @@ -148,7 +135,9 @@ def test_bgp_reject_as_sets(): assert result is None, 'Failed bgp convergence in "{}"'.format(router) - test_func = functools.partial(_bgp_has_aggregated_route_with_stripped_as_set, router) + test_func = functools.partial( + 
_bgp_has_aggregated_route_with_stripped_as_set, router + ) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, 'Failed to see an aggregated route in "{}"'.format(router) @@ -156,8 +145,11 @@ def test_bgp_reject_as_sets(): test_func = functools.partial(_bgp_announce_route_without_as_sets, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Route 172.16.0.0/16 should be sent without AS_SET to r3 "{}"'.format(router) + assert ( + result is None + ), 'Route 172.16.0.0/16 should be sent without AS_SET to r3 "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/customize.py b/tests/topotests/bgp_rfapi_basic_sanity/customize.py index a125c6582f..ea548a7337 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/customize.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/customize.py @@ -75,12 +75,15 @@ from lib.ltemplate import ltemplateRtrCmd from mininet.topo import Topo import shutil + CWD = os.path.dirname(os.path.realpath(__file__)) # test name based on directory TEST = os.path.basename(CWD) + class ThisTestTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -89,36 +92,37 @@ class ThisTestTopo(Topo): # between routers, switches and hosts. # # Create P/PE routers - tgen.add_router('r1') + tgen.add_router("r1") for routern in range(2, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a switch with just one router connected to it to simulate a # empty network. 
switch = {} - switch[0] = tgen.add_switch('sw0') - switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0') - switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0') + switch[0] = tgen.add_switch("sw0") + switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0") + switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0") - switch[1] = tgen.add_switch('sw1') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0') - switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0') + switch[1] = tgen.add_switch("sw1") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0") + switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0") + + switch[2] = tgen.add_switch("sw2") + switch[2].add_link(tgen.gears["r2"], nodeif="r2-eth2") + switch[2].add_link(tgen.gears["r3"], nodeif="r3-eth1") - switch[2] = tgen.add_switch('sw2') - switch[2].add_link(tgen.gears['r2'], nodeif='r2-eth2') - switch[2].add_link(tgen.gears['r3'], nodeif='r3-eth1') def ltemplatePreRouterStartHook(): cc = ltemplateRtrCmd() tgen = get_topogen() - logger.info('pre router-start hook') - #check for normal init + logger.info("pre router-start hook") + # check for normal init if len(tgen.net) == 1: - logger.info('Topology not configured, skipping setup') + logger.info("Topology not configured, skipping setup") return False return True + def ltemplatePostRouterStartHook(): - logger.info('post router-start hook') + logger.info("post router-start hook") return True - diff --git a/tests/topotests/bgp_rfapi_basic_sanity/r1/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity/r1/bgpd.conf index 05eac758f1..b3fe5ff23d 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/r1/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity/r1/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 1.1.1.1 bgp cluster-id 1.1.1.1 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 1.1.1.1 ! 
diff --git a/tests/topotests/bgp_rfapi_basic_sanity/r2/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity/r2/bgpd.conf index 241c2ac0ae..524051426b 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/r2/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity/r2/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 2.2.2.2 bgp cluster-id 2.2.2.2 + no bgp ebgp-requires-policy neighbor 1.1.1.1 remote-as 5226 neighbor 1.1.1.1 update-source 2.2.2.2 neighbor 3.3.3.3 remote-as 5226 @@ -28,6 +29,3 @@ router bgp 5226 neighbor 4.4.4.4 route-reflector-client exit-address-family end - - - diff --git a/tests/topotests/bgp_rfapi_basic_sanity/r3/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity/r3/bgpd.conf index 67b26e3a50..fbb6a65d61 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/r3/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity/r3/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 3.3.3.3 bgp cluster-id 3.3.3.3 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 3.3.3.3 ! @@ -45,6 +46,3 @@ router bgp 5226 exit-vnc ! end - - - diff --git a/tests/topotests/bgp_rfapi_basic_sanity/r4/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity/r4/bgpd.conf index 2ba5c74e5b..d61f776f3d 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/r4/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity/r4/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 4.4.4.4 bgp cluster-id 4.4.4.4 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 4.4.4.4 ! @@ -46,6 +47,3 @@ router bgp 5226 exit-vnc ! 
end - - - diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py index 4d6a7582ba..f4b4da55d2 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py @@ -1,36 +1,159 @@ from lutil import luCommand -holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set') + +holddownFactorSet = luCommand( + "r1", + 'vtysh -c "show running"', + "rfp holddown-factor", + "none", + "Holddown factor set", +) if not holddownFactorSet: to = "-1" cost = "" else: to = "6" cost = "cost 50" -luCommand('r1','vtysh -c "debug rfapi-dev open vn 10.0.0.1 un 1.1.1.1"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"','rc=2', 'pass', 'Clean query') -luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r1','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration') -luCommand('r1','vtysh -c "debug rfapi-dev response-omit-self off"','.','none') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"','11.11.11.0/24', 'pass', 'Query self') +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev open vn 10.0.0.1 un 1.1.1.1"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"', + "rc=2", + "pass", + "Clean query", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "1 out of 1", + "wait", + "Local registration", +) 
+luCommand("r1", 'vtysh -c "debug rfapi-dev response-omit-self off"', ".", "none") +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"', + "11.11.11.0/24", + "pass", + "Query self", +) -luCommand('r3','vtysh -c "debug rfapi-dev open vn 10.0.0.2 un 2.2.2.2"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r3','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration') -luCommand('r3','vtysh -c "debug rfapi-dev response-omit-self on"','.','none') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 22.22.22.22"','rc=2', 'pass', 'Self excluded') -luCommand('r3','vtysh -c "debug rfapi-dev open vn 10.0.1.2 un 2.1.1.2"','rfapi_set_response_cb: status 0', 'pass', 'Opened query only RFAPI') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.1.2 un 2.1.1.2 target 22.22.22.22"','22.22.22.0/24', 'pass', 'See local') +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev open vn 10.0.0.2 un 2.2.2.2"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "1 out of 1", + "wait", + "Local registration", +) +luCommand("r3", 'vtysh -c "debug rfapi-dev response-omit-self on"', ".", "none") +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 22.22.22.22"', + "rc=2", + "pass", + "Self excluded", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev open vn 10.0.1.2 un 2.1.1.2"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened query only RFAPI", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.1.2 un 2.1.1.2 
target 22.22.22.22"', + "22.22.22.0/24", + "pass", + "See local", +) -luCommand('r4','vtysh -c "debug rfapi-dev open vn 10.0.0.3 un 3.3.3.3"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r4','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration') -luCommand('r4','vtysh -c "debug rfapi-dev response-omit-self off"','.','none') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 33.33.33.33"','33.33.33.0/24', 'pass', 'Query self') +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev open vn 10.0.0.3 un 3.3.3.3"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "1 out of 1", + "wait", + "Local registration", +) +luCommand("r4", 'vtysh -c "debug rfapi-dev response-omit-self off"', ".", "none") +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 33.33.33.33"', + "33.33.33.0/24", + "pass", + "Query self", +) -luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24 lifetime {} {}"'.format(to, cost),'', 'none', 'MP Prefix registered') -luCommand('r4','vtysh -c "show vnc registrations local"','2 out of 2','wait','Local registration') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"','11.11.11.0/24', 'pass', 'Query self MP') +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24 lifetime {} {}"'.format( + to, cost + ), + "", + "none", + "MP Prefix registered", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "2 
out of 2", + "wait", + "Local registration", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"', + "11.11.11.0/24", + "pass", + "Query self MP", +) -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py index 6fbe4ff1c0..6ad3e735ee 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py @@ -1,10 +1,48 @@ -luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180) -luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180) -luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180) -luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') -luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping') -#luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') +luCommand( + "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r3", "ping 2.2.2.2 -c 1", " 0. 
packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r2", + 'vtysh -c "show bgp summary"', + " 00:0.* 00:0.* 00:0", + "wait", + "Core adjacencies up", + 180, +) +luCommand( + "r1", + 'vtysh -c "show bgp vrf all summary"', + " 00:0", + "wait", + "All adjacencies up", + 180, +) +luCommand( + "r3", + 'vtysh -c "show bgp vrf all summary"', + " 00:0", + "wait", + "All adjacencies up", + 180, +) +luCommand( + "r4", + 'vtysh -c "show bgp vrf all summary"', + " 00:0", + "wait", + "All adjacencies up", + 180, +) +luCommand( + "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) +luCommand( + "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping" +) +# luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py index 5fffce7ca0..9fdef84cdf 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py @@ -1,19 +1,102 @@ from lutil import luCommand -holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set') + +holddownFactorSet = luCommand( + "r1", + 'vtysh -c "show running"', + "rfp holddown-factor", + "none", + "Holddown factor set", +) if not holddownFactorSet: to = "-1" else: to = "1" -luCommand('r1','vtysh -c "debug rfapi-dev open vn 20.0.0.1 un 1.1.1.21"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r1','vtysh -c "debug rfapi-dev register vn 20.0.0.1 un 1.1.1.21 prefix 111.111.111.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r1','vtysh -c "show vnc registrations local"','111.111.111.0/24','wait','Local registration',1) -luCommand('r1','vtysh -c "show 
vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','111.111.111.0/24','wait','See registration') -luCommand('r4','vtysh -c "show vnc registrations"','111.111.111.0/24','wait','See registration') -luCommand('r1','vtysh -c "debug rfapi-dev close vn 20.0.0.1 un 1.1.1.21"','status 0', 'pass', 'Closed RFAPI') -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See cleanup') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See cleanup') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2','wait','See cleanup') -luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown',20) -luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') -luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev open vn 20.0.0.1 un 1.1.1.21"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 20.0.0.1 un 1.1.1.21 prefix 111.111.111.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "111.111.111.0/24", + "wait", + "Local registration", + 1, +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "111.111.111.0/24", + "wait", + "See registration", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "111.111.111.0/24", + "wait", + "See registration", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev close vn 20.0.0.1 un 1.1.1.21"', + "status 0", + "pass", + "Closed RFAPI", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: 
*Active: 3", + "wait", + "See cleanup", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See cleanup", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2", + "wait", + "See cleanup", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + 20, +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", +) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py index a380c79fcf..1caa827ce2 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py @@ -1,19 +1,74 @@ from lutil import luCommand -luCommand('r1','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See all registrations') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See all registrations') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2','wait','See all registrations') -num = '4 routes and 4' -luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r4','vtysh -c "show 
bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 22.22.22.22"','pfx=', 'pass', 'Query R2s info') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 33.33.33.33"','pfx=', 'pass', 'Query R4s info') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 11.11.11.11"','11.11.11.0/24.*11.11.11.0/24.*', 'pass', 'Query R1s+R4s info') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 33.33.33.33"','pfx=', 'pass', 'Query R4s info') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"','11.11.11.0/24.*11.11.11.0/24.*', 'pass', 'Query R1s+R4s info') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 22.22.22.22"','pfx=', 'pass', 'Query R2s info') + +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See all registrations", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See all registrations", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2", + "wait", + "See all registrations", +) +num = "4 routes and 4" +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query 
vn 10.0.0.1 un 1.1.1.1 target 22.22.22.22"', + "pfx=", + "pass", + "Query R2s info", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 33.33.33.33"', + "pfx=", + "pass", + "Query R4s info", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 11.11.11.11"', + "11.11.11.0/24.*11.11.11.0/24.*", + "pass", + "Query R1s+R4s info", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 33.33.33.33"', + "pfx=", + "pass", + "Query R4s info", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"', + "11.11.11.0/24.*11.11.11.0/24.*", + "pass", + "Query R1s+R4s info", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 22.22.22.22"', + "pfx=", + "pass", + "Query R2s info", +) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py index f4467ecc33..e68e9e93ab 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py @@ -1,68 +1,325 @@ from lutil import luCommand -holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set') -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') + +holddownFactorSet = luCommand( + "r1", + 'vtysh -c "show running"', + "rfp holddown-factor", + "none", + "Holddown factor set", +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") if not holddownFactorSet: - luCommand('r1','vtysh -c "show vnc summary"','.','pass','Holddown factor not set -- skipping test') + 
luCommand( + "r1", + 'vtysh -c "show vnc summary"', + ".", + "pass", + "Holddown factor not set -- skipping test", + ) else: - #holddown time test - luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r1','vtysh -c "show vnc registrations local"','1.111.0.0/16','wait','Local registration') + # holddown time test + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "1.111.0.0/16", + "wait", + "Local registration", + ) - luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r3','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration') + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "1.222.0.0/16", + "wait", + "Local registration", + ) - luCommand('r4','vtysh -c "show vnc registrations"','Remotely: *Active: 4 ','wait', 'See registrations, L=10') + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Remotely: *Active: 4 ", + "wait", + "See registrations, L=10", + ) - luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16 lifetime 5 cost 50"','', 'none', 'MP Prefix registered') - luCommand('r4','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration (MP prefix)') + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16 lifetime 5 cost 50"', + "", + "none", + "MP Prefix registered", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "1.222.0.0/16", + "wait", 
+ "Local registration (MP prefix)", + ) - luCommand('r1','vtysh -c "show vnc registrations"','.','none') - luCommand('r3','vtysh -c "show vnc registrations"','.','none') + luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") - luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.111.111.111"','pfx=', 'pass', 'Query R1s info') - luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.222.222.222"','1.222.0.0/16.*1.222.0.0/16', 'pass', 'Query R3s+R4s info') + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.111.111.111"', + "pfx=", + "pass", + "Query R1s info", + ) + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.222.222.222"', + "1.222.0.0/16.*1.222.0.0/16", + "pass", + "Query R3s+R4s info", + ) - luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16"','', 'none', 'MP Prefix removed') - luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown') - luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown') - luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown') - luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 1.222.222.222"','1.222.0.0/16', 'pass', 'Query R3s info') - luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16"','', 'none', 'Prefix timeout') - luCommand('r1','vtysh -c "show vnc registrations holddown"','1.111.0.0/16','wait','Local holddown',1) - luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16"','', 'none', 'Prefix timeout') - luCommand('r3','vtysh -c "show vnc registrations holddown"','1.222.0.0/16','wait','Local holddown',1) - 
luCommand('r4','vtysh -c "show vnc registrations"','.','none') - luCommand('r4','vtysh -c "show vnc registrations"','.','none') + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16"', + "", + "none", + "MP Prefix removed", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 1 ", + "wait", + "MP prefix in holddown", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 1 ", + "wait", + "MP prefix in holddown", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 1 ", + "wait", + "MP prefix in holddown", + ) + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 1.222.222.222"', + "1.222.0.0/16", + "pass", + "Query R3s info", + ) + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16"', + "", + "none", + "Prefix timeout", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations holddown"', + "1.111.0.0/16", + "wait", + "Local holddown", + 1, + ) + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16"', + "", + "none", + "Prefix timeout", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations holddown"', + "1.222.0.0/16", + "wait", + "Local holddown", + 1, + ) + luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") - luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown') - luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown') - luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown') + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 2 ", + "wait", + "In holddown", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + 
"In Holddown: *Active: 2 ", + "wait", + "In holddown", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 2 ", + "wait", + "In holddown", + ) - luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown',20) - luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') - luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + 20, + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) - #kill test - luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r1','vtysh -c "show vnc registrations local"','1.111.0.0/16','wait','Local registration') + # kill test + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "1.111.0.0/16", + "wait", + "Local registration", + ) - luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r3','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration') + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "1.222.0.0/16", + "wait", + "Local registration", + ) - luCommand('r4','vtysh -c "show vnc 
registrations"','Remotely: *Active: 4 ','wait', 'See registrations L=10 (pre-kill)',5) - luCommand('r1','vtysh -c "show vnc registrations"','.','none') - luCommand('r3','vtysh -c "show vnc registrations"','.','none') - luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 kill"','', 'none', 'Prefix kill') - luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 4 .*In Holddown: *Active: 0','wait','Registration killed',1) - luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Remote in holddown',5) - luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Remote in holddown',5) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Remotely: *Active: 4 ", + "wait", + "See registrations L=10 (pre-kill)", + 5, + ) + luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 kill"', + "", + "none", + "Prefix kill", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 4 .*In Holddown: *Active: 0", + "wait", + "Registration killed", + 1, + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1", + "wait", + "Remote in holddown", + 5, + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1", + "wait", + "Remote in holddown", + 5, + ) - luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 kill"','', 'none', 'Prefix kill') - luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* 
Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Registration killed',1) - luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 2','wait','Remote in holddown',5) + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 kill"', + "", + "none", + "Prefix kill", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 1", + "wait", + "Registration killed", + 1, + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 2", + "wait", + "Remote in holddown", + 5, + ) - luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0','wait','Out of holddown',20) - luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0','wait','Out of holddown') - luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 0','wait','Out of holddown') + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0", + "wait", + "Out of holddown", + 20, + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py index e9c1916f75..eea977bfaf 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py +++ 
b/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py @@ -1,33 +1,124 @@ from lutil import luCommand -luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24"','', 'none', 'Prefix removed') -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed') -luCommand('r1','vtysh -c "debug rfapi-dev close vn 10.0.0.1 un 1.1.1.1"','status 0', 'pass', 'Closed RFAPI') -luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24"','', 'none', 'Prefix removed') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed') -luCommand('r3','vtysh -c "debug rfapi-dev close vn 10.0.0.2 un 2.2.2.2"','status 0', 'pass', 'Closed RFAPI') +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24"', + "", + "none", + "Prefix removed", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 ", + "wait", + "Local registration removed", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev close vn 10.0.0.1 un 1.1.1.1"', + "status 0", + "pass", + "Closed RFAPI", +) -luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24"','', 'none', 'Prefix removed') -luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24"','', 'none', 'MP prefix removed') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed') -#luCommand('r4','vtysh -c "debug rfapi-dev close vn 10.0.0.3 un 3.3.3.3"','status 0', 'pass', 'Closed RFAPI') -luCommand('r4','vtysh -c "clear vnc nve *"','.', 'pass', 'Cleared NVEs') +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24"', + "", + "none", + "Prefix removed", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: 
*Active: 0 ", + "wait", + "Local registration removed", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev close vn 10.0.0.2 un 2.2.2.2"', + "status 0", + "pass", + "Closed RFAPI", +) -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared') +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24"', + "", + "none", + "Prefix removed", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24"', + "", + "none", + "MP prefix removed", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 ", + "wait", + "Local registration removed", +) +# luCommand('r4','vtysh -c "debug rfapi-dev close vn 10.0.0.3 un 3.3.3.3"','status 0', 'pass', 'Closed RFAPI') +luCommand("r4", 'vtysh -c "clear vnc nve *"', ".", "pass", "Cleared NVEs") -num = '0 exist' -luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0", + "wait", + "All registrations cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0", + "wait", + "All registrations cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0", + "wait", + "All registrations cleared", +) -luCommand('r1','vtysh 
-c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns',20) -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns') +num = "0 exist" +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") -luCommand('r1','vtysh -c "show vnc summary"','.','none') -luCommand('r3','vtysh -c "show vnc summary"','.','none') -luCommand('r4','vtysh -c "show vnc summary"','.','none') +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0", + "wait", + "No holddowns", + 20, +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0", + "wait", + "No holddowns", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0", + "wait", + "No holddowns", +) +luCommand("r1", 'vtysh -c "show vnc summary"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc summary"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc summary"', ".", "none") diff --git a/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py b/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py index 0e1f236b7d..cd59bbc395 100755 --- a/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py @@ -25,64 +25,71 @@ import os import sys 
import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")) from lib.ltemplate import * + def test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', 
cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) + def test_check_close(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_close.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_close.py", False, CliOnFail, CheckFunc) + def test_check_timeout(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_timeout.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_timeout.py", False, CliOnFail, CheckFunc) + def test_cleanup_all(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if 
__name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_rfapi_basic_sanity_config2/r1/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity_config2/r1/bgpd.conf index f7f5e2ee96..626d8227e7 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity_config2/r1/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity_config2/r1/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 1.1.1.1 bgp cluster-id 1.1.1.1 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 1.1.1.1 ! diff --git a/tests/topotests/bgp_rfapi_basic_sanity_config2/r2/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity_config2/r2/bgpd.conf index 241c2ac0ae..524051426b 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity_config2/r2/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity_config2/r2/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 2.2.2.2 bgp cluster-id 2.2.2.2 + no bgp ebgp-requires-policy neighbor 1.1.1.1 remote-as 5226 neighbor 1.1.1.1 update-source 2.2.2.2 neighbor 3.3.3.3 remote-as 5226 @@ -28,6 +29,3 @@ router bgp 5226 neighbor 4.4.4.4 route-reflector-client exit-address-family end - - - diff --git a/tests/topotests/bgp_rfapi_basic_sanity_config2/r3/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity_config2/r3/bgpd.conf index 17e351988d..8c75a39efa 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity_config2/r3/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity_config2/r3/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 3.3.3.3 bgp cluster-id 3.3.3.3 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 3.3.3.3 ! @@ -46,6 +47,3 @@ router bgp 5226 exit-vnc ! 
end - - - diff --git a/tests/topotests/bgp_rfapi_basic_sanity_config2/r4/bgpd.conf b/tests/topotests/bgp_rfapi_basic_sanity_config2/r4/bgpd.conf index 0b8808cb80..38f8758cbc 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity_config2/r4/bgpd.conf +++ b/tests/topotests/bgp_rfapi_basic_sanity_config2/r4/bgpd.conf @@ -8,6 +8,7 @@ log commands router bgp 5226 bgp router-id 4.4.4.4 bgp cluster-id 4.4.4.4 + no bgp ebgp-requires-policy neighbor 2.2.2.2 remote-as 5226 neighbor 2.2.2.2 update-source 4.4.4.4 ! @@ -47,6 +48,3 @@ router bgp 5226 exit-vnc ! end - - - diff --git a/tests/topotests/bgp_rr_ibgp/spine1/bgpd.conf b/tests/topotests/bgp_rr_ibgp/spine1/bgpd.conf index 29a119c291..fa77cce073 100644 --- a/tests/topotests/bgp_rr_ibgp/spine1/bgpd.conf +++ b/tests/topotests/bgp_rr_ibgp/spine1/bgpd.conf @@ -1,5 +1,6 @@ hostname spine1 router bgp 99 + no bgp ebgp-requires-policy neighbor 192.168.2.1 remote-as internal neighbor 192.168.4.2 remote-as internal address-family ipv4 uni diff --git a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py index c7daa06b76..da45e73ab4 100755 --- a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py +++ b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py @@ -38,7 +38,7 @@ import json # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -55,6 +55,7 @@ from mininet.topo import Topo ## ##################################################### + class NetworkTopo(Topo): "BGP_RR_IBGP Topology 1" @@ -63,30 +64,30 @@ class NetworkTopo(Topo): tgen = get_topogen(self) - tgen.add_router('tor1') - tgen.add_router('tor2') - tgen.add_router('spine1') + tgen.add_router("tor1") + tgen.add_router("tor2") + tgen.add_router("spine1") # First switch is for a dummy interface (for local network) # on tor1 - # 192.168.1.0/24 - switch = tgen.add_switch('sw1') - switch.add_link(tgen.gears['tor1']) + # 192.168.1.0/24 + switch = tgen.add_switch("sw1") + switch.add_link(tgen.gears["tor1"]) - # 192.168.2.0/24 - tor1 <-> spine1 connection - switch = tgen.add_switch('sw2') - switch.add_link(tgen.gears['tor1']) - switch.add_link(tgen.gears['spine1']) + # 192.168.2.0/24 - tor1 <-> spine1 connection + switch = tgen.add_switch("sw2") + switch.add_link(tgen.gears["tor1"]) + switch.add_link(tgen.gears["spine1"]) # 3rd switch is for a dummy interface (for local netwokr) - # 192.168.3.0/24 - tor2 - switch = tgen.add_switch('sw3') - switch.add_link(tgen.gears['tor2']) + # 192.168.3.0/24 - tor2 + switch = tgen.add_switch("sw3") + switch.add_link(tgen.gears["tor2"]) - # 192.168.4.0/24 - tor2 <-> spine1 connection - switch = tgen.add_switch('sw4') - switch.add_link(tgen.gears['tor2']) - switch.add_link(tgen.gears['spine1']) + # 192.168.4.0/24 - tor2 <-> spine1 connection + switch = tgen.add_switch("sw4") + switch.add_link(tgen.gears["tor2"]) + switch.add_link(tgen.gears["spine1"]) ##################################################### @@ -95,6 +96,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) @@ -104,12 +106,10 @@ 
def setup_module(module): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() @@ -132,7 +132,7 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - topotest.sleep(5, 'Waiting for BGP_RR_IBGP convergence') + topotest.sleep(5, "Waiting for BGP_RR_IBGP convergence") def test_bgp_rr_ibgp_routes(): @@ -146,6 +146,7 @@ def test_bgp_rr_ibgp_routes(): # Verify BGP_RR_IBGP Status logger.info("Verifying BGP_RR_IBGP routes") + def test_zebra_ipv4_routingTable(): "Test 'show ip route'" @@ -157,16 +158,19 @@ def test_zebra_ipv4_routingTable(): failures = 0 router_list = tgen.routers().values() for router in router_list: - output = router.vtysh_cmd('show ip route json', isjson=True) - refTableFile = '{}/{}/show_ip_route.json_ref'.format(CWD, router.name) + output = router.vtysh_cmd("show ip route json", isjson=True) + refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name) expected = json.loads(open(refTableFile).read()) - assertmsg = 'Zebra IPv4 Routing Table verification failed for router {}'.format(router.name) + assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format( + router.name + ) assert topotest.json_cmp(output, expected) is None, assertmsg + def test_shutdown_check_stderr(): - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") tgen = get_topogen() # Don't run this test if we have any failure. 
@@ -179,15 +183,15 @@ def test_shutdown_check_stderr(): for router in router_list: router.stop() - log = tgen.net[router.name].getStdErr('bgpd') + log = tgen.net[router.name].getStdErr("bgpd") if log: - logger.error('BGPd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('zebra') + logger.error("BGPd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("zebra") if log: - logger.error('Zebra StdErr Log:' + log) + logger.error("Zebra StdErr Log:" + log) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf b/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf index e8ec0f7680..b028ab4e8b 100644 --- a/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf +++ b/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf @@ -1,4 +1,5 @@ hostname tor1 router bgp 99 + no bgp ebgp-requires-policy neighbor 192.168.2.3 remote-as internal redistribute connected diff --git a/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf b/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf index b091c97ac3..99c34158b9 100644 --- a/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf +++ b/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf @@ -1,4 +1,5 @@ hostname tor2 router bgp 99 + no bgp ebgp-requires-policy neighbor 192.168.4.3 remote-as internal redistribute connected diff --git a/tests/topotests/bgp_sender-as-path-loop-detection/r1/bgpd.conf b/tests/topotests/bgp_sender-as-path-loop-detection/r1/bgpd.conf index a91b564bff..b16e94d7c1 100644 --- a/tests/topotests/bgp_sender-as-path-loop-detection/r1/bgpd.conf +++ b/tests/topotests/bgp_sender-as-path-loop-detection/r1/bgpd.conf @@ -1,5 +1,6 @@ ! 
exit1 router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65002 address-family ipv4 unicast neighbor 192.168.255.1 route-map prepend out diff --git a/tests/topotests/bgp_sender-as-path-loop-detection/r2/bgpd.conf b/tests/topotests/bgp_sender-as-path-loop-detection/r2/bgpd.conf index 6e8e89360f..674877edd3 100644 --- a/tests/topotests/bgp_sender-as-path-loop-detection/r2/bgpd.conf +++ b/tests/topotests/bgp_sender-as-path-loop-detection/r2/bgpd.conf @@ -1,5 +1,6 @@ ! spine router bgp 65002 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 neighbor 192.168.255.2 solo neighbor 192.168.254.2 remote-as 65003 diff --git a/tests/topotests/bgp_sender-as-path-loop-detection/r3/bgpd.conf b/tests/topotests/bgp_sender-as-path-loop-detection/r3/bgpd.conf index 8962befad2..4ee7a39ab2 100644 --- a/tests/topotests/bgp_sender-as-path-loop-detection/r3/bgpd.conf +++ b/tests/topotests/bgp_sender-as-path-loop-detection/r3/bgpd.conf @@ -1,4 +1,5 @@ ! exit2 router bgp 65003 + no bgp ebgp-requires-policy neighbor 192.168.254.1 remote-as 65002 ! 
diff --git a/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py b/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py index 708464864a..56a98c1ef8 100644 --- a/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py +++ b/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py @@ -35,7 +35,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -43,20 +43,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,38 +68,34 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() 
tgen.stop_topology() + def test_bgp_sender_as_path_loop_detection(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) @@ -105,19 +103,11 @@ def test_bgp_sender_as_path_loop_detection(): def _bgp_has_route_from_r1(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json")) expected = { - 'paths': [ + "paths": [ { - 'aspath': { - 'segments': [ - { - 'type': 'as-sequence', - 'list': [ - 65001, - 65003 - ] - } - ], - 'length': 2 + "aspath": { + "segments": [{"type": "as-sequence", "list": [65001, 65003]}], + "length": 2, } } ] @@ -125,10 +115,12 @@ def test_bgp_sender_as_path_loop_detection(): return topotest.json_cmp(output, expected) def _bgp_suppress_route_to_r3(router): - output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json")) - expected = { - 'totalPrefixCounter': 0 - } + output = json.loads( + router.vtysh_cmd( + "show ip bgp neighbor 192.168.254.2 advertised-routes json" + ) + ) + expected = {"totalPrefixCounter": 0} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -144,8 +136,11 @@ def test_bgp_sender_as_path_loop_detection(): test_func = functools.partial(_bgp_suppress_route_to_r3, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Route 172.16.255.254/32 should not be sent to r3 "{}"'.format(router) + assert ( + result is None + ), 'Route 172.16.255.254/32 should not be sent to r3 
"{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_set_local-preference_add_subtract/r1/bgpd.conf b/tests/topotests/bgp_set_local-preference_add_subtract/r1/bgpd.conf index 1a9c5325ad..7dab52fef0 100644 --- a/tests/topotests/bgp_set_local-preference_add_subtract/r1/bgpd.conf +++ b/tests/topotests/bgp_set_local-preference_add_subtract/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65000 neighbor 192.168.255.3 remote-as 65000 exit-address-family diff --git a/tests/topotests/bgp_set_local-preference_add_subtract/r2/bgpd.conf b/tests/topotests/bgp_set_local-preference_add_subtract/r2/bgpd.conf index 89e1256667..a8a0384632 100644 --- a/tests/topotests/bgp_set_local-preference_add_subtract/r2/bgpd.conf +++ b/tests/topotests/bgp_set_local-preference_add_subtract/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 address-family ipv4 redistribute connected diff --git a/tests/topotests/bgp_set_local-preference_add_subtract/r3/bgpd.conf b/tests/topotests/bgp_set_local-preference_add_subtract/r3/bgpd.conf index fabd4605f3..2f5dceede2 100644 --- a/tests/topotests/bgp_set_local-preference_add_subtract/r3/bgpd.conf +++ b/tests/topotests/bgp_set_local-preference_add_subtract/r3/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 address-family ipv4 redistribute connected diff --git a/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py b/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py index 09e195e22d..ce3165db25 100644 --- a/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py +++ 
b/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py @@ -36,7 +36,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -44,17 +44,19 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -64,70 +66,48 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_set_local_preference(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r1'] + router = tgen.gears["r1"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": 
"Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, + }, + "192.168.255.3": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, }, - '192.168.255.3': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } - } } return topotest.json_cmp(output, expected) def _bgp_check_local_preference(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json")) expected = { - 'paths': [ - { - 'localpref': 50, - 'nexthops': [ - { - 'ip': '192.168.255.3' - } - ] - }, - { - 'localpref': 150, - 'nexthops': [ - { - 'ip': '192.168.255.2' - } - ] - } + "paths": [ + {"locPrf": 50, "nexthops": [{"ip": "192.168.255.3"}]}, + {"locPrf": 150, "nexthops": [{"ip": "192.168.255.2"}]}, ] } return topotest.json_cmp(output, expected) @@ -140,8 +120,11 @@ def test_bgp_set_local_preference(): test_func = functools.partial(_bgp_check_local_preference, router) success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5) - assert result is None, 'Failed to see applied BGP local-preference in "{}"'.format(router) + assert result is None, 'Failed to see applied BGP local-preference in "{}"'.format( + router + ) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf index 235b42b3d5..f0df56e947 100644 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65000 + no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 address-family ipv4 unicast redistribute connected diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf index c05bfd5a6b..422a7345f9 100644 --- 
a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf @@ -1,5 +1,5 @@ router bgp 65001 + no bgp ebgp-requires-policy bgp default show-hostname neighbor 192.168.255.1 remote-as 65000 - address-family ipv4 unicast - redistribute connected + neighbor 192.168.254.1 remote-as 65001 diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf index 5abba71ce8..e9e2e4391f 100644 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf @@ -5,5 +5,8 @@ interface lo interface r2-eth0 ip address 192.168.255.2/24 ! +interface r2-eth1 + ip address 192.168.254.2/24 +! ip forwarding ! diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r3/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r3/bgpd.conf new file mode 100644 index 0000000000..8fcf6a736d --- /dev/null +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/r3/bgpd.conf @@ -0,0 +1,3 @@ +router bgp 65001 + bgp default show-hostname + neighbor 192.168.254.2 remote-as 65001 diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r3/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r3/zebra.conf new file mode 100644 index 0000000000..a8b8bc38c5 --- /dev/null +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/r3/zebra.conf @@ -0,0 +1,6 @@ +! +interface r3-eth0 + ip address 192.168.254.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py b/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py index f5119468e0..e8ad180935 100644 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py @@ -26,6 +26,13 @@ test_bgp_show_ip_bgp_fqdn.py: Test if FQND is visible in `show [ip] bgp` output if `bgp default show-hostname` is toggled. + +Topology: +r1 <-- eBGP --> r2 <-- iBGP --> r3 + +1. 
Check if both hostname and ip are added to JSON output +for 172.16.255.254/32 on r2. +2. Check if only ip is added to JSON output for 172.16.255.254/32 on r3. """ import os @@ -36,7 +43,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -44,16 +51,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) - for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + for routern in range(1, 4): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -63,55 +76,58 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_show_ip_bgp_hostname(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] - def _bgp_converge(router): - output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - expected = { - '192.168.255.1': { - 'bgpState': 
'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } - } - } + output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json")) + expected = {"prefix": "172.16.255.254/32"} return topotest.json_cmp(output, expected) def _bgp_show_nexthop_hostname_and_ip(router): output = json.loads(router.vtysh_cmd("show ip bgp json")) - for nh in output['routes']['172.16.255.253/32'][0]['nexthops']: - if 'hostname' in nh and 'ip' in nh: + for nh in output["routes"]["172.16.255.254/32"][0]["nexthops"]: + if "hostname" in nh and "ip" in nh: return True return False - test_func = functools.partial(_bgp_converge, router) + def _bgp_show_nexthop_ip_only(router): + output = json.loads(router.vtysh_cmd("show ip bgp json")) + for nh in output["routes"]["172.16.255.254/32"][0]["nexthops"]: + if "ip" in nh and not "hostname" in nh: + return True + return False + + test_func = functools.partial(_bgp_converge, tgen.gears["r2"]) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Failed bgp convergence in "{}"'.format(router) - assert _bgp_show_nexthop_hostname_and_ip(router) == True + test_func = functools.partial(_bgp_converge, tgen.gears["r3"]) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r2"]) + assert _bgp_show_nexthop_hostname_and_ip(tgen.gears["r2"]) == True + + assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r3"]) + assert _bgp_show_nexthop_ip_only(tgen.gears["r3"]) == True + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r1/bgpd.conf b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r1/bgpd.conf index 3c974c767f..002cecd1fa 100644 --- a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r1/bgpd.conf +++ 
b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r1/bgpd.conf @@ -1,5 +1,6 @@ router bgp 101 vrf r1-cust1 bgp router-id 10.254.254.1 + no bgp ebgp-requires-policy neighbor r2g peer-group neighbor r2g remote-as external neighbor r2g bfd diff --git a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r2/bgpd.conf b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r2/bgpd.conf index 39362abd46..0878b9b995 100644 --- a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r2/bgpd.conf +++ b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/r2/bgpd.conf @@ -1,5 +1,6 @@ router bgp 102 vrf r2-cust1 bgp router-id 10.254.254.2 + no bgp ebgp-requires-policy neighbor r2g peer-group neighbor r2g remote-as external neighbor r2g bfd diff --git a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py index 2944b3729c..5d8c80c6a2 100644 --- a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py +++ b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py @@ -35,7 +35,7 @@ import platform # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -49,17 +49,19 @@ from mininet.topo import Topo class BGPIPV6RTADVVRFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 2 routers. 
- tgen.add_router('r1') - tgen.add_router('r2') + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): "Sets up the pytest environment" @@ -68,52 +70,57 @@ def setup_module(mod): router_list = tgen.routers() - logger.info('Testing with VRF Lite support') + logger.info("Testing with VRF Lite support") krel = platform.release() # May need to adjust handling of vrf traffic depending on kernel version l3mdev_accept = 0 - if topotest.version_cmp(krel, '4.15') >= 0 and \ - topotest.version_cmp(krel, '4.18') <= 0: + if ( + topotest.version_cmp(krel, "4.15") >= 0 + and topotest.version_cmp(krel, "4.18") <= 0 + ): l3mdev_accept = 1 - if topotest.version_cmp(krel, '5.0') >= 0: + if topotest.version_cmp(krel, "5.0") >= 0: l3mdev_accept = 1 - logger.info('krel \'{0}\' setting net.ipv4.tcp_l3mdev_accept={1}'.format( - krel, l3mdev_accept)) + logger.info( + "krel '{0}' setting net.ipv4.tcp_l3mdev_accept={1}".format(krel, l3mdev_accept) + ) - cmds = ['ip link add {0}-cust1 type vrf table 1001', - 'ip link add loop1 type dummy', - 'ip link set loop1 master {0}-cust1', - 'ip link set {0}-eth0 master {0}-cust1'] + cmds = [ + "ip link add {0}-cust1 type vrf table 1001", + "ip link add loop1 type dummy", + "ip link set loop1 master {0}-cust1", + "ip link set {0}-eth0 master {0}-cust1", + ] for rname, router in router_list.iteritems(): for cmd in cmds: output = tgen.net[rname].cmd(cmd.format(rname)) - output = tgen.net[rname].cmd('sysctl -n net.ipv4.tcp_l3mdev_accept') + output = tgen.net[rname].cmd("sysctl -n net.ipv4.tcp_l3mdev_accept") logger.info( - 'router {0}: existing tcp_l3mdev_accept was {1}'.format( - rname, output)) + "router {0}: existing tcp_l3mdev_accept was {1}".format(rname, output) + ) if l3mdev_accept: output = tgen.net[rname].cmd( - 'sysctl -w 
net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)) + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept) + ) for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. tgen.start_router() + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -134,44 +141,51 @@ def test_protocols_convergence(): logger.info("Checking IPv4 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route vrf {}-cust1 json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip route vrf {}-cust1 json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg # Check IPv6 routing tables. 
logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route vrf {}-cust1 json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ipv6 route vrf {}-cust1 json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py b/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py +++ b/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = 
open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_vrf_netns/peer1/exa-send.py b/tests/topotests/bgp_vrf_netns/peer1/exa-send.py index 9a2a201c57..9279cc45ff 100755 --- a/tests/topotests/bgp_vrf_netns/peer1/exa-send.py +++ b/tests/topotests/bgp_vrf_netns/peer1/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -17,10 +17,12 @@ asnum = 99 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n' % (i, i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n" + % (i, i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp_vrf_netns/r1/bgpd.conf b/tests/topotests/bgp_vrf_netns/r1/bgpd.conf index e3f158d7b3..dabf9521ac 100644 --- a/tests/topotests/bgp_vrf_netns/r1/bgpd.conf +++ b/tests/topotests/bgp_vrf_netns/r1/bgpd.conf @@ -2,6 +2,7 @@ router bgp 100 vrf r1-cust1 bgp router-id 10.0.1.1 bgp bestpath as-path multipath-relax + no bgp ebgp-requires-policy neighbor 10.0.1.101 remote-as 99 ! ! 
diff --git a/tests/topotests/bgp_vrf_netns/r1/summary.txt b/tests/topotests/bgp_vrf_netns/r1/summary.txt index 7473fa2151..1a079ff130 100644 --- a/tests/topotests/bgp_vrf_netns/r1/summary.txt +++ b/tests/topotests/bgp_vrf_netns/r1/summary.txt @@ -8,7 +8,7 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":10, + "pfxRcd":10, "state":"Established" } }, diff --git a/tests/topotests/bgp_vrf_netns/r1/summary20.txt b/tests/topotests/bgp_vrf_netns/r1/summary20.txt index 18318e07a8..2b5787e6da 100644 --- a/tests/topotests/bgp_vrf_netns/r1/summary20.txt +++ b/tests/topotests/bgp_vrf_netns/r1/summary20.txt @@ -7,7 +7,7 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":10, + "pfxRcd":10, "state":"Established" } }, diff --git a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py index a5590bcaf6..ae48f01a0e 100755 --- a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py +++ b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py @@ -33,7 +33,7 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -53,6 +53,7 @@ CustomizeVrfWithNetns = True ## ##################################################### + class BGPVRFNETNSTopo1(Topo): "BGP EBGP VRF NETNS Topology 1" @@ -60,18 +61,17 @@ class BGPVRFNETNSTopo1(Topo): tgen = get_topogen(self) # Setup Routers - tgen.add_router('r1') + tgen.add_router("r1") # Setup Switches - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Add eBGP ExaBGP neighbors - peer_ip = '10.0.1.101' - peer_route = 'via 10.0.1.1' - peer = tgen.add_exabgp_peer('peer1', - ip=peer_ip, defaultRoute=peer_route) - switch = tgen.gears['s1'] + peer_ip = "10.0.1.101" + peer_route = "via 10.0.1.1" + peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route) + switch = tgen.gears["s1"] switch.add_link(peer) @@ -81,74 +81,87 @@ class BGPVRFNETNSTopo1(Topo): ## ##################################################### + def setup_module(module): tgen = Topogen(BGPVRFNETNSTopo1, module.__name__) tgen.start_topology() # Get r1 reference - router = tgen.gears['r1'] + router = tgen.gears["r1"] # check for zebra capability if CustomizeVrfWithNetns == True: - if router.check_capability( - TopoRouter.RD_ZEBRA, - '--vrfwnetns' - ) == False: - return pytest.skip('Skipping BGP VRF NETNS Test. VRF NETNS backend not available on FRR') - if os.system('ip netns list') != 0: - return pytest.skip('Skipping BGP VRF NETNS Test. NETNS not available on System') + if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False: + return pytest.skip( + "Skipping BGP VRF NETNS Test. VRF NETNS backend not available on FRR" + ) + if os.system("ip netns list") != 0: + return pytest.skip( + "Skipping BGP VRF NETNS Test. 
NETNS not available on System" + ) # retrieve VRF backend kind if CustomizeVrfWithNetns == True: - logger.info('Testing with VRF Namespace support') + logger.info("Testing with VRF Namespace support") # create VRF r1-cust1 # move r1-eth0 to VRF r1-cust1 - cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi', - 'ip netns add {0}-cust1', - 'ip link set dev {0}-eth0 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up'] + cmds = [ + "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi", + "ip netns add {0}-cust1", + "ip link set dev {0}-eth0 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth0 up", + ] for cmd in cmds: - cmd = cmd.format('r1') - logger.info('cmd: '+cmd); - output = router.run(cmd.format('r1')) + cmd = cmd.format("r1") + logger.info("cmd: " + cmd) + output = router.run(cmd.format("r1")) if output != None and len(output) > 0: - logger.info('Aborting due to unexpected output: cmd="{}" output=\n{}'.format(cmd, output)) - return pytest.skip('Skipping BGP VRF NETNS Test. Unexpected output to command: '+cmd) - #run daemons + logger.info( + 'Aborting due to unexpected output: cmd="{}" output=\n{}'.format( + cmd, output + ) + ) + return pytest.skip( + "Skipping BGP VRF NETNS Test. 
Unexpected output to command: " + cmd + ) + # run daemons router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format('r1')), - '--vrfwnetns' + os.path.join(CWD, "{}/zebra.conf".format("r1")), + "--vrfwnetns", ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format('r1')) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1")) ) - logger.info('Launching BGP and ZEBRA') + logger.info("Launching BGP and ZEBRA") # BGP and ZEBRA start without underlying VRF router.start() # Starting Hosts and init ExaBGP on each of them - logger.info('starting exaBGP on peer1') + logger.info("starting exaBGP on peer1") peer_list = tgen.exabgp_peers() for pname, peer in peer_list.iteritems(): peer_dir = os.path.join(CWD, pname) - env_file = os.path.join(CWD, 'exabgp.env') - logger.info('Running ExaBGP peer') + env_file = os.path.join(CWD, "exabgp.env") + logger.info("Running ExaBGP peer") peer.start(peer_dir, env_file) logger.info(pname) + def teardown_module(module): tgen = get_topogen() # move back r1-eth0 to default VRF # delete VRF r1-cust1 - cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1', - 'ip netns delete {0}-cust1'] + cmds = [ + "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1", + "ip netns delete {0}-cust1", + ] for cmd in cmds: - tgen.net['r1'].cmd(cmd.format('r1')) + tgen.net["r1"].cmd(cmd.format("r1")) tgen.stop_topology() + def test_bgp_vrf_learn(): "Test daemon learnt VRF context" tgen = get_topogen() @@ -158,11 +171,11 @@ def test_bgp_vrf_learn(): pytest.skip(tgen.errors) # Expected result - output = tgen.gears['r1'].vtysh_cmd("show vrf", isjson=False) - logger.info('output is: {}'.format(output)) + output = tgen.gears["r1"].vtysh_cmd("show vrf", isjson=False) + logger.info("output is: {}".format(output)) - output = tgen.gears['r1'].vtysh_cmd("show bgp vrfs", isjson=False) - logger.info('output is: {}'.format(output)) + output = tgen.gears["r1"].vtysh_cmd("show bgp vrfs", isjson=False) + 
logger.info("output is: {}".format(output)) def test_bgp_convergence(): @@ -175,23 +188,25 @@ def test_bgp_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp convergence') + logger.info("waiting for bgp convergence") # Expected result - router = tgen.gears['r1'] - if router.has_version('<', '3.0'): - reffile = os.path.join(CWD, 'r1/summary20.txt') + router = tgen.gears["r1"] + if router.has_version("<", "3.0"): + reffile = os.path.join(CWD, "r1/summary20.txt") else: - reffile = os.path.join(CWD, 'r1/summary.txt') + reffile = os.path.join(CWD, "r1/summary.txt") expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, - router, 'show bgp vrf r1-cust1 summary json', expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show bgp vrf r1-cust1 summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=90, wait=0.5) - assertmsg = 'BGP router network did not converge' + assertmsg = "BGP router network did not converge" assert res is None, assertmsg + def test_bgp_vrf_netns(): tgen = get_topogen() @@ -200,24 +215,28 @@ def test_bgp_vrf_netns(): pytest.skip(tgen.errors) expect = { - 'routerId': '10.0.1.1', - 'routes': { - }, + "routerId": "10.0.1.1", + "routes": {}, } for subnet in range(0, 10): - netkey = '10.201.{}.0/24'.format(subnet) - expect['routes'][netkey] = [] - peer = {'valid': True} - expect['routes'][netkey].append(peer) - - test_func = functools.partial(topotest.router_json_cmp, - tgen.gears['r1'], 'show ip bgp vrf r1-cust1 ipv4 json', expect) + netkey = "10.201.{}.0/24".format(subnet) + expect["routes"][netkey] = [] + peer = {"valid": True} + expect["routes"][netkey].append(peer) + + test_func = functools.partial( + topotest.router_json_cmp, + tgen.gears["r1"], + "show ip bgp vrf r1-cust1 ipv4 json", + expect, + ) _, res = topotest.run_and_expect(test_func, None, count=12, wait=0.5) assertmsg = 'expected routes in 
"show ip bgp vrf r1-cust1 ipv4" output' assert res is None, assertmsg -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] ret = pytest.main(args) diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index 76b0ab017e..d46c52a4c4 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -9,13 +9,18 @@ import pytest topology_only = False + def pytest_addoption(parser): """ Add topology-only option to the topology tester. This option makes pytest only run the setup_module() to setup the topology without running any tests. """ - parser.addoption('--topology-only', action='store_true', - help='Only set up this topology, don\'t run tests') + parser.addoption( + "--topology-only", + action="store_true", + help="Only set up this topology, don't run tests", + ) + def pytest_runtest_call(): """ @@ -30,7 +35,8 @@ def pytest_runtest_call(): # Allow user to play with the setup. tgen.mininet_cli() - pytest.exit('the topology executed successfully') + pytest.exit("the topology executed successfully") + def pytest_assertrepr_compare(op, left, right): """ @@ -44,17 +50,19 @@ def pytest_assertrepr_compare(op, left, right): return json_result.errors + def pytest_configure(config): "Assert that the environment is correctly configured." 
global topology_only if not diagnose_env(): - pytest.exit('enviroment has errors, please read the logs') + pytest.exit("enviroment has errors, please read the logs") - if config.getoption('--topology-only'): + if config.getoption("--topology-only"): topology_only = True + def pytest_runtest_makereport(item, call): "Log all assert messages to default logger with error level" # Nothing happened @@ -65,18 +73,22 @@ def pytest_runtest_makereport(item, call): modname = parent.module.__name__ # Treat skips as non errors - if call.excinfo.typename != 'AssertionError': - logger.info('assert skipped at "{}/{}": {}'.format( - modname, item.name, call.excinfo.value)) + if call.excinfo.typename != "AssertionError": + logger.info( + 'assert skipped at "{}/{}": {}'.format( + modname, item.name, call.excinfo.value + ) + ) return # Handle assert failures parent._previousfailed = item - logger.error('assert failed at "{}/{}": {}'.format( - modname, item.name, call.excinfo.value)) + logger.error( + 'assert failed at "{}/{}": {}'.format(modname, item.name, call.excinfo.value) + ) # (topogen) Set topology error to avoid advancing in the test. tgen = get_topogen() if tgen is not None: # This will cause topogen to report error on `routers_have_failure`. - tgen.set_error('{}/{}'.format(modname, item.name)) + tgen.set_error("{}/{}".format(modname, item.name)) diff --git a/tests/topotests/eigrp-topo1/test_eigrp_topo1.py b/tests/topotests/eigrp-topo1/test_eigrp_topo1.py index 1c00face43..c1dd88823b 100755 --- a/tests/topotests/eigrp-topo1/test_eigrp_topo1.py +++ b/tests/topotests/eigrp-topo1/test_eigrp_topo1.py @@ -35,7 +35,7 @@ import json # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -52,6 +52,7 @@ from mininet.topo import Topo ## ##################################################### + class NetworkTopo(Topo): "EIGRP Topology 1" @@ -61,27 +62,27 @@ class NetworkTopo(Topo): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # On main router # First switch is for a dummy interface (for local network) - switch = tgen.add_switch('sw1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("sw1") + switch.add_link(tgen.gears["r1"]) # Switches for EIGRP # switch 2 switch is for connection to EIGRP router - switch = tgen.add_switch('sw2') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("sw2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) # switch 4 is stub on remote EIGRP router - switch = tgen.add_switch('sw4') - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("sw4") + switch.add_link(tgen.gears["r3"]) # switch 3 is between EIGRP routers - switch = tgen.add_switch('sw3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("sw3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) ##################################################### @@ -90,6 +91,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) @@ -99,12 +101,10 @@ def setup_module(module): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) 
router.load_config( - TopoRouter.RD_EIGRP, - os.path.join(CWD, '{}/eigrpd.conf'.format(rname)) + TopoRouter.RD_EIGRP, os.path.join(CWD, "{}/eigrpd.conf".format(rname)) ) tgen.start_router() @@ -126,7 +126,7 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - topotest.sleep(5, 'Waiting for EIGRP convergence') + topotest.sleep(5, "Waiting for EIGRP convergence") def test_eigrp_routes(): @@ -142,7 +142,7 @@ def test_eigrp_routes(): router_list = tgen.routers().values() for router in router_list: - refTableFile = '{}/{}/show_ip_eigrp.json'.format(CWD, router.name) + refTableFile = "{}/{}/show_ip_eigrp.json".format(CWD, router.name) # Read expected result from file expected = json.loads(open(refTableFile).read()) @@ -153,6 +153,7 @@ def test_eigrp_routes(): assertmsg = '"show ip eigrp topo" mismatches on {}'.format(router.name) assert topotest.json_cmp(actual, expected) is None, assertmsg + def test_zebra_ipv4_routingTable(): "Test 'show ip route'" @@ -164,27 +165,29 @@ def test_zebra_ipv4_routingTable(): failures = 0 router_list = tgen.routers().values() for router in router_list: - output = router.vtysh_cmd('show ip route json', isjson=True) - refTableFile = '{}/{}/show_ip_route.json_ref'.format(CWD, router.name) + output = router.vtysh_cmd("show ip route json", isjson=True) + refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name) expected = json.loads(open(refTableFile).read()) - assertmsg = 'Zebra IPv4 Routing Table verification failed for router {}'.format(router.name) + assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format( + router.name + ) assert topotest.json_cmp(output, expected) is None, assertmsg + def test_shut_interface_and_recover(): "Test shutdown of an interface and recovery of the interface" tgen = get_topogen() - router = tgen.gears['r1'] - router.run('ip link set r1-eth1 down') - topotest.sleep(5, 'Waiting for EIGRP convergence') - router.run('ip link set r1-eth1 up') 
- + router = tgen.gears["r1"] + router.run("ip link set r1-eth1 down") + topotest.sleep(5, "Waiting for EIGRP convergence") + router.run("ip link set r1-eth1 up") def test_shutdown_check_stderr(): - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") tgen = get_topogen() # Don't run this test if we have any failure. @@ -197,15 +200,15 @@ def test_shutdown_check_stderr(): for router in router_list: router.stop() - log = tgen.net[router.name].getStdErr('eigrpd') + log = tgen.net[router.name].getStdErr("eigrpd") if log: - logger.error('EIGRPd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('zebra') + logger.error("EIGRPd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("zebra") if log: - logger.error('Zebra StdErr Log:' + log) + logger.error("Zebra StdErr Log:" + log) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) @@ -237,23 +240,23 @@ def ip_eigrp_topo(node): } } """ - output = topotest.normalize_text(node.vtysh_cmd('show ip eigrp topo')).splitlines() + output = topotest.normalize_text(node.vtysh_cmd("show ip eigrp topo")).splitlines() result = {} for idx, line in enumerate(output): - columns = line.split(' ', 1) + columns = line.split(" ", 1) # Parse the following format into python dicts # code A.B.C.D/E, X successors, FD is Y, serno: Z # via FOO, interface-name code = columns[0] - if code not in ['P', 'A', 'U', 'Q', 'R', 'r', 's']: + if code not in ["P", "A", "U", "Q", "R", "r", "s"]: continue if not result.has_key(code): result[code] = {} # Split network from the rest - columns = columns[1].split(',') + columns = columns[1].split(",") # Parse first line data network = columns[0] @@ -263,33 +266,33 @@ def ip_eigrp_topo(node): if column == columns[0]: continue - match = re.search(r'(\d+) 
successors', column) + match = re.search(r"(\d+) successors", column) if match is not None: - result[code][network]['successors'] = match.group(1) + result[code][network]["successors"] = match.group(1) continue - match = re.search(r'FD is (\d+)', column) + match = re.search(r"FD is (\d+)", column) if match is not None: - result[code][network]['fd'] = match.group(1) + result[code][network]["fd"] = match.group(1) continue - match = re.search(r'serno: (\d+)', column) + match = re.search(r"serno: (\d+)", column) if match is not None: - result[code][network]['serno'] = match.group(1) + result[code][network]["serno"] = match.group(1) continue # Parse second line data nextline = output[idx + 1] - columns = topotest.normalize_text(nextline).split(',') + columns = topotest.normalize_text(nextline).split(",") for column in columns: - match = re.search(r'via (.+)', column) + match = re.search(r"via (.+)", column) if match is not None: - result[code][network]['via'] = match.group(1) + result[code][network]["via"] = match.group(1) continue - match = re.search(r'(.+)', column) + match = re.search(r"(.+)", column) if match is not None: - result[code][network]['interface'] = match.group(1) + result[code][network]["interface"] = match.group(1) continue return result diff --git a/tests/topotests/evpn-pim-1/leaf1/bgpd.conf b/tests/topotests/evpn-pim-1/leaf1/bgpd.conf index 33d34db677..4dedfecd61 100644 --- a/tests/topotests/evpn-pim-1/leaf1/bgpd.conf +++ b/tests/topotests/evpn-pim-1/leaf1/bgpd.conf @@ -1,9 +1,10 @@ router bgp 65002 + no bgp ebgp-requires-policy neighbor 192.168.1.1 remote-as external redistribute connected address-family l2vpn evpn neighbor 192.168.1.1 activate advertise-all-vni ! -!
\ No newline at end of file +! diff --git a/tests/topotests/evpn-pim-1/leaf2/bgpd.conf b/tests/topotests/evpn-pim-1/leaf2/bgpd.conf index 3dd9f237be..5bc708240d 100644 --- a/tests/topotests/evpn-pim-1/leaf2/bgpd.conf +++ b/tests/topotests/evpn-pim-1/leaf2/bgpd.conf @@ -1,9 +1,10 @@ router bgp 65003 + no bgp ebgp-requires-policy neighbor 192.168.2.1 remote-as external redistribute connected address-family l2vpn evpn neighbor 192.168.2.1 activate advertise-all-vni ! -!
\ No newline at end of file +! diff --git a/tests/topotests/evpn-pim-1/spine/bgp.summ.json b/tests/topotests/evpn-pim-1/spine/bgp.summ.json index faf40c8d43..53370507e8 100644 --- a/tests/topotests/evpn-pim-1/spine/bgp.summ.json +++ b/tests/topotests/evpn-pim-1/spine/bgp.summ.json @@ -12,7 +12,6 @@ "tableVersion":0, "outq":0, "inq":0, - "prefixReceivedCount":3, "pfxRcd":3, "pfxSnt":7, "state":"Established", @@ -26,7 +25,6 @@ "tableVersion":0, "outq":0, "inq":0, - "prefixReceivedCount":3, "pfxRcd":3, "pfxSnt":7, "state":"Established", diff --git a/tests/topotests/evpn-pim-1/spine/bgpd.conf b/tests/topotests/evpn-pim-1/spine/bgpd.conf index 9a845043e9..16c17b29cc 100644 --- a/tests/topotests/evpn-pim-1/spine/bgpd.conf +++ b/tests/topotests/evpn-pim-1/spine/bgpd.conf @@ -1,5 +1,6 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as external neighbor 192.168.2.3 remote-as external redistribute connected diff --git a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py index dafe2e03ac..94bb91d49f 100755 --- a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py +++ b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py @@ -36,7 +36,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -53,6 +53,7 @@ from mininet.topo import Topo ## ##################################################### + class NetworkTopo(Topo): "evpn-pim Topology 1" @@ -61,34 +62,33 @@ class NetworkTopo(Topo): tgen = get_topogen(self) - tgen.add_router('spine') - tgen.add_router('leaf1') - tgen.add_router('leaf2') - tgen.add_router('host1') - tgen.add_router('host2') + tgen.add_router("spine") + tgen.add_router("leaf1") + tgen.add_router("leaf2") + tgen.add_router("host1") + tgen.add_router("host2") # On main router # First switch is for a dummy interface (for local network) # spine-eth0 is connected to leaf1-eth0 - switch = tgen.add_switch('sw1') - switch.add_link(tgen.gears['spine']) - switch.add_link(tgen.gears['leaf1']) + switch = tgen.add_switch("sw1") + switch.add_link(tgen.gears["spine"]) + switch.add_link(tgen.gears["leaf1"]) # spine-eth1 is connected to leaf2-eth0 - switch = tgen.add_switch('sw2') - switch.add_link(tgen.gears['spine']) - switch.add_link(tgen.gears['leaf2']) + switch = tgen.add_switch("sw2") + switch.add_link(tgen.gears["spine"]) + switch.add_link(tgen.gears["leaf2"]) # leaf1-eth1 is connected to host1-eth0 - switch = tgen.add_switch('sw3') - switch.add_link(tgen.gears['leaf1']) - switch.add_link(tgen.gears['host1']) + switch = tgen.add_switch("sw3") + switch.add_link(tgen.gears["leaf1"]) + switch.add_link(tgen.gears["host1"]) # leaf2-eth1 is connected to host2-eth0 - switch = tgen.add_switch('sw4') - switch.add_link(tgen.gears['leaf2']) - switch.add_link(tgen.gears['host2']) - + switch = tgen.add_switch("sw4") + switch.add_link(tgen.gears["leaf2"]) + switch.add_link(tgen.gears["host2"]) ##################################################### @@ -97,42 +97,45 @@ class NetworkTopo(Topo): ## ##################################################### + def 
setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) tgen.start_topology() - leaf1 = tgen.gears['leaf1'] - leaf2 = tgen.gears['leaf2'] - - leaf1.run('brctl addbr brleaf1') - leaf2.run('brctl addbr brleaf2') - leaf1.run('ip link set dev brleaf1 up') - leaf2.run('ip link set dev brleaf2 up') - leaf1.run('ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf1-eth1 dstport 4789') - leaf2.run('ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf2-eth1 dstport 4789') - leaf1.run('brctl addif brleaf1 vxlan0') - leaf2.run('brctl addif brleaf2 vxlan0') - leaf1.run('ip link set up dev vxlan0') - leaf2.run('ip link set up dev vxlan0') - #tgen.mininet_cli() + leaf1 = tgen.gears["leaf1"] + leaf2 = tgen.gears["leaf2"] + + leaf1.run("brctl addbr brleaf1") + leaf2.run("brctl addbr brleaf2") + leaf1.run("ip link set dev brleaf1 up") + leaf2.run("ip link set dev brleaf2 up") + leaf1.run( + "ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf1-eth1 dstport 4789" + ) + leaf2.run( + "ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf2-eth1 dstport 4789" + ) + leaf1.run("brctl addif brleaf1 vxlan0") + leaf2.run("brctl addif brleaf2 vxlan0") + leaf1.run("ip link set up dev vxlan0") + leaf2.run("ip link set up dev vxlan0") + # tgen.mininet_cli() # This is a sample of configuration loading. 
router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_PIM, - os.path.join(CWD, '{}/pimd.conf'.format(rname)) + TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname)) ) tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(_mod): "Teardown the pytest environment" @@ -150,16 +153,18 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - spine = tgen.gears['spine'] - json_file = '{}/{}/bgp.summ.json'.format(CWD, spine.name) + spine = tgen.gears["spine"] + json_file = "{}/{}/bgp.summ.json".format(CWD, spine.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - spine, 'show bgp ipv4 uni summ json', expected) + test_func = partial( + topotest.router_json_cmp, spine, "show bgp ipv4 uni summ json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=125, wait=1) assertmsg = '"{}" JSON output mismatches'.format(spine.name) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_multicast_groups_on_rp(): "Ensure the multicast groups show up on the spine" @@ -172,20 +177,22 @@ def test_multicast_groups_on_rp(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - spine = tgen.gears['spine'] - json_file = '{}/{}/join-info.json'.format(CWD, spine.name) + spine = tgen.gears["spine"] + json_file = "{}/{}/join-info.json".format(CWD, spine.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - spine, 'show ip pim join json', expected) + test_func = partial( + 
topotest.router_json_cmp, spine, "show ip pim join json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assertmsg = '"{}" JSON output mismatches'.format(spine.name) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_shutdown_check_stderr(): - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") tgen = get_topogen() # Don't run this test if we have any failure. @@ -198,18 +205,17 @@ def test_shutdown_check_stderr(): for router in router_list: router.stop() - log = tgen.net[router.name].getStdErr('pimd') + log = tgen.net[router.name].getStdErr("pimd") if log: - logger.error('PIMd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('bgpd') + logger.error("PIMd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("bgpd") if log: - logger.error('BGPd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('zebra') + logger.error("BGPd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("zebra") if log: - logger.error('Zebra StdErr Log:' + log) + logger.error("Zebra StdErr Log:" + log) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) - diff --git a/tests/topotests/example-test/test_example.py b/tests/topotests/example-test/test_example.py index 8e37ad11d4..72eceee612 100755 --- a/tests/topotests/example-test/test_example.py +++ b/tests/topotests/example-test/test_example.py @@ -9,52 +9,61 @@ import pytest fatal_error = "" + def setup_module(module): - print ("setup_module module:%s" % module.__name__) + print("setup_module module:%s" % module.__name__) + def teardown_module(module): - print ("teardown_module module:%s" % module.__name__) + print("teardown_module module:%s" % module.__name__) + def setup_function(function): 
- print ("setup_function function:%s" % function.__name__) + print("setup_function function:%s" % function.__name__) + def teardown_function(function): - print ("teardown_function function:%s" % function.__name__) + print("teardown_function function:%s" % function.__name__) + def test_numbers_compare(): a = 12 - print ("Dummy Output") - assert( a == 12 ) + print("Dummy Output") + assert a == 12 + def test_fail_example(): assert True, "Some Text with explaination in case of failure" + def test_ls_exits_zero(): "Tests for ls command on invalid file" global fatal_error proc = subprocess.Popen( - ["ls", "/some/nonexistant/file"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + ["ls", "/some/nonexistant/file"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, ) stdout, stderr = proc.communicate() - if (proc.returncode != 0): + if proc.returncode != 0: # Mark this as a fatal error which skips some other tests on failure fatal_error = "test_fail_example failed" assert proc.returncode == 0, "Return Code is non-Zero:\n%s" % stderr + def test_skipped_on_fatalerror(): global fatal_error # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) assert True, "Some Text with explaination in case of failure" -if __name__ == '__main__': + +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/example-test/test_template.py b/tests/topotests/example-test/test_template.py index 4e35ce8b9f..afe974876a 100755 --- a/tests/topotests/example-test/test_template.py +++ b/tests/topotests/example-test/test_template.py @@ -32,7 +32,7 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -43,8 +43,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -56,17 +58,18 @@ class TemplateTopo(Topo): # # Create 2 routers for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a switch with just one router connected to it to simulate a # empty network. - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create a connection between r1 and r2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + def setup_module(mod): "Sets up the pytest environment" @@ -83,12 +86,13 @@ def setup_module(mod): router.load_config( TopoRouter.RD_ZEBRA, # Uncomment next line to load configuration from ./router/zebra.conf - #os.path.join(CWD, '{}/zebra.conf'.format(rname)) + # os.path.join(CWD, '{}/zebra.conf'.format(rname)) ) # After loading the configurations, this function loads configured daemons. tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -96,6 +100,7 @@ def teardown_module(mod): # This function tears down the whole topology. tgen.stop_topology() + def test_call_mininet_cli(): "Dummy test that just calls mininet CLI so we can interact with the build." 
tgen = get_topogen() @@ -103,18 +108,20 @@ def test_call_mininet_cli(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('calling mininet CLI') + logger.info("calling mininet CLI") tgen.mininet_cli() + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py index 8e794b9946..f24f463b8a 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py +++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py @@ -33,8 +33,8 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topogen import Topogen, get_topogen @@ -44,19 +44,19 @@ from mininet.topo import Topo # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_rib + start_topology, + write_test_header, + write_test_footer, + verify_rib, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence -) +from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson_multiple_links.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) @@ -155,8 +155,9 @@ def test_bgp_convergence(request): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, "test_bgp_convergence failed.. \n" \ - " Error: {}".format(bgp_convergence) + assert ( + bgp_convergence is True + ), "test_bgp_convergence failed.. 
\n" " Error: {}".format(bgp_convergence) logger.info("BGP is converged successfully \n") write_test_footer(tc_name) @@ -167,7 +168,7 @@ def test_static_routes(request): tgen = get_topogen() if bgp_convergence is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -175,20 +176,19 @@ def test_static_routes(request): # Static routes are created as part of initial configuration, # verifying RIB - dut = 'r3' - protocol = 'bgp' - next_hop = '10.0.0.1' + dut = "r3" + protocol = "bgp" + next_hop = "10.0.0.1" input_dict = {"r1": topo["routers"]["r1"]} # Uncomment below to debug # tgen.mininet_cli() - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py index 315c7b3f2d..3ae3c9f4fe 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py @@ -32,31 +32,31 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 -from lib.topogen import Topogen, get_topogen +from lib.topogen import Topogen, get_topogen # Required to instantiate the topology builder class. from mininet.topo import Topo # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_rib + start_topology, + write_test_header, + write_test_footer, + verify_rib, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence -) +from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) @@ -65,6 +65,7 @@ except IOError: bgp_convergence = False input_dict = {} + class TemplateTopo(Topo): """ Test topology builder @@ -87,6 +88,7 @@ class TemplateTopo(Topo): # Building topology from json file build_topo_from_json(tgen, topo) + def setup_module(mod): """ Sets up the pytest environment @@ -96,7 +98,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -122,6 +124,7 @@ def setup_module(mod): logger.info("Running setup_module() done") + def teardown_module(mod): """ Teardown the pytest environment @@ -152,8 +155,9 @@ def test_bgp_convergence(request): # Api call verify whether BGP is 
converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\ - " Error: {}".format(bgp_convergence) + assert ( + bgp_convergence is True + ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence) logger.info("BGP is converged successfully \n") write_test_footer(tc_name) @@ -164,7 +168,7 @@ def test_static_routes(request): tgen = get_topogen() if bgp_convergence is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -172,19 +176,18 @@ def test_static_routes(request): # Static routes are created as part of initial configuration, # verifying RIB - dut = 'r3' - next_hop = '10.0.0.1' + dut = "r3" + next_hop = "10.0.0.1" input_dict = {"r1": topo["routers"]["r1"]} # Uncomment below to debug # tgen.mininet_cli() - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py index cd069aaec5..06fa2f4626 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py @@ -33,32 +33,32 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 # Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen +from lib.topogen import Topogen, get_topogen # Required to instantiate the topology builder class. from mininet.topo import Topo # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_rib + start_topology, + write_test_header, + write_test_footer, + verify_rib, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence -) +from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) @@ -100,7 +100,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -157,8 +157,9 @@ def test_bgp_convergence(request): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\ - " Error: {}".format(bgp_convergence) + assert ( + bgp_convergence is True + ), "test_bgp_convergence failed.. 
\n" " Error: {}".format(bgp_convergence) logger.info("BGP is converged successfully \n") write_test_footer(tc_name) @@ -169,7 +170,7 @@ def test_static_routes(request): tgen = get_topogen() if bgp_convergence is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -177,8 +178,8 @@ def test_static_routes(request): # Static routes are created as part of initial configuration, # verifying RIB - dut = 'r3' - next_hop = ['10.0.0.1', '10.0.0.5'] + dut = "r3" + next_hop = ["10.0.0.1", "10.0.0.5"] input_dict = { "r1": { "static_routes": [ @@ -186,20 +187,19 @@ def test_static_routes(request): "network": "100.0.20.1/32", "no_of_ip": 9, "admin_distance": 100, - "next_hop": "10.0.0.1" + "next_hop": "10.0.0.1", } ] } } # Uncomment below to debug # tgen.mininet_cli() - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-topo1/test_isis_topo1.py b/tests/topotests/isis-topo1/test_isis_topo1.py index 941f917c6b..6b1d9a8964 100644 --- a/tests/topotests/isis-topo1/test_isis_topo1.py +++ b/tests/topotests/isis-topo1/test_isis_topo1.py @@ -36,7 +36,7 @@ import pytest import time CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -48,6 +48,7 @@ from mininet.topo import Topo class ISISTopo1(Topo): "Simple two layer ISIS topology" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -61,27 +62,27 @@ class ISISTopo1(Topo): # \ / # r5 for routern in range(1, 6): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # r1 <- sw1 -> r3 - sw = tgen.add_switch('sw1') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['r3']) + sw = tgen.add_switch("sw1") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r3"]) # r2 <- sw2 -> r4 - sw = tgen.add_switch('sw2') - sw.add_link(tgen.gears['r2']) - sw.add_link(tgen.gears['r4']) + sw = tgen.add_switch("sw2") + sw.add_link(tgen.gears["r2"]) + sw.add_link(tgen.gears["r4"]) # r3 <- sw3 -> r5 - sw = tgen.add_switch('sw3') - sw.add_link(tgen.gears['r3']) - sw.add_link(tgen.gears['r5']) + sw = tgen.add_switch("sw3") + sw.add_link(tgen.gears["r3"]) + sw.add_link(tgen.gears["r5"]) # r4 <- sw4 -> r5 - sw = tgen.add_switch('sw4') - sw.add_link(tgen.gears['r4']) - sw.add_link(tgen.gears['r5']) + sw = tgen.add_switch("sw4") + sw.add_link(tgen.gears["r4"]) + sw.add_link(tgen.gears["r5"]) def setup_module(mod): @@ 
-92,12 +93,10 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in tgen.routers().iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_ISIS, - os.path.join(CWD, '{}/isisd.conf'.format(rname)) + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) ) # After loading the configurations, this function loads configured daemons. @@ -105,12 +104,12 @@ def setup_module(mod): has_version_20 = False for router in tgen.routers().values(): - if router.has_version('<', '3'): + if router.has_version("<", "3"): has_version_20 = True if has_version_20: - logger.info('Skipping ISIS tests for FRR 2.0') - tgen.set_error('ISIS has convergence problems with IPv6') + logger.info("Skipping ISIS tests for FRR 2.0") + tgen.set_error("ISIS has convergence problems with IPv6") def teardown_module(mod): @@ -136,7 +135,7 @@ def test_isis_convergence(): # ) for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_topology.json'.format(CWD, rname) + filename = "{0}/{1}/{1}_topology.json".format(CWD, rname) expected = json.loads(open(filename).read()) def compare_isis_topology(router, expected): @@ -145,9 +144,8 @@ def test_isis_convergence(): return topotest.json_cmp(actual, expected) test_func = functools.partial(compare_isis_topology, router, expected) - (result, diff) = topotest.run_and_expect(test_func, None, - wait=0.5, count=120) - assert result, 'ISIS did not converge on {}:\n{}'.format(rname, diff) + (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=120) + assert result, "ISIS did not converge on {}:\n{}".format(rname, diff) def test_isis_route_installation(): @@ -157,24 +155,24 @@ def test_isis_route_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for 
installed ISIS routes') + logger.info("Checking routers for installed ISIS routes") # Check for routes in 'show ip route json' for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) - actual = router.vtysh_cmd('show ip route json', isjson=True) + filename = "{0}/{1}/{1}_route.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) + actual = router.vtysh_cmd("show ip route json", isjson=True) # Older FRR versions don't list interfaces in some ISIS routes - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, routes in expected.iteritems(): for route in routes: - if route['protocol'] != 'isis': + if route["protocol"] != "isis": continue - for nexthop in route['nexthops']: - nexthop.pop('interfaceIndex', None) - nexthop.pop('interfaceName', None) + for nexthop in route["nexthops"]: + nexthop.pop("interfaceIndex", None) + nexthop.pop("interfaceName", None) assertmsg = "Router '{}' routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -187,19 +185,19 @@ def test_isis_linux_route_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS routes in OS') + logger.info("Checking routers for installed ISIS routes in OS") # Check for routes in `ip route` for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route_linux.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) + filename = "{0}/{1}/{1}_route_linux.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) actual = topotest.ip4_route(router) # Older FRR versions install routes using different proto - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, netoptions in expected.iteritems(): - if 'proto' in netoptions and netoptions['proto'] == '187': - 
netoptions['proto'] = 'zebra' + if "proto" in netoptions and netoptions["proto"] == "187": + netoptions["proto"] = "zebra" assertmsg = "Router '{}' OS routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -212,27 +210,27 @@ def test_isis_route6_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS IPv6 routes') + logger.info("Checking routers for installed ISIS IPv6 routes") # Check for routes in 'show ip route json' for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route6.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) - actual = router.vtysh_cmd('show ipv6 route json', isjson=True) + filename = "{0}/{1}/{1}_route6.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) + actual = router.vtysh_cmd("show ipv6 route json", isjson=True) # Older FRR versions don't list interfaces in some ISIS routes - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, routes in expected.iteritems(): for route in routes: # Older versions display different metrics for IPv6 routes - route.pop('metric', None) + route.pop("metric", None) - if route['protocol'] != 'isis': + if route["protocol"] != "isis": continue - for nexthop in route['nexthops']: - nexthop.pop('interfaceIndex', None) - nexthop.pop('interfaceName', None) + for nexthop in route["nexthops"]: + nexthop.pop("interfaceIndex", None) + nexthop.pop("interfaceName", None) assertmsg = "Router '{}' routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -245,19 +243,19 @@ def test_isis_linux_route6_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS IPv6 routes in OS') + logger.info("Checking routers for installed ISIS IPv6 routes in OS") # Check for routes in `ip route` for rname, router in 
tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route6_linux.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) + filename = "{0}/{1}/{1}_route6_linux.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) actual = topotest.ip6_route(router) # Older FRR versions install routes using different proto - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, netoptions in expected.iteritems(): - if 'proto' in netoptions and netoptions['proto'] == '187': - netoptions['proto'] = 'zebra' + if "proto" in netoptions and netoptions["proto"] == "187": + netoptions["proto"] = "zebra" assertmsg = "Router '{}' OS routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -267,12 +265,12 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) @@ -296,8 +294,11 @@ def dict_merge(dct, merge_dct): https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 """ for k, v in merge_dct.iteritems(): - if (k in dct and isinstance(dct[k], dict) - and isinstance(merge_dct[k], collections.Mapping)): + if ( + k in dct + and isinstance(dct[k], dict) + and isinstance(merge_dct[k], collections.Mapping) + ): dict_merge(dct[k], merge_dct[k]) else: dct[k] = merge_dct[k] @@ -316,59 +317,59 @@ def parse_topology(lines, level): if area_match: area = area_match.group(1) if area not in areas: - areas[area] = { - level: { - 'ipv4': [], - 'ipv6': [] - } - } + areas[area] = {level: {"ipv4": [], "ipv6": []}} ipv = None continue elif area is None: continue if re.match(r"IS\-IS paths to level-. 
routers that speak IPv6", line): - ipv = 'ipv6' + ipv = "ipv6" continue if re.match(r"IS\-IS paths to level-. routers that speak IP", line): - ipv = 'ipv4' + ipv = "ipv4" continue - item_match = re.match( - r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line) + item_match = re.match(r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line) if item_match is not None: # Skip header - if (item_match.group(1) == 'Vertex' and - item_match.group(2) == 'Type' and - item_match.group(3) == 'Metric' and - item_match.group(4) == 'Next-Hop' and - item_match.group(5) == 'Interface' and - item_match.group(6) == 'Parent'): + if ( + item_match.group(1) == "Vertex" + and item_match.group(2) == "Type" + and item_match.group(3) == "Metric" + and item_match.group(4) == "Next-Hop" + and item_match.group(5) == "Interface" + and item_match.group(6) == "Parent" + ): continue - areas[area][level][ipv].append({ - 'vertex': item_match.group(1), - 'type': item_match.group(2), - 'metric': item_match.group(3), - 'next-hop': item_match.group(4), - 'interface': item_match.group(5), - 'parent': item_match.group(6), - }) + areas[area][level][ipv].append( + { + "vertex": item_match.group(1), + "type": item_match.group(2), + "metric": item_match.group(3), + "next-hop": item_match.group(4), + "interface": item_match.group(5), + "parent": item_match.group(6), + } + ) continue item_match = re.match(r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line) if item_match is not None: - areas[area][level][ipv].append({ - 'vertex': item_match.group(1), - 'type': item_match.group(2), - 'metric': item_match.group(3), - 'parent': item_match.group(4), - }) + areas[area][level][ipv].append( + { + "vertex": item_match.group(1), + "type": item_match.group(2), + "metric": item_match.group(3), + "parent": item_match.group(4), + } + ) continue item_match = re.match(r"([^ ]+)", line) if item_match is not None: - areas[area][level][ipv].append({'vertex': item_match.group(1)}) + areas[area][level][ipv].append({"vertex": 
item_match.group(1)}) continue return areas @@ -410,14 +411,14 @@ def show_isis_topology(router): } """ l1out = topotest.normalize_text( - router.vtysh_cmd('show isis topology level-1') + router.vtysh_cmd("show isis topology level-1") ).splitlines() l2out = topotest.normalize_text( - router.vtysh_cmd('show isis topology level-2') + router.vtysh_cmd("show isis topology level-2") ).splitlines() - l1 = parse_topology(l1out, 'level-1') - l2 = parse_topology(l2out, 'level-2') + l1 = parse_topology(l1out, "level-1") + l2 = parse_topology(l2out, "level-2") dict_merge(l1, l2) return l1 diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_all_binding.ref new file mode 100644 index 0000000000..99a59668f8 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_all_binding.ref @@ -0,0 +1,61 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_all_binding.ref 
b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_all_binding.ref new file mode 100644 index 0000000000..95fb847c1e --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_all_binding.ref @@ -0,0 +1,63 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"1.1.1.1", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"3.3.3.3", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_all_binding.ref new file mode 100644 index 0000000000..100dd307ea --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_all_binding.ref @@ -0,0 +1,61 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + 
"addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"2.2.2.2", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_all_binding.ref new file mode 100644 index 0000000000..2a46c40346 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_all_binding.ref @@ -0,0 +1,68 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + 
"addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py index 47b32a16e6..450d35e16c 100755 --- a/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py +++ b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py @@ -67,7 +67,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -78,8 +78,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -87,24 +89,25 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['r1', 'r2', 'r3', 'r4']: + for router in ["r1", "r2", "r3", "r4"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s0') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s0") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): "Sets up the pytest environment" @@ -116,22 
+119,20 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) # Don't start ospfd and ldpd in the CE nodes - if router.name[0] == 'r': + if router.name[0] == "r": router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_LDP, - os.path.join(CWD, '{}/ldpd.conf'.format(rname)) + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -146,17 +147,17 @@ def router_compare_json_output(rname, command, reference): logger.info('Comparing router "%s" "%s" output', rname, command) tgen = get_topogen() - filename = '{}/{}/{}'.format(CWD, rname, reference) + filename = "{}/{}/{}".format(CWD, rname, reference) expected = json.loads(open(filename).read()) # Run test function until we get an result. Wait at most 80 seconds. 
- test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + def test_ospf_convergence(): logger.info("Test: check OSPF adjacencies") @@ -166,8 +167,11 @@ def test_ospf_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json" + ) + def test_rib(): logger.info("Test: verify RIB") @@ -177,9 +181,10 @@ def test_rib(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: + for rname in ["r1", "r2", "r3", "r4"]: router_compare_json_output(rname, "show ip route json", "show_ip_route.ref") + def test_ldp_adjacencies(): logger.info("Test: verify LDP adjacencies") tgen = get_topogen() @@ -188,8 +193,11 @@ def test_ldp_adjacencies(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp discovery json", "show_ldp_discovery.ref" + ) + def test_ldp_neighbors(): logger.info("Test: verify LDP neighbors") @@ -199,8 +207,11 @@ def test_ldp_neighbors(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp 
neighbor json", "show_ldp_neighbor.ref" + ) + def test_ldp_bindings(): logger.info("Test: verify LDP bindings") @@ -210,18 +221,41 @@ def test_ldp_bindings(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_binding.ref" + ) + + +def test_ldp_bindings_all_routes(): + logger.info("Test: verify LDP bindings after host filter removed") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # remove ACL that blocks advertising everything but host routes */ + cmd = 'vtysh -c "configure terminal" -c "mpls ldp" -c "address-family ipv4" -c "no label local allocate host-routes"' + tgen.net["r1"].cmd(cmd) + sleep(2) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_all_binding.ref" + ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py index eda1b37e52..ac99eb1a26 100755 --- a/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py +++ b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py @@ -67,7 +67,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -78,8 +78,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -87,24 +89,25 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['r1', 'r2', 'r3', 'r4']: + for router in ["r1", "r2", "r3", "r4"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s0') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s0") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): "Sets up the pytest environment" @@ -116,22 +119,20 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) # Don't start ospfd and ldpd in the CE nodes - if router.name[0] == 'r': + if router.name[0] == "r": router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, 
"{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_LDP, - os.path.join(CWD, '{}/ldpd.conf'.format(rname)) + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -146,17 +147,17 @@ def router_compare_json_output(rname, command, reference): logger.info('Comparing router "%s" "%s" output', rname, command) tgen = get_topogen() - filename = '{}/{}/{}'.format(CWD, rname, reference) + filename = "{}/{}/{}".format(CWD, rname, reference) expected = json.loads(open(filename).read()) # Run test function until we get an result. Wait at most 80 seconds. - test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + def test_ospf_convergence(): logger.info("Test: check OSPF adjacencies") @@ -166,8 +167,11 @@ def test_ospf_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json" + ) + def test_rib(): logger.info("Test: verify RIB") @@ -177,9 +181,10 @@ def test_rib(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: + for rname in ["r1", "r2", "r3", "r4"]: router_compare_json_output(rname, "show ip route json", "show_ip_route.ref") + def test_ldp_adjacencies(): logger.info("Test: verify LDP adjacencies") tgen = get_topogen() @@ -188,8 +193,11 @@ def test_ldp_adjacencies(): if tgen.routers_have_failure(): 
pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp discovery json", "show_ldp_discovery.ref" + ) + def test_ldp_neighbors(): logger.info("Test: verify LDP neighbors") @@ -199,8 +207,11 @@ def test_ldp_neighbors(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref" + ) + def test_ldp_bindings(): logger.info("Test: verify LDP bindings") @@ -210,18 +221,22 @@ def test_ldp_bindings(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3', 'r4']: - router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref") + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_binding.ref" + ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref index 7819d303d1..7d398887c4 100644 --- a/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O 1.1.1.1/32 [110/0] is directly connected, lo -O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0, label implicit-null -O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx -O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx -O 10.0.1.0/24 [110/10] is directly connected, r1-eth0 -O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null -O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null +O 1.1.1.1/32 [110/0] is directly connected, lo, weight 1 +O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0, label implicit-null, weight 1 +O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx, weight 1 +O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx, weight 1 +O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1 +O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null, weight 1 +O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null, weight 1 diff --git a/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref index 2a97757757..90e18962a8 100644 --- a/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0, label implicit-null -O 2.2.2.2/32 [110/0] is directly connected, lo -O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1, label implicit-null -O>* 4.4.4.4/32 [110/10] via 10.0.2.4, 
r2-eth1, label implicit-null -O 10.0.1.0/24 [110/10] is directly connected, r2-eth0 -O 10.0.2.0/24 [110/10] is directly connected, r2-eth1 -O 10.0.3.0/24 [110/10] is directly connected, r2-eth2 +O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0, label implicit-null, weight 1 +O 2.2.2.2/32 [110/0] is directly connected, lo, weight 1 +O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1, label implicit-null, weight 1 +O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r2-eth1, label implicit-null, weight 1 +O 10.0.1.0/24 [110/10] is directly connected, r2-eth0, weight 1 +O 10.0.2.0/24 [110/10] is directly connected, r2-eth1, weight 1 +O 10.0.3.0/24 [110/10] is directly connected, r2-eth2, weight 1 diff --git a/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref index 645224a97b..9b9c763339 100644 --- a/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx -O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0, label implicit-null -O 3.3.3.3/32 [110/0] is directly connected, lo -O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0, label implicit-null -O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0, label implicit-null -O 10.0.2.0/24 [110/10] is directly connected, r3-eth0 -O 10.0.3.0/24 [110/10] is directly connected, r3-eth1 +O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx, weight 1 +O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0, label implicit-null, weight 1 +O 3.3.3.3/32 [110/0] is directly connected, lo, weight 1 +O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0, label implicit-null, weight 1 +O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0, label implicit-null, weight 1 +O 10.0.2.0/24 [110/10] is directly connected, r3-eth0, weight 1 +O 10.0.3.0/24 [110/10] is directly connected, r3-eth1, weight 1 diff --git a/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref index 
321828bfae..7444cc924b 100644 --- a/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx -O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0, label implicit-null -O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0, label implicit-null -O 4.4.4.4/32 [110/0] is directly connected, lo -O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null -O 10.0.2.0/24 [110/10] is directly connected, r4-eth0 -O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null +O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx, weight 1 +O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0, label implicit-null, weight 1 +O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0, label implicit-null, weight 1 +O 4.4.4.4/32 [110/0] is directly connected, lo, weight 1 +O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null, weight 1 +O 10.0.2.0/24 [110/10] is directly connected, r4-eth0, weight 1 +O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null, weight 1 diff --git a/tests/topotests/ldp-topo1/test_ldp_topo1.py b/tests/topotests/ldp-topo1/test_ldp_topo1.py index c0d11fd5e0..cef4d6587e 100755 --- a/tests/topotests/ldp-topo1/test_ldp_topo1.py +++ b/tests/topotests/ldp-topo1/test_ldp_topo1.py @@ -57,7 +57,7 @@ r3-eth1 .3 | | .3 r3-eth0 | .4 r4-eth0 | r3 | | r4 | | 3.3.3.3 | | 4.4.4.4 | +-----------+ +---------+ -""" +""" import os import re @@ -83,6 +83,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "LDP Test Topology 1" @@ -91,23 +92,65 @@ class NetworkTopo(Topo): # Setup Routers router = {} for i in range(1, 5): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # Setup Switches, add Interfaces and Connections switch = {} # First switch - switch[0] = self.addSwitch('sw0', cls=topotest.LegacySwitch) - self.addLink(switch[0], router[1], 
intfName2='r1-eth0', addr1='80:AA:00:00:00:00', addr2='00:11:00:01:00:00') - self.addLink(switch[0], router[2], intfName2='r2-eth0', addr1='80:AA:00:00:00:01', addr2='00:11:00:02:00:00') + switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch) + self.addLink( + switch[0], + router[1], + intfName2="r1-eth0", + addr1="80:AA:00:00:00:00", + addr2="00:11:00:01:00:00", + ) + self.addLink( + switch[0], + router[2], + intfName2="r2-eth0", + addr1="80:AA:00:00:00:01", + addr2="00:11:00:02:00:00", + ) # Second switch - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[2], intfName2='r2-eth1', addr1='80:AA:00:01:00:00', addr2='00:11:00:02:00:01') - self.addLink(switch[1], router[3], intfName2='r3-eth0', addr1='80:AA:00:01:00:01', addr2='00:11:00:03:00:00') - self.addLink(switch[1], router[4], intfName2='r4-eth0', addr1='80:AA:00:01:00:02', addr2='00:11:00:04:00:00') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink( + switch[1], + router[2], + intfName2="r2-eth1", + addr1="80:AA:00:01:00:00", + addr2="00:11:00:02:00:01", + ) + self.addLink( + switch[1], + router[3], + intfName2="r3-eth0", + addr1="80:AA:00:01:00:01", + addr2="00:11:00:03:00:00", + ) + self.addLink( + switch[1], + router[4], + intfName2="r4-eth0", + addr1="80:AA:00:01:00:02", + addr2="00:11:00:04:00:00", + ) # Third switch - switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch) - self.addLink(switch[2], router[2], intfName2='r2-eth2', addr1='80:AA:00:02:00:00', addr2='00:11:00:02:00:02') - self.addLink(switch[2], router[3], intfName2='r3-eth1', addr1='80:AA:00:02:00:01', addr2='00:11:00:03:00:01') + switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch) + self.addLink( + switch[2], + router[2], + intfName2="r2-eth2", + addr1="80:AA:00:02:00:00", + addr2="00:11:00:02:00:02", + ) + self.addLink( + switch[2], + router[3], + intfName2="r3-eth1", + addr1="80:AA:00:02:00:01", + addr2="00:11:00:03:00:01", + ) 
##################################################### @@ -116,6 +159,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net global fatal_error @@ -124,7 +168,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -134,10 +178,10 @@ def setup_module(module): # Starting Routers for i in range(1, 5): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ospfd', '%s/r%s/ospfd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ldpd', '%s/r%s/ldpd.conf' % (thisDir, i)) - fatal_error = net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ospfd", "%s/r%s/ospfd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ldpd", "%s/r%s/ldpd.conf" % (thisDir, i)) + fatal_error = net["r%s" % i].startRouter() if fatal_error != "": break @@ -145,6 +189,7 @@ def setup_module(module): # For debugging after starting FRR/Quagga daemons, uncomment the next line # CLI(net) + def teardown_module(module): global net @@ -160,7 +205,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -169,18 +214,19 @@ def test_router_running(): # Starting Routers for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line # CLI(net) + def test_mpls_interfaces(): global fatal_error global net # Skip if previous fatal error condition is raised - 
if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -190,40 +236,51 @@ def test_mpls_interfaces(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_interface.ref' + refTableFile = "%s/r%s/show_mpls_ldp_interface.ref" if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp interface" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp interface" 2> /dev/null') + .rstrip() + ) # Mask out Timer in Uptime actual = re.sub(r" [0-9][0-9]:[0-9][0-9]:[0-9][0-9] ", " xx:xx:xx ", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP interface status", - title2="expected MPLS LDP interface status") + title2="expected MPLS LDP interface status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP Interface status Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP Interface status Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - if failures>0: + if failures > 0: fatal_error = "MPLS LDP Interface status failed" - assert failures == 0, "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff) # Make sure 
that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -235,7 +292,7 @@ def test_mpls_ldp_neighbor_establish(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) # Wait for MPLS LDP neighbors to establish. @@ -247,17 +304,23 @@ def test_mpls_ldp_neighbor_establish(): sys.stdout.flush() # Look for any node not yet converged for i in range(1, 5): - established = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip() + established = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null') + .rstrip() + ) # On current version, we need to make sure they all turn to OPERATIONAL on all lines # - lines = ('\n'.join(established.splitlines()) + '\n').splitlines(1) + lines = ("\n".join(established.splitlines()) + "\n").splitlines(1) # Check all lines to be either table header (starting with ^AF or show OPERATIONAL) - header = r'^AF.*' - operational = r'^ip.*OPERATIONAL.*' + header = r"^AF.*" + operational = r"^ip.*OPERATIONAL.*" found_operational = 0 for j in range(1, len(lines)): - if (not re.search(header, lines[j])) and (not re.search(operational, lines[j])): + if (not re.search(header, lines[j])) and ( + not re.search(operational, lines[j]) + ): established = "" # Empty string shows NOT established if re.search(operational, lines[j]): found_operational += 1 @@ -265,14 +328,14 @@ def test_mpls_ldp_neighbor_establish(): # Need at least one operational neighbor established = "" # Empty string shows NOT established if not established: - print('Waiting for r%s' %i) + print("Waiting for r%s" % i) sys.stdout.flush() break if not established: sleep(5) timeout -= 5 else: - print('Done') + print("Done") break else: # Bail out with error if a 
router fails to converge @@ -285,10 +348,10 @@ def test_mpls_ldp_neighbor_establish(): # Only wait if we actually went through a convergence print("\nwaiting 15s for LDP sessions to establish") sleep(15) - + # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error @@ -297,7 +360,7 @@ def test_mpls_ldp_discovery(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -307,39 +370,54 @@ def test_mpls_ldp_discovery(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_discovery.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_discovery.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null') + .rstrip() + ) # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null') + .rstrip() + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP discovery 
output", - title2="expected MPLS LDP discovery output") + title2="expected MPLS LDP discovery output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP discovery output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP discovery output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "MPLS LDP Interface discovery output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface discovery output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -351,7 +429,7 @@ def test_mpls_ldp_neighbor(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -361,44 +439,59 @@ def test_mpls_ldp_neighbor(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_neighbor.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_neighbor.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null') + .rstrip() + ) # Mask out changing parts in output # Mask out Timer in Uptime - actual = re.sub(r"(ipv4 [0-9\.]+ 
+OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", r"\1xx:xx:xx", actual) + actual = re.sub( + r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", + r"\1xx:xx:xx", + actual, + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP neighbor output", - title2="expected MPLS LDP neighbor output") + title2="expected MPLS LDP neighbor output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP neighbor output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP neighbor output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "MPLS LDP Interface neighbor output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface neighbor output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line - #CLI(net) + # CLI(net) def test_mpls_ldp_binding(): @@ -410,7 +503,7 @@ def test_mpls_ldp_binding(): # pytest.skip("Skipping test_mpls_ldp_binding") # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -420,58 +513,77 @@ def test_mpls_ldp_binding(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_binding.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_binding.ref" % 
(thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp binding" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp binding" 2> /dev/null') + .rstrip() + ) # Mask out changing parts in output # Mask out label - actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual) - actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual) + actual = re.sub( + r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual + ) + actual = re.sub( + r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", + r"\1xxx\2", + actual, + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Sort lines which start with "xx via inet " - pattern = r'^\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+' + pattern = r"^\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" swapped = True while swapped: swapped = False for j in range(1, len(actual)): - if re.search(pattern, actual[j]) and re.search(pattern, actual[j-1]): - if actual[j-1] > actual[j]: - temp = actual[j-1] - actual[j-1] = actual[j] + if re.search(pattern, actual[j]) and re.search( + pattern, actual[j - 1] + ): + if actual[j - 1] > actual[j]: + temp = actual[j - 1] + actual[j - 1] = actual[j] actual[j] = temp swapped = True # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP binding output", - title2="expected MPLS LDP binding output") + title2="expected MPLS LDP binding output", + ) # Empty string if it matches, otherwise diff 
contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP binding output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP binding output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "MPLS LDP Interface binding output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface binding output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line - #CLI(net) + # CLI(net) def test_zebra_ipv4_routingTable(): @@ -479,7 +591,7 @@ def test_zebra_ipv4_routingTable(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -489,13 +601,17 @@ def test_zebra_ipv4_routingTable(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_ipv4_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ipv4_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip route" 2> /dev/null | grep "^O"').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^O"') + .rstrip() + ) # Drop timers on end of line (older Quagga Versions) actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual) @@ -505,32 +621,40 @@ def test_zebra_ipv4_routingTable(): # and translating remaining implicit (single-digit) labels to label implicit-null actual = re.sub(r" label [0-9]+", " label implicit-null", actual) # Check if we have implicit labels - if not, 
then remove them from reference - if (not re.search(r" label implicit-null", actual)): + if not re.search(r" label implicit-null", actual): expected = re.sub(r", label implicit-null", "", expected) # now fix newlines of expected (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IPv4 zebra routing table", - title2="expected IPv4 zebra routing table") + title2="expected IPv4 zebra routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IPv4 Zebra Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed IPv4 Zebra Routing Table Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -542,7 +666,7 @@ def test_mpls_table(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -553,45 +677,54 @@ def test_mpls_table(): failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_table.ref' % (thisDir, i) + refTableFile = 
"%s/r%s/show_mpls_table.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls table" 2> /dev/null') + actual = net["r%s" % i].cmd('vtysh -c "show mpls table" 2> /dev/null') # Fix inconsistent Label numbers at beginning of line actual = re.sub(r"(\s+)[0-9]+(\s+LDP)", r"\1XX\2", actual) # Fix inconsistent Label numbers at end of line - actual = re.sub(r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+)[0-9][0-9]", r"\1XX", actual) + actual = re.sub( + r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+)[0-9][0-9]", r"\1XX", actual + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Sort lines which start with " XX LDP" - pattern = r'^\s+[0-9X]+\s+LDP' + pattern = r"^\s+[0-9X]+\s+LDP" swapped = True while swapped: swapped = False for j in range(1, len(actual)): - if re.search(pattern, actual[j]) and re.search(pattern, actual[j-1]): - if actual[j-1] > actual[j]: - temp = actual[j-1] - actual[j-1] = actual[j] + if re.search(pattern, actual[j]) and re.search( + pattern, actual[j - 1] + ): + if actual[j - 1] > actual[j]: + temp = actual[j - 1] + actual[j - 1] = actual[j] actual[j] = temp swapped = True # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS table output", - title2="expected MPLS table output") + title2="expected MPLS table output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS table output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS table output Check:\n%s\n" % (i, 
diff) + ) failures += 1 else: print("r%s ok" % i) @@ -600,7 +733,7 @@ def test_mpls_table(): # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -611,8 +744,8 @@ def test_linux_mpls_routes(): global fatal_error global net - # Skip if previous fatal error condition is raised - if (fatal_error != ""): + # Skip if previous fatal error condition is raised + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -622,15 +755,17 @@ def test_linux_mpls_routes(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/ip_mpls_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/ip_mpls_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('ip -o -family mpls route 2> /dev/null').rstrip() + actual = ( + net["r%s" % i].cmd("ip -o -family mpls route 2> /dev/null").rstrip() + ) # Mask out label and protocol actual = re.sub(r"[0-9][0-9] via inet ", "xx via inet ", actual) @@ -641,39 +776,48 @@ def test_linux_mpls_routes(): # Sort nexthops nexthop_sorted = [] for line in actual.splitlines(): - tokens = re.split(r'\\\t', line.strip()) - nexthop_sorted.append('{} {}'.format( - tokens[0].strip(), - ' '.join([ token.strip() for token in sorted(tokens[1:]) ]) - ).strip()) + tokens = re.split(r"\\\t", line.strip()) + nexthop_sorted.append( + "{} {}".format( + tokens[0].strip(), + " ".join([token.strip() for token in sorted(tokens[1:])]), + 
).strip() + ) # Sort lines and fixup differences between old and new iproute - actual = '\n'.join(sorted(nexthop_sorted)) + actual = "\n".join(sorted(nexthop_sorted)) actual = re.sub(r"nexthop via", "nexthopvia", actual) actual = re.sub(r" nexthop as to xx via inet ", " nexthopvia inet ", actual) actual = re.sub(r" weight 1", "", actual) actual = re.sub(r" [ ]+", " ", actual) # put \n back at line ends - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual Linux Kernel MPLS route", - title2="expected Linux Kernel MPLS route") + title2="expected Linux Kernel MPLS route", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Linux Kernel MPLS route output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Linux Kernel MPLS route output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "Linux Kernel MPLS route output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "Linux Kernel MPLS route output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -685,12 +829,14 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is 
None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -698,14 +844,14 @@ def test_shutdown_check_stderr(): print("******************************************\n") for i in range(1, 5): - net['r%s' % i].stopRouter() - log = net['r%s' % i].getStdErr('ldpd') + net["r%s" % i].stopRouter() + log = net["r%s" % i].getStdErr("ldpd") if log: print("\nRouter r%s LDPd StdErr Log:\n%s" % (i, log)) - log = net['r%s' % i].getStdErr('ospfd') + log = net["r%s" % i].getStdErr("ospfd") if log: print("\nRouter r%s OSPFd StdErr Log:\n%s" % (i, log)) - log = net['r%s' % i].getStdErr('zebra') + log = net["r%s" % i].getStdErr("zebra") if log: print("\nRouter r%s Zebra StdErr Log:\n%s" % (i, log)) @@ -715,23 +861,27 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) for i in range(1, 5): - net['r%s' % i].stopRouter() - net['r%s' % i].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r%s" % i].stopRouter() + net["r%s" % i].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = 
pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py index 0fae64402a..600d640a70 100755 --- a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py +++ b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py @@ -69,7 +69,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -80,8 +80,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -89,35 +91,36 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3']: + for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['ce1']) - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["ce1"]) + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["ce2"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['ce2']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["ce3"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['ce3']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + 
switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): "Sets up the pytest environment" @@ -129,22 +132,20 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) # Don't start ospfd and ldpd in the CE nodes - if router.name[0] == 'r': + if router.name[0] == "r": router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_LDP, - os.path.join(CWD, '{}/ldpd.conf'.format(rname)) + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -159,16 +160,16 @@ def router_compare_json_output(rname, command, reference): logger.info('Comparing router "%s" "%s" output', rname, command) tgen = get_topogen() - filename = '{}/{}/{}'.format(CWD, rname, reference) + filename = "{}/{}/{}".format(CWD, rname, reference) expected = json.loads(open(filename).read()) # Run test function until we get an result. Wait at most 80 seconds. 
- test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + def test_ospf_convergence(): logger.info("Test: check OSPF adjacencies") tgen = get_topogen() @@ -177,8 +178,11 @@ def test_ospf_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json" + ) + def test_rib(): logger.info("Test: verify RIB") @@ -188,9 +192,10 @@ def test_rib(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: + for rname in ["r1", "r2", "r3"]: router_compare_json_output(rname, "show ip route json", "show_ip_route.ref") + def test_ldp_adjacencies(): logger.info("Test: verify LDP adjacencies") tgen = get_topogen() @@ -199,8 +204,11 @@ def test_ldp_adjacencies(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show mpls ldp discovery json", "show_ldp_discovery.ref" + ) + def test_ldp_neighbors(): logger.info("Test: verify LDP neighbors") @@ -210,8 +218,11 @@ def test_ldp_neighbors(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show mpls ldp neighbor json", 
"show_ldp_neighbor.ref" + ) + def test_ldp_bindings(): logger.info("Test: verify LDP bindings") @@ -221,8 +232,11 @@ def test_ldp_bindings(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_binding.ref" + ) + def test_ldp_pwid_bindings(): logger.info("Test: verify LDP PW-ID bindings") @@ -232,8 +246,11 @@ def test_ldp_pwid_bindings(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref" + ) + def test_ldp_pseudowires(): logger.info("Test: verify LDP pseudowires") @@ -243,8 +260,11 @@ def test_ldp_pseudowires(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref" + ) + def test_ldp_pseudowires_after_link_down(): logger.info("Test: verify LDP pseudowires after r1-r2 link goes down") @@ -256,22 +276,26 @@ def test_ldp_pseudowires_after_link_down(): # Shut down r1-r2 link */ tgen = get_topogen() - tgen.gears['r1'].peer_link_enable('r1-eth1', False) + tgen.gears["r1"].peer_link_enable("r1-eth1", False) topotest.sleep(5, "Waiting for the network to reconverge") # check if the pseudowire is still up (using an alternate path for nexthop resolution) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + 
rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref" + ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index 997b72d691..cafba60abf 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -27,13 +27,17 @@ from lib import topotest from lib.topolog import logger # Import common_config to use commomnly used APIs -from lib.common_config import (create_common_configuration, - InvalidCLIError, - load_config_to_router, - check_address_types, - generate_ips, - find_interface_with_greater_ip, - run_frr_cmd, retry) +from lib.common_config import ( + create_common_configuration, + InvalidCLIError, + load_config_to_router, + check_address_types, + generate_ips, + validate_ip_address, + find_interface_with_greater_ip, + run_frr_cmd, + retry, +) BGP_CONVERGENCE_TIMEOUT = 10 @@ -79,6 +83,9 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False): "holddowntimer": 180, "dest_link": { "r4": { + "allowas-in": { + "number_occurences":2 + }, "prefix_lists": [ { "name": "pf_list_1", @@ -126,24 +133,31 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False): bgp_addr_data = bgp_data.setdefault("address_family", {}) if not bgp_addr_data: - logger.debug("Router %s: 'address_family' not present in " - "input_dict for BGP", router) + logger.debug( + "Router %s: 'address_family' not present in " "input_dict for BGP", + router, + ) else: ipv4_data = bgp_addr_data.setdefault("ipv4", {}) ipv6_data = bgp_addr_data.setdefault("ipv6", {}) - neigh_unicast = True if ipv4_data.setdefault("unicast", {}) \ - or ipv6_data.setdefault("unicast", 
{}) else False + neigh_unicast = ( + True + if ipv4_data.setdefault("unicast", {}) + or ipv6_data.setdefault("unicast", {}) + else False + ) if neigh_unicast: data_all_bgp = __create_bgp_unicast_neighbor( - tgen, topo, input_dict, router, - config_data=data_all_bgp) + tgen, topo, input_dict, router, config_data=data_all_bgp + ) try: - result = create_common_configuration(tgen, router, data_all_bgp, - "bgp", build) + result = create_common_configuration( + tgen, router, data_all_bgp, "bgp", build + ) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -182,8 +196,9 @@ def __create_bgp_global(tgen, input_dict, router, build=False): config_data = [] if "local_as" not in bgp_data and build: - logger.error("Router %s: 'local_as' not present in input_dict" - "for BGP", router) + logger.error( + "Router %s: 'local_as' not present in input_dict" "for BGP", router + ) return False local_as = bgp_data.setdefault("local_as", "") @@ -194,19 +209,20 @@ def __create_bgp_global(tgen, input_dict, router, build=False): config_data.append(cmd) + # Skip RFC8212 in topotests + config_data.append("no bgp ebgp-requires-policy") + router_id = bgp_data.setdefault("router_id", None) del_router_id = bgp_data.setdefault("del_router_id", False) if del_router_id: config_data.append("no bgp router-id") if router_id: - config_data.append("bgp router-id {}".format( - router_id)) + config_data.append("bgp router-id {}".format(router_id)) return config_data -def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, - config_data=None): +def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=None): """ Helper API to create configuration for address-family unicast @@ -235,11 +251,8 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, addr_data = addr_dict["unicast"] if addr_data: - config_data.append("address-family {} unicast".format( - addr_type - )) - advertise_network = addr_data.setdefault("advertise_networks", - []) + 
config_data.append("address-family {} unicast".format(addr_type)) + advertise_network = addr_data.setdefault("advertise_networks", []) for advertise_network_dict in advertise_network: network = advertise_network_dict["network"] if type(network) is not list: @@ -250,12 +263,10 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, else: no_of_network = 1 - del_action = advertise_network_dict.setdefault("delete", - False) + del_action = advertise_network_dict.setdefault("delete", False) # Generating IPs for verification - prefix = str( - ipaddr.IPNetwork(unicode(network[0])).prefixlen) + prefix = str(ipaddr.IPNetwork(unicode(network[0])).prefixlen) network_list = generate_ips(network, no_of_network) for ip in network_list: ip = str(ipaddr.IPNetwork(unicode(ip)).network) @@ -271,20 +282,17 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, ibgp = max_paths.setdefault("ibgp", None) ebgp = max_paths.setdefault("ebgp", None) if ibgp: - config_data.append("maximum-paths ibgp {}".format( - ibgp - )) + config_data.append("maximum-paths ibgp {}".format(ibgp)) if ebgp: - config_data.append("maximum-paths {}".format( - ebgp - )) + config_data.append("maximum-paths {}".format(ebgp)) aggregate_addresses = addr_data.setdefault("aggregate_address", []) for aggregate_address in aggregate_addresses: network = aggregate_address.setdefault("network", None) if not network: - logger.debug("Router %s: 'network' not present in " - "input_dict for BGP", router) + logger.debug( + "Router %s: 'network' not present in " "input_dict for BGP", router + ) else: cmd = "aggregate-address {}".format(network) @@ -305,13 +313,12 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, if redistribute_data: for redistribute in redistribute_data: if "redist_type" not in redistribute: - logger.error("Router %s: 'redist_type' not present in " - "input_dict", router) + logger.error( + "Router %s: 'redist_type' not present in " "input_dict", router + ) else: - 
cmd = "redistribute {}".format( - redistribute["redist_type"]) - redist_attr = redistribute.setdefault("attribute", - None) + cmd = "redistribute {}".format(redistribute["redist_type"]) + redist_attr = redistribute.setdefault("attribute", None) if redist_attr: cmd = "{} {}".format(cmd, redist_attr) del_action = redistribute.setdefault("delete", False) @@ -320,8 +327,9 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data.append(cmd) if "neighbor" in addr_data: - neigh_data = __create_bgp_neighbor(topo, input_dict, - router, addr_type, add_neigh) + neigh_data = __create_bgp_neighbor( + topo, input_dict, router, addr_type, add_neigh + ) config_data.extend(neigh_data) for addr_type, addr_dict in bgp_data.iteritems(): @@ -331,11 +339,11 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, addr_data = addr_dict["unicast"] if "neighbor" in addr_data: neigh_addr_data = __create_bgp_unicast_address_family( - topo, input_dict, router, addr_type, add_neigh) + topo, input_dict, router, addr_type, add_neigh + ) config_data.extend(neigh_addr_data) - logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()") return config_data @@ -365,12 +373,10 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): update_source = None if dest_link in nh_details["links"].keys(): - ip_addr = \ - nh_details["links"][dest_link][addr_type].split("/")[0] + ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] # Loopback interface if "source_link" in peer and peer["source_link"] == "lo": - update_source = topo[router]["links"]["lo"][ - addr_type].split("/")[0] + update_source = topo[router]["links"]["lo"][addr_type].split("/")[0] neigh_cxt = "neighbor {}".format(ip_addr) @@ -380,41 +386,44 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): config_data.append("address-family ipv6 unicast") config_data.append("{} activate".format(neigh_cxt)) - disable_connected = 
peer.setdefault("disable_connected_check", - False) + disable_connected = peer.setdefault("disable_connected_check", False) keep_alive = peer.setdefault("keepalivetimer", 60) hold_down = peer.setdefault("holddowntimer", 180) password = peer.setdefault("password", None) max_hop_limit = peer.setdefault("ebgp_multihop", 1) if update_source: - config_data.append("{} update-source {}".format( - neigh_cxt, update_source)) + config_data.append( + "{} update-source {}".format(neigh_cxt, update_source) + ) if disable_connected: - config_data.append("{} disable-connected-check".format( - disable_connected)) + config_data.append( + "{} disable-connected-check".format(disable_connected) + ) if update_source: - config_data.append("{} update-source {}".format(neigh_cxt, - update_source)) + config_data.append( + "{} update-source {}".format(neigh_cxt, update_source) + ) if int(keep_alive) != 60 and int(hold_down) != 180: config_data.append( - "{} timers {} {}".format(neigh_cxt, keep_alive, - hold_down)) + "{} timers {} {}".format(neigh_cxt, keep_alive, hold_down) + ) if password: - config_data.append( - "{} password {}".format(neigh_cxt, password)) + config_data.append("{} password {}".format(neigh_cxt, password)) if max_hop_limit > 1: - config_data.append("{} ebgp-multihop {}".format(neigh_cxt, - max_hop_limit)) + config_data.append( + "{} ebgp-multihop {}".format(neigh_cxt, max_hop_limit) + ) config_data.append("{} enforce-multihop".format(neigh_cxt)) logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()") return config_data -def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, - add_neigh=True): +def __create_bgp_unicast_address_family( + topo, input_dict, router, addr_type, add_neigh=True +): """ API prints bgp global config to bgp_json file. 
@@ -440,37 +449,34 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, nh_details = topo[peer_name] # Loopback interface if "source_link" in peer and peer["source_link"] == "lo": - for destRouterLink, data in sorted(nh_details["links"]. - iteritems()): + for destRouterLink, data in sorted(nh_details["links"].iteritems()): if "type" in data and data["type"] == "loopback": if dest_link == destRouterLink: - ip_addr = \ - nh_details["links"][destRouterLink][ - addr_type].split("/")[0] + ip_addr = nh_details["links"][destRouterLink][ + addr_type + ].split("/")[0] # Physical interface else: if dest_link in nh_details["links"].keys(): - ip_addr = nh_details["links"][dest_link][ - addr_type].split("/")[0] + ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] if addr_type == "ipv4" and bgp_data["ipv6"]: - deactivate = nh_details["links"][ - dest_link]["ipv6"].split("/")[0] + deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[ + 0 + ] neigh_cxt = "neighbor {}".format(ip_addr) - config_data.append("address-family {} unicast".format( - addr_type - )) + config_data.append("address-family {} unicast".format(addr_type)) if deactivate: - config_data.append( - "no neighbor {} activate".format(deactivate)) + config_data.append("no neighbor {} activate".format(deactivate)) next_hop_self = peer.setdefault("next_hop_self", None) send_community = peer.setdefault("send_community", None) prefix_lists = peer.setdefault("prefix_lists", {}) route_maps = peer.setdefault("route_maps", {}) no_send_community = peer.setdefault("no_send_community", None) + allowas_in = peer.setdefault("allowas-in", None) # next-hop-self if next_hop_self: @@ -481,21 +487,30 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, # no_send_community if no_send_community: - config_data.append("no {} send-community {}".format( - neigh_cxt, no_send_community)) + config_data.append( + "no {} send-community {}".format(neigh_cxt, 
no_send_community) + ) + + if "allowas_in" in peer: + allow_as_in = peer["allowas_in"] + config_data.append("{} allowas-in {}".format(neigh_cxt, allow_as_in)) + if "no_allowas_in" in peer: + allow_as_in = peer["no_allowas_in"] + config_data.append("no {} allowas-in {}".format(neigh_cxt, allow_as_in)) if prefix_lists: for prefix_list in prefix_lists: name = prefix_list.setdefault("name", {}) direction = prefix_list.setdefault("direction", "in") del_action = prefix_list.setdefault("delete", False) if not name: - logger.info("Router %s: 'name' not present in " - "input_dict for BGP neighbor prefix lists", - router) + logger.info( + "Router %s: 'name' not present in " + "input_dict for BGP neighbor prefix lists", + router, + ) else: - cmd = "{} prefix-list {} {}".format(neigh_cxt, name, - direction) + cmd = "{} prefix-list {} {}".format(neigh_cxt, name, direction) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) @@ -506,16 +521,28 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, direction = route_map.setdefault("direction", "in") del_action = route_map.setdefault("delete", False) if not name: - logger.info("Router %s: 'name' not present in " - "input_dict for BGP neighbor route name", - router) + logger.info( + "Router %s: 'name' not present in " + "input_dict for BGP neighbor route name", + router, + ) else: - cmd = "{} route-map {} {}".format(neigh_cxt, name, - direction) + cmd = "{} route-map {} {}".format(neigh_cxt, name, direction) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) + if allowas_in: + number_occurences = allowas_in.setdefault("number_occurences", {}) + del_action = allowas_in.setdefault("delete", False) + + cmd = "{} allowas-in {}".format(neigh_cxt, number_occurences) + + if del_action: + cmd = "no {}".format(cmd) + + config_data.append(cmd) + return config_data @@ -564,12 +591,10 @@ def verify_router_id(tgen, topo, input_dict): rnode = tgen.routers()[router] - del_router_id = 
input_dict[router]["bgp"].setdefault( - "del_router_id", False) + del_router_id = input_dict[router]["bgp"].setdefault("del_router_id", False) logger.info("Checking router %s router-id", router) - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) router_id_out = show_bgp_json["ipv4Unicast"]["routerId"] router_id_out = ipaddr.IPv4Address(unicode(router_id_out)) @@ -582,12 +607,12 @@ def verify_router_id(tgen, topo, input_dict): router_id = ipaddr.IPv4Address(unicode(router_id)) if router_id == router_id_out: - logger.info("Found expected router-id %s for router %s", - router_id, router) + logger.info("Found expected router-id %s for router %s", router_id, router) else: - errormsg = "Router-id for router:{} mismatch, expected:" \ - " {} but found:{}".format(router, router_id, - router_id_out) + errormsg = ( + "Router-id for router:{} mismatch, expected:" + " {} but found:{}".format(router, router_id, router_id_out) + ) return errormsg logger.debug("Exiting lib API: verify_router_id()") @@ -617,9 +642,11 @@ def verify_bgp_convergence(tgen, topo): logger.debug("Entering lib API: verify_bgp_convergence()") for router, rnode in tgen.routers().iteritems(): + if "bgp" not in topo["routers"][router]: + continue + logger.info("Verifying BGP Convergence on router %s", router) - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -647,15 +674,12 @@ def verify_bgp_convergence(tgen, topo): for dest_link in peer_data["dest_link"].keys(): data = topo["routers"][bgp_neighbor]["links"] if dest_link in data: - neighbor_ip = \ - data[dest_link][addr_type].split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] if addr_type == "ipv4": - ipv4_data = 
show_bgp_json["ipv4Unicast"][ - "peers"] + ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] nh_state = ipv4_data[neighbor_ip]["state"] else: - ipv6_data = show_bgp_json["ipv6Unicast"][ - "peers"] + ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] if nh_state == "Established": @@ -663,8 +687,7 @@ def verify_bgp_convergence(tgen, topo): if no_of_peer == total_peer: logger.info("BGP is Converged for router %s", router) else: - errormsg = "BGP is not converged for router {}".format( - router) + errormsg = "BGP is not converged for router {}".format(router) return errormsg logger.debug("Exiting API: verify_bgp_convergence()") @@ -707,16 +730,9 @@ def modify_as_number(tgen, topo, input_dict): for router in input_dict.keys(): # Remove bgp configuration - router_dict.update({ - router: { - "bgp": { - "delete": True - } - } - }) + router_dict.update({router: {"bgp": {"delete": True}}}) - new_topo[router]["bgp"]["local_as"] = \ - input_dict[router]["bgp"]["local_as"] + new_topo[router]["bgp"]["local_as"] = input_dict[router]["bgp"]["local_as"] logger.info("Removing bgp configuration") create_router_bgp(tgen, topo, router_dict) @@ -777,8 +793,9 @@ def verify_as_numbers(tgen, topo, input_dict): logger.info("Verifying AS numbers for dut %s:", router) - show_ip_bgp_neighbor_json = run_frr_cmd(rnode, - "show ip bgp neighbor json", isjson=True) + show_ip_bgp_neighbor_json = run_frr_cmd( + rnode, "show ip bgp neighbor json", isjson=True + ) local_as = input_dict[router]["bgp"]["local_as"] bgp_addr_type = topo["routers"][router]["bgp"]["address_family"] @@ -786,8 +803,7 @@ def verify_as_numbers(tgen, topo, input_dict): if not check_address_types(addr_type): continue - bgp_neighbors = bgp_addr_type[addr_type]["unicast"][ - "neighbor"] + bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"] for bgp_neighbor, peer_data in bgp_neighbors.iteritems(): remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"] @@ -796,32 +812,42 @@ def 
verify_as_numbers(tgen, topo, input_dict): data = topo["routers"][bgp_neighbor]["links"] if dest_link in data: - neighbor_ip = data[dest_link][addr_type]. \ - split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] neigh_data = show_ip_bgp_neighbor_json[neighbor_ip] # Verify Local AS for router if neigh_data["localAs"] != local_as: - errormsg = "Failed: Verify local_as for dut {}," \ - " found: {} but expected: {}".format( - router, neigh_data["localAs"], - local_as) + errormsg = ( + "Failed: Verify local_as for dut {}," + " found: {} but expected: {}".format( + router, neigh_data["localAs"], local_as + ) + ) return errormsg else: - logger.info("Verified local_as for dut %s, found" - " expected: %s", router, local_as) + logger.info( + "Verified local_as for dut %s, found" " expected: %s", + router, + local_as, + ) # Verify Remote AS for neighbor if neigh_data["remoteAs"] != remote_as: - errormsg = "Failed: Verify remote_as for dut " \ - "{}'s neighbor {}, found: {} but " \ - "expected: {}".format( - router, bgp_neighbor, - neigh_data["remoteAs"], remote_as) + errormsg = ( + "Failed: Verify remote_as for dut " + "{}'s neighbor {}, found: {} but " + "expected: {}".format( + router, bgp_neighbor, neigh_data["remoteAs"], remote_as + ) + ) return errormsg else: - logger.info("Verified remote_as for dut %s's " - "neighbor %s, found expected: %s", - router, bgp_neighbor, remote_as) + logger.info( + "Verified remote_as for dut %s's " + "neighbor %s, found expected: %s", + router, + bgp_neighbor, + remote_as, + ) logger.debug("Exiting lib API: verify_AS_numbers()") return True @@ -862,12 +888,14 @@ def clear_bgp_and_verify(tgen, topo, router): for retry in range(31): sleeptime = 3 # Waiting for BGP to converge - logger.info("Waiting for %s sec for BGP to converge on router" - " %s...", sleeptime, router) + logger.info( + "Waiting for %s sec for BGP to converge on router" " %s...", + sleeptime, + router, + ) sleep(sleeptime) - show_bgp_json = 
run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -897,38 +925,39 @@ def clear_bgp_and_verify(tgen, topo, router): if dest_link in data: neighbor_ip = data[dest_link][addr_type].split("/")[0] if addr_type == "ipv4": - ipv4_data = show_bgp_json["ipv4Unicast"][ - "peers"] + ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] nh_state = ipv4_data[neighbor_ip]["state"] # Peer up time dictionary - peer_uptime_before_clear_bgp[bgp_neighbor] = \ - ipv4_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_before_clear_bgp[bgp_neighbor] = ipv4_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] else: - ipv6_data = show_bgp_json["ipv6Unicast"][ - "peers"] + ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] # Peer up time dictionary - peer_uptime_before_clear_bgp[bgp_neighbor] = \ - ipv6_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_before_clear_bgp[bgp_neighbor] = ipv6_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] if nh_state == "Established": no_of_peer += 1 if no_of_peer == total_peer: - logger.info("BGP is Converged for router %s before bgp" - " clear", router) + logger.info("BGP is Converged for router %s before bgp" " clear", router) break else: - logger.info("BGP is not yet Converged for router %s " - "before bgp clear", router) + logger.info( + "BGP is not yet Converged for router %s " "before bgp clear", router + ) else: - errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \ - " router {}".format(router) + errormsg = ( + "TIMEOUT!! 
BGP is not converged in 30 seconds for" + " router {}".format(router) + ) return errormsg - logger.info(peer_uptime_before_clear_bgp) # Clearing BGP logger.info("Clearing BGP neighborship for router %s..", router) for addr_type in bgp_addr_type.keys(): @@ -942,13 +971,14 @@ def clear_bgp_and_verify(tgen, topo, router): for retry in range(31): sleeptime = 3 # Waiting for BGP to converge - logger.info("Waiting for %s sec for BGP to converge on router" - " %s...", sleeptime, router) + logger.info( + "Waiting for %s sec for BGP to converge on router" " %s...", + sleeptime, + router, + ) sleep(sleeptime) - - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -975,44 +1005,46 @@ def clear_bgp_and_verify(tgen, topo, router): data = topo["routers"][bgp_neighbor]["links"] if dest_link in data: - neighbor_ip = data[dest_link][addr_type].\ - split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] if addr_type == "ipv4": - ipv4_data = show_bgp_json["ipv4Unicast"][ - "peers"] + ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] nh_state = ipv4_data[neighbor_ip]["state"] - peer_uptime_after_clear_bgp[bgp_neighbor] = \ - ipv4_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_after_clear_bgp[bgp_neighbor] = ipv4_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] else: - ipv6_data = show_bgp_json["ipv6Unicast"][ - "peers"] + ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] # Peer up time dictionary - peer_uptime_after_clear_bgp[bgp_neighbor] = \ - ipv6_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_after_clear_bgp[bgp_neighbor] = ipv6_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] if nh_state == "Established": no_of_peer += 1 if no_of_peer == total_peer: - 
logger.info("BGP is Converged for router %s after bgp clear", - router) + logger.info("BGP is Converged for router %s after bgp clear", router) break else: - logger.info("BGP is not yet Converged for router %s after" - " bgp clear", router) + logger.info( + "BGP is not yet Converged for router %s after" " bgp clear", router + ) else: - errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \ - " router {}".format(router) + errormsg = ( + "TIMEOUT!! BGP is not converged in 30 seconds for" + " router {}".format(router) + ) return errormsg - logger.info(peer_uptime_after_clear_bgp) + # Comparing peerUptimeEstablishedEpoch dictionaries if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp: - logger.info("BGP neighborship is reset after clear BGP on router %s", - router) + logger.info("BGP neighborship is reset after clear BGP on router %s", router) else: - errormsg = "BGP neighborship is not reset after clear bgp on router" \ - " {}".format(router) + errormsg = ( + "BGP neighborship is not reset after clear bgp on router" + " {}".format(router) + ) return errormsg logger.debug("Exiting lib API: clear_bgp_and_verify()") @@ -1060,11 +1092,11 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): rnode = router_list[router] - logger.info("Verifying bgp timers functionality, DUT is %s:", - router) + logger.info("Verifying bgp timers functionality, DUT is %s:", router) - show_ip_bgp_neighbor_json = \ - run_frr_cmd(rnode, "show ip bgp neighbor json", isjson=True) + show_ip_bgp_neighbor_json = run_frr_cmd( + rnode, "show ip bgp neighbor json", isjson=True + ) bgp_addr_type = input_dict[router]["bgp"]["address_family"] @@ -1072,8 +1104,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): if not check_address_types(addr_type): continue - bgp_neighbors = bgp_addr_type[addr_type]["unicast"][ - "neighbor"] + bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"] for bgp_neighbor, peer_data in bgp_neighbors.iteritems(): for 
dest_link, peer_dict in peer_data["dest_link"].iteritems(): data = topo["routers"][bgp_neighbor]["links"] @@ -1082,32 +1113,41 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): holddowntimer = peer_dict["holddowntimer"] if dest_link in data: - neighbor_ip = data[dest_link][addr_type]. \ - split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] neighbor_intf = data[dest_link]["interface"] # Verify HoldDownTimer for neighbor - bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[ - neighbor_ip]["bgpTimerHoldTimeMsecs"] + bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][ + "bgpTimerHoldTimeMsecs" + ] if bgpHoldTimeMsecs != holddowntimer * 1000: - errormsg = "Verifying holddowntimer for bgp " \ - "neighbor {} under dut {}, found: {} " \ - "but expected: {}".format( - neighbor_ip, router, - bgpHoldTimeMsecs, - holddowntimer * 1000) + errormsg = ( + "Verifying holddowntimer for bgp " + "neighbor {} under dut {}, found: {} " + "but expected: {}".format( + neighbor_ip, + router, + bgpHoldTimeMsecs, + holddowntimer * 1000, + ) + ) return errormsg # Verify KeepAliveTimer for neighbor - bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[ - neighbor_ip]["bgpTimerKeepAliveIntervalMsecs"] + bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][ + "bgpTimerKeepAliveIntervalMsecs" + ] if bgpKeepAliveTimeMsecs != keepalivetimer * 1000: - errormsg = "Verifying keepalivetimer for bgp " \ - "neighbor {} under dut {}, found: {} " \ - "but expected: {}".format( - neighbor_ip, router, - bgpKeepAliveTimeMsecs, - keepalivetimer * 1000) + errormsg = ( + "Verifying keepalivetimer for bgp " + "neighbor {} under dut {}, found: {} " + "but expected: {}".format( + neighbor_ip, + router, + bgpKeepAliveTimeMsecs, + keepalivetimer * 1000, + ) + ) return errormsg #################### @@ -1120,40 +1160,50 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): # Wait till keep alive time logger.info("=" * 20) logger.info("Scenario 1:") - 
logger.info("Shutdown and bring up peer interface: %s " - "in keep alive time : %s sec and verify " - " BGP neighborship is intact in %s sec ", - neighbor_intf, keepalivetimer, - (holddowntimer - keepalivetimer)) + logger.info( + "Shutdown and bring up peer interface: %s " + "in keep alive time : %s sec and verify " + " BGP neighborship is intact in %s sec ", + neighbor_intf, + keepalivetimer, + (holddowntimer - keepalivetimer), + ) logger.info("=" * 20) logger.info("Waiting for %s sec..", keepalivetimer) sleep(keepalivetimer) # Shutting down peer ineterface - logger.info("Shutting down interface %s on router %s", - neighbor_intf, bgp_neighbor) + logger.info( + "Shutting down interface %s on router %s", + neighbor_intf, + bgp_neighbor, + ) topotest.interface_set_status( - router_list[bgp_neighbor], neighbor_intf, - ifaceaction=False) + router_list[bgp_neighbor], neighbor_intf, ifaceaction=False + ) # Bringing up peer interface sleep(5) - logger.info("Bringing up interface %s on router %s..", - neighbor_intf, bgp_neighbor) + logger.info( + "Bringing up interface %s on router %s..", + neighbor_intf, + bgp_neighbor, + ) topotest.interface_set_status( - router_list[bgp_neighbor], neighbor_intf, - ifaceaction=True) + router_list[bgp_neighbor], neighbor_intf, ifaceaction=True + ) # Verifying BGP neighborship is intact in # (holddown - keepalive) time - for timer in range(keepalivetimer, holddowntimer, - int(holddowntimer / 3)): + for timer in range( + keepalivetimer, holddowntimer, int(holddowntimer / 3) + ): logger.info("Waiting for %s sec..", keepalivetimer) sleep(keepalivetimer) sleep(2) - show_bgp_json = \ - run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd( + rnode, "show bgp summary json", isjson=True + ) if addr_type == "ipv4": ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] @@ -1162,17 +1212,22 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = 
ipv6_data[neighbor_ip]["state"] - if timer == \ - (holddowntimer - keepalivetimer): + if timer == (holddowntimer - keepalivetimer): if nh_state != "Established": - errormsg = "BGP neighborship has not gone " \ - "down in {} sec for neighbor {}" \ - .format(timer, bgp_neighbor) + errormsg = ( + "BGP neighborship has not gone " + "down in {} sec for neighbor {}".format( + timer, bgp_neighbor + ) + ) return errormsg else: - logger.info("BGP neighborship is intact in %s" - " sec for neighbor %s", - timer, bgp_neighbor) + logger.info( + "BGP neighborship is intact in %s" + " sec for neighbor %s", + timer, + bgp_neighbor, + ) #################### # Shutting down peer interface and verifying that BGP @@ -1180,27 +1235,36 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): #################### logger.info("=" * 20) logger.info("Scenario 2:") - logger.info("Shutdown peer interface: %s and verify BGP" - " neighborship has gone down in hold down " - "time %s sec", neighbor_intf, holddowntimer) + logger.info( + "Shutdown peer interface: %s and verify BGP" + " neighborship has gone down in hold down " + "time %s sec", + neighbor_intf, + holddowntimer, + ) logger.info("=" * 20) - logger.info("Shutting down interface %s on router %s..", - neighbor_intf, bgp_neighbor) - topotest.interface_set_status(router_list[bgp_neighbor], - neighbor_intf, - ifaceaction=False) + logger.info( + "Shutting down interface %s on router %s..", + neighbor_intf, + bgp_neighbor, + ) + topotest.interface_set_status( + router_list[bgp_neighbor], neighbor_intf, ifaceaction=False + ) # Verifying BGP neighborship is going down in holddown time - for timer in range(keepalivetimer, - (holddowntimer + keepalivetimer), - int(holddowntimer / 3)): + for timer in range( + keepalivetimer, + (holddowntimer + keepalivetimer), + int(holddowntimer / 3), + ): logger.info("Waiting for %s sec..", keepalivetimer) sleep(keepalivetimer) sleep(2) - show_bgp_json = \ - run_frr_cmd(rnode, "show bgp summary json", 
- isjson=True) + show_bgp_json = run_frr_cmd( + rnode, "show bgp summary json", isjson=True + ) if addr_type == "ipv4": ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] @@ -1211,22 +1275,29 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): if timer == holddowntimer: if nh_state == "Established": - errormsg = "BGP neighborship has not gone " \ - "down in {} sec for neighbor {}" \ - .format(timer, bgp_neighbor) + errormsg = ( + "BGP neighborship has not gone " + "down in {} sec for neighbor {}".format( + timer, bgp_neighbor + ) + ) return errormsg else: - logger.info("BGP neighborship has gone down in" - " %s sec for neighbor %s", - timer, bgp_neighbor) + logger.info( + "BGP neighborship has gone down in" + " %s sec for neighbor %s", + timer, + bgp_neighbor, + ) logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()") return True @retry(attempts=3, wait=4, return_is_str=True) -def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, - input_dict, seq_id=None): +def verify_bgp_attributes( + tgen, addr_type, dut, static_routes, rmap_name, input_dict, seq_id=None +): """ API will verify BGP attributes set by Route-map for given prefix and DUT. 
it will run "show bgp ipv4/ipv6 {prefix_address} json" command @@ -1256,7 +1327,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, } }, "set": { - "localpref": 150, + "locPrf": 150, "weight": 100 } }], @@ -1269,7 +1340,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, } }, "set": { - "med": 50 + "metric": 50 } }] } @@ -1288,7 +1359,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, if router != dut: continue - logger.info('Verifying BGP set attributes for dut {}:'.format(router)) + logger.info("Verifying BGP set attributes for dut {}:".format(router)) for static_route in static_routes: cmd = "show bgp {} {} json".format(addr_type, static_route) @@ -1297,8 +1368,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, dict_to_test = [] tmp_list = [] for rmap_router in input_dict.keys(): - for rmap, values in input_dict[rmap_router][ - "route_maps"].items(): + for rmap, values in input_dict[rmap_router]["route_maps"].items(): if rmap == rmap_name: dict_to_test = values for rmap_dict in values: @@ -1307,8 +1377,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, seq_id = [seq_id] if "seq_id" in rmap_dict: - rmap_seq_id = \ - rmap_dict["seq_id"] + rmap_seq_id = rmap_dict["seq_id"] for _seq_id in seq_id: if _seq_id == rmap_seq_id: tmp_list.append(rmap_dict) @@ -1318,55 +1387,56 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, for rmap_dict in dict_to_test: if "set" in rmap_dict: for criteria in rmap_dict["set"].keys(): - if criteria not in show_bgp_json[ - "paths"][0]: - errormsg = ("BGP attribute: {}" - " is not found in" - " cli: {} output " - "in router {}". 
- format(criteria, - cmd, - router)) + if criteria not in show_bgp_json["paths"][0]: + errormsg = ( + "BGP attribute: {}" + " is not found in" + " cli: {} output " + "in router {}".format(criteria, cmd, router) + ) return errormsg - if rmap_dict["set"][criteria] == \ - show_bgp_json["paths"][0][ - criteria]: - logger.info("Verifying BGP " - "attribute {} for" - " route: {} in " - "router: {}, found" - " expected value:" - " {}". - format(criteria, - static_route, - dut, - rmap_dict[ - "set"][ - criteria])) + if ( + rmap_dict["set"][criteria] + == show_bgp_json["paths"][0][criteria] + ): + logger.info( + "Verifying BGP " + "attribute {} for" + " route: {} in " + "router: {}, found" + " expected value:" + " {}".format( + criteria, + static_route, + dut, + rmap_dict["set"][criteria], + ) + ) else: - errormsg = \ - ("Failed: Verifying BGP " - "attribute {} for route:" - " {} in router: {}, " - " expected value: {} but" - " found: {}". - format(criteria, - static_route, - dut, - rmap_dict["set"] - [criteria], - show_bgp_json[ - 'paths'][ - 0][criteria])) + errormsg = ( + "Failed: Verifying BGP " + "attribute {} for route:" + " {} in router: {}, " + " expected value: {} but" + " found: {}".format( + criteria, + static_route, + dut, + rmap_dict["set"][criteria], + show_bgp_json["paths"][0][criteria], + ) + ) return errormsg logger.debug("Exiting lib API: verify_bgp_attributes()") return True + @retry(attempts=4, wait=2, return_is_str=True, initial_wait=2) -def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, - attribute): +def verify_best_path_as_per_bgp_attribute( + tgen, addr_type, router, input_dict, attribute +): """ API is to verify best path according to BGP attributes for given routes. 
"show bgp ipv4/6 json" command will be run and verify best path according @@ -1406,7 +1476,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, } } } - attribute = "localpref" + attribute = "locPrf" result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut, \ input_dict, attribute) Returns @@ -1443,40 +1513,38 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, attribute_dict[next_hop_ip] = route_attribute[attribute] # AS_PATH attribute - if attribute == "aspath": + if attribute == "path": # Find next_hop for the route have minimum as_path - _next_hop = min(attribute_dict, key=lambda x: len(set( - attribute_dict[x]))) + _next_hop = min( + attribute_dict, key=lambda x: len(set(attribute_dict[x])) + ) compare = "SHORTEST" # LOCAL_PREF attribute - elif attribute == "localpref": + elif attribute == "locPrf": # Find next_hop for the route have highest local preference - _next_hop = max(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = max(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "HIGHEST" # WEIGHT attribute elif attribute == "weight": # Find next_hop for the route have highest weight - _next_hop = max(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = max(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "HIGHEST" # ORIGIN attribute elif attribute == "origin": # Find next_hop for the route have IGP as origin, - # - rule is IGP>EGP>INCOMPLETE - _next_hop = [key for (key, value) in - attribute_dict.iteritems() - if value == "IGP"][0] + _next_hop = [ + key for (key, value) in attribute_dict.iteritems() if value == "IGP" + ][0] compare = "" # MED attribute - elif attribute == "med": + elif attribute == "metric": # Find next_hop for the route have LOWEST MED - _next_hop = min(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "LOWEST" # Show ip route @@ -1489,8 
+1557,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, # Verifying output dictionary rib_routes_json is not empty if not bool(rib_routes_json): - errormsg = "No route found in RIB of router {}..". \ - format(router) + errormsg = "No route found in RIB of router {}..".format(router) return errormsg st_found = False @@ -1499,31 +1566,41 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, if route in rib_routes_json: st_found = True # Verify next_hop in rib_routes_json - if rib_routes_json[route][0]["nexthops"][0]["ip"] in \ - attribute_dict: + if rib_routes_json[route][0]["nexthops"][0]["ip"] in attribute_dict: nh_found = True else: - errormsg = "Incorrect Nexthop for BGP route {} in " \ - "RIB of router {}, Expected: {}, Found:" \ - " {}\n".format(route, router, - rib_routes_json[route][0][ - "nexthops"][0]["ip"], - _next_hop) + errormsg = ( + "Incorrect Nexthop for BGP route {} in " + "RIB of router {}, Expected: {}, Found:" + " {}\n".format( + route, + router, + rib_routes_json[route][0]["nexthops"][0]["ip"], + _next_hop, + ) + ) return errormsg if st_found and nh_found: logger.info( "Best path for prefix: %s with next_hop: %s is " "installed according to %s %s: (%s) in RIB of " - "router %s", route, _next_hop, compare, - attribute, attribute_dict[_next_hop], router) + "router %s", + route, + _next_hop, + compare, + attribute, + attribute_dict[_next_hop], + router, + ) logger.debug("Exiting lib API: verify_best_path_as_per_bgp_attribute()") return True -def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, - attribute): +def verify_best_path_as_per_admin_distance( + tgen, addr_type, router, input_dict, attribute +): """ API is to verify best path according to admin distance for given route. 
"show ip/ipv6 route json" command will be run and verify @@ -1548,7 +1625,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, {"network": "200.50.2.0/32", \ "admin_distance": 60, "next_hop": "10.0.0.18"}] }} - attribute = "localpref" + attribute = "locPrf" result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut, \ input_dict, attribute): Returns @@ -1574,7 +1651,8 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, for routes_from_router in input_dict.keys(): sh_ip_route_json = router_list[routes_from_router].vtysh_cmd( - command, isjson=True) + command, isjson=True + ) networks = input_dict[routes_from_router]["static_routes"] for network in networks: route = network["network"] @@ -1590,8 +1668,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, attribute_dict[next_hop_ip] = route_attribute["distance"] # Find next_hop for the route have LOWEST Admin Distance - _next_hop = min(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "LOWEST" # Show ip route @@ -1608,21 +1685,523 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, if route in rib_routes_json: st_found = True # Verify next_hop in rib_routes_json - if rib_routes_json[route][0]["nexthops"][0]["ip"] == \ - _next_hop: + if rib_routes_json[route][0]["nexthops"][0]["ip"] == _next_hop: nh_found = True else: - errormsg = ("Nexthop {} is Missing for BGP route {}" - " in RIB of router {}\n".format(_next_hop, - route, router)) + errormsg = ( + "Nexthop {} is Missing for BGP route {}" + " in RIB of router {}\n".format(_next_hop, route, router) + ) return errormsg if st_found and nh_found: - logger.info("Best path for prefix: %s is installed according" - " to %s %s: (%s) in RIB of router %s", route, - compare, attribute, - attribute_dict[_next_hop], router) + logger.info( + "Best path for prefix: %s is 
installed according" + " to %s %s: (%s) in RIB of router %s", + route, + compare, + attribute, + attribute_dict[_next_hop], + router, + ) + + logger.info("Exiting lib API: verify_best_path_as_per_admin_distance()") + return True + + +@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) +def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None): + """ + This API is to verify whether bgp rib has any + matching route for a nexthop. + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: input dut router name + * `addr_type` : ip type ipv4/ipv6 + * `input_dict` : input dict, has details of static routes + * `next_hop`[optional]: next_hop which needs to be verified, + default = static + * 'aspath'[optional]: aspath which needs to be verified + + Usage + ----- + dut = 'r1' + next_hop = "192.168.1.10" + input_dict = topo['routers'] + aspath = "100 200 300" + result = verify_bgp_rib(tgen, addr_type, dut, tgen, input_dict, + next_hop, aspath) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: verify_bgp_rib()") + + router_list = tgen.routers() + additional_nexthops_in_required_nhs = [] + list1 = [] + list2 = [] + for routerInput in input_dict.keys(): + for router, rnode in router_list.iteritems(): + if router != dut: + continue + + # Verifying RIB routes + command = "show bgp" + + # Static routes + sleep(2) + logger.info("Checking router {} BGP RIB:".format(dut)) + + if "static_routes" in input_dict[routerInput]: + static_routes = input_dict[routerInput]["static_routes"] + + for static_route in static_routes: + found_routes = [] + missing_routes = [] + st_found = False + nh_found = False + vrf = static_route.setdefault("vrf", None) + if vrf: + cmd = "{} vrf {} {}".format(command, vrf, addr_type) + + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not 
empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + network = static_route["network"] + + if "no_of_ip" in static_route: + no_of_ip = static_route["no_of_ip"] + else: + no_of_ip = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_ip) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + st_found = True + found_routes.append(st_rt) + + if next_hop: + if not isinstance(next_hop, list): + next_hop = [next_hop] + list1 = next_hop + found_hops = [ + rib_r["ip"] + for rib_r in rib_routes_json["routes"][st_rt][0][ + "nexthops" + ] + ] + list2 = found_hops + missing_list_of_nexthops = set(list2).difference(list1) + additional_nexthops_in_required_nhs = set( + list1 + ).difference(list2) + + if list2: + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + errormsg = ( + "Nexthop {} is Missing for " + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) + return errormsg + else: + nh_found = True + if aspath: + found_paths = rib_routes_json["routes"][st_rt][0][ + "path" + ] + if aspath == found_paths: + aspath_found = True + logger.info( + "Found AS path {} for route" + " {} in RIB of router " + "{}\n".format(aspath, st_rt, dut) + ) + else: + errormsg = ( + "AS Path {} is missing for route" + "for route {} in RIB of router {}\n".format( + aspath, st_rt, dut + ) + ) + return errormsg + + else: + missing_routes.append(st_rt) + + if nh_found: + logger.info( + "Found next_hop {} for all bgp" + " routes in RIB of" + " router {}\n".format(next_hop, router) + ) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in RIB of router {}, " + 
"routes: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, " + "found routes are: {} \n".format(dut, found_routes) + ) + continue + + if "bgp" not in input_dict[routerInput]: + continue + + # Advertise networks + bgp_data_list = input_dict[routerInput]["bgp"] + + if type(bgp_data_list) is not list: + bgp_data_list = [bgp_data_list] + + for bgp_data in bgp_data_list: + vrf_id = bgp_data.setdefault("vrf", None) + if vrf_id: + cmd = "{} vrf {} {}".format(command, vrf_id, addr_type) + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + bgp_net_advertise = bgp_data["address_family"][addr_type]["unicast"] + advertise_network = bgp_net_advertise.setdefault( + "advertise_networks", [] + ) + + for advertise_network_dict in advertise_network: + found_routes = [] + missing_routes = [] + found = False + + network = advertise_network_dict["network"] + + if "no_of_network" in advertise_network_dict: + no_of_network = advertise_network_dict["no_of_network"] + else: + no_of_network = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_network) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + found = True + found_routes.append(st_rt) + else: + found = False + missing_routes.append(st_rt) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in BGP RIB of router {}," + " are: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, found " + "routes are: 
{}\n".format(dut, found_routes) + ) + + logger.debug("Exiting lib API: verify_bgp_rib()") + return True + + +@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) +def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None): + """ + This API is to verify whether bgp rib has any + matching route for a nexthop. + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: input dut router name + * `addr_type` : ip type ipv4/ipv6 + * `input_dict` : input dict, has details of static routes + * `next_hop`[optional]: next_hop which needs to be verified, + default = static + * 'aspath'[optional]: aspath which needs to be verified + + Usage + ----- + dut = 'r1' + next_hop = "192.168.1.10" + input_dict = topo['routers'] + aspath = "100 200 300" + result = verify_bgp_rib(tgen, addr_type, dut, tgen, input_dict, + next_hop, aspath) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: verify_bgp_rib()") + + router_list = tgen.routers() + additional_nexthops_in_required_nhs = [] + list1 = [] + list2 = [] + for routerInput in input_dict.keys(): + for router, rnode in router_list.iteritems(): + if router != dut: + continue + + # Verifying RIB routes + command = "show bgp" + + # Static routes + sleep(2) + logger.info("Checking router {} BGP RIB:".format(dut)) + + if "static_routes" in input_dict[routerInput]: + static_routes = input_dict[routerInput]["static_routes"] + + for static_route in static_routes: + found_routes = [] + missing_routes = [] + st_found = False + nh_found = False + vrf = static_route.setdefault("vrf", None) + if vrf: + cmd = "{} vrf {} {}".format(command, vrf, addr_type) + + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg 
+ + network = static_route["network"] + + if "no_of_ip" in static_route: + no_of_ip = static_route["no_of_ip"] + else: + no_of_ip = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_ip) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + st_found = True + found_routes.append(st_rt) + + if next_hop: + if not isinstance(next_hop, list): + next_hop = [next_hop] + list1 = next_hop + found_hops = [ + rib_r["ip"] + for rib_r in rib_routes_json["routes"][st_rt][0][ + "nexthops" + ] + ] + list2 = found_hops + missing_list_of_nexthops = set(list2).difference(list1) + additional_nexthops_in_required_nhs = set( + list1 + ).difference(list2) + + if list2: + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + errormsg = ( + "Nexthop {} is Missing for " + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) + return errormsg + else: + nh_found = True + if aspath: + found_paths = rib_routes_json["routes"][st_rt][0][ + "path" + ] + if aspath == found_paths: + aspath_found = True + logger.info( + "Found AS path {} for route" + " {} in RIB of router " + "{}\n".format(aspath, st_rt, dut) + ) + else: + errormsg = ( + "AS Path {} is missing for route" + "for route {} in RIB of router {}\n".format( + aspath, st_rt, dut + ) + ) + return errormsg + + else: + missing_routes.append(st_rt) + + if nh_found: + logger.info( + "Found next_hop {} for all bgp" + " routes in RIB of" + " router {}\n".format(next_hop, router) + ) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in RIB of router {}, " + "routes: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router 
{} BGP RIB, " + "found routes are: {} \n".format(dut, found_routes) + ) + continue + + if "bgp" not in input_dict[routerInput]: + continue + + # Advertise networks + bgp_data_list = input_dict[routerInput]["bgp"] + + if type(bgp_data_list) is not list: + bgp_data_list = [bgp_data_list] + + for bgp_data in bgp_data_list: + vrf_id = bgp_data.setdefault("vrf", None) + if vrf_id: + cmd = "{} vrf {} {}".format(command, vrf_id, addr_type) + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + bgp_net_advertise = bgp_data["address_family"][addr_type]["unicast"] + advertise_network = bgp_net_advertise.setdefault( + "advertise_networks", [] + ) + + for advertise_network_dict in advertise_network: + found_routes = [] + missing_routes = [] + found = False + + network = advertise_network_dict["network"] + + if "no_of_network" in advertise_network_dict: + no_of_network = advertise_network_dict["no_of_network"] + else: + no_of_network = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_network) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + found = True + found_routes.append(st_rt) + else: + found = False + missing_routes.append(st_rt) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in BGP RIB of router {}," + " are: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, found " + "routes are: {}\n".format(dut, found_routes) + ) - logger.info( - "Exiting lib API: verify_best_path_as_per_admin_distance()") + logger.debug("Exiting lib 
API: verify_bgp_rib()") return True diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py index 5a81036643..3d92718c78 100644 --- a/tests/topotests/lib/bgprib.py +++ b/tests/topotests/lib/bgprib.py @@ -16,14 +16,14 @@ # with this program; see the file COPYING; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# +# # want_rd_routes = [ # {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, # {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, -# +# # {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, # ] -# +# # ribRequireVpnRoutes('r2','Customer routes',want_rd_routes) # # want_unicast_routes = [ @@ -34,116 +34,133 @@ # ribRequireUnicastRoutes('r1','ipv4','','Customer routes in default',want_unicast_routes) # -from lutil import luCommand,luResult +from lutil import luCommand, luResult import json import re # gpz: get rib in json form and compare against desired routes class BgpRib: - def routes_include_wanted(self,pfxtbl,want,debug): - # helper function to RequireVpnRoutes - for pfx in pfxtbl.iterkeys(): - if debug: - print 'trying pfx ' + pfx - if pfx != want['p']: - if debug: - print 'want pfx=' + want['p'] + ', not ' + pfx - continue - if debug: - print 'have pfx=' + pfx - for r in pfxtbl[pfx]: - if debug: - print 'trying route' - nexthops = r['nexthops'] - for nh in nexthops: - if debug: - print 'trying nh ' + nh['ip'] - if nh['ip'] == want['n']: - if debug: - print 'found ' + want['n'] - return 1 - else: - if debug: - print 'want nh=' + want['n'] + ', not ' + nh['ip'] - if debug: - print 'missing route: pfx=' + want['p'] + ', nh=' + want['n'] - return 0 + def routes_include_wanted(self, pfxtbl, want, debug): + # helper function to RequireVpnRoutes + for pfx in pfxtbl.iterkeys(): + if debug: + print "trying pfx " + pfx + if pfx != want["p"]: + if debug: + print "want pfx=" + want["p"] + ", not " + pfx + continue + if debug: + print "have pfx=" + pfx + for r in pfxtbl[pfx]: + if debug: + print 
"trying route" + nexthops = r["nexthops"] + for nh in nexthops: + if debug: + print "trying nh " + nh["ip"] + if nh["ip"] == want["n"]: + if debug: + print "found " + want["n"] + return 1 + else: + if debug: + print "want nh=" + want["n"] + ", not " + nh["ip"] + if debug: + print "missing route: pfx=" + want["p"] + ", nh=" + want["n"] + return 0 def RequireVpnRoutes(self, target, title, wantroutes, debug=0): - import json + import json + logstr = "RequireVpnRoutes " + str(wantroutes) - #non json form for humans - luCommand(target,'vtysh -c "show bgp ipv4 vpn"','.','None','Get VPN RIB (non-json)') - ret = luCommand(target,'vtysh -c "show bgp ipv4 vpn json"','.*','None','Get VPN RIB (json)') - if re.search(r'^\s*$', ret): + # non json form for humans + luCommand( + target, + 'vtysh -c "show bgp ipv4 vpn"', + ".", + "None", + "Get VPN RIB (non-json)", + ) + ret = luCommand( + target, + 'vtysh -c "show bgp ipv4 vpn json"', + ".*", + "None", + "Get VPN RIB (json)", + ) + if re.search(r"^\s*$", ret): # degenerate case: empty json means no routes if len(wantroutes) > 0: luResult(target, False, title, logstr) return luResult(target, True, title, logstr) - rib = json.loads(ret) - rds = rib['routes']['routeDistinguishers'] - for want in wantroutes: - found = 0 - if debug: - print "want rd " + want['rd'] - for rd in rds.iterkeys(): - if rd != want['rd']: - continue - if debug: - print "found rd " + rd - table = rds[rd] - if self.routes_include_wanted(table,want,debug): - found = 1 - break - if not found: - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) + rib = json.loads(ret) + rds = rib["routes"]["routeDistinguishers"] + for want in wantroutes: + found = 0 + if debug: + print "want rd " + want["rd"] + for rd in rds.iterkeys(): + if rd != want["rd"]: + continue + if debug: + print "found rd " + rd + table = rds[rd] + if self.routes_include_wanted(table, want, debug): + found = 1 + break + if not found: + luResult(target, False, title, 
logstr) + return + luResult(target, True, title, logstr) - def RequireUnicastRoutes(self,target,afi,vrf,title,wantroutes,debug=0): + def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0): logstr = "RequireVpnRoutes " + str(wantroutes) - vrfstr = '' - if vrf != '': - vrfstr = 'vrf %s' % (vrf) + vrfstr = "" + if vrf != "": + vrfstr = "vrf %s" % (vrf) - if (afi != 'ipv4') and (afi != 'ipv6'): - print "ERROR invalid afi"; + if (afi != "ipv4") and (afi != "ipv6"): + print "ERROR invalid afi" - cmdstr = 'show bgp %s %s unicast' % (vrfstr, afi) - #non json form for humans - cmd = 'vtysh -c "%s"' % cmdstr - luCommand(target,cmd,'.','None','Get %s %s RIB (non-json)' % (vrfstr, afi)) + cmdstr = "show bgp %s %s unicast" % (vrfstr, afi) + # non json form for humans + cmd = 'vtysh -c "%s"' % cmdstr + luCommand(target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi)) cmd = 'vtysh -c "%s json"' % cmdstr - ret = luCommand(target,cmd,'.*','None','Get %s %s RIB (json)' % (vrfstr, afi)) - if re.search(r'^\s*$', ret): + ret = luCommand( + target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi) + ) + if re.search(r"^\s*$", ret): # degenerate case: empty json means no routes if len(wantroutes) > 0: luResult(target, False, title, logstr) return luResult(target, True, title, logstr) - rib = json.loads(ret) + rib = json.loads(ret) try: - table = rib['routes'] - # KeyError: 'routes' probably means missing/bad VRF + table = rib["routes"] + # KeyError: 'routes' probably means missing/bad VRF except KeyError as err: - if vrf != '': - errstr = '-script ERROR: check if wrong vrf (%s)' % (vrf) + if vrf != "": + errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf) else: - errstr = '-script ERROR: check if vrf missing' - luResult(target, False, title + errstr, logstr) - return - for want in wantroutes: - if not self.routes_include_wanted(table,want,debug): - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) + 
errstr = "-script ERROR: check if vrf missing" + luResult(target, False, title + errstr, logstr) + return + for want in wantroutes: + if not self.routes_include_wanted(table, want, debug): + luResult(target, False, title, logstr) + return + luResult(target, True, title, logstr) -BgpRib=BgpRib() +BgpRib = BgpRib() + def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0): BgpRib.RequireVpnRoutes(target, title, wantroutes, debug) + def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0): BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug) diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index fc7581b1f2..5ee59070cc 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -79,9 +79,9 @@ if config.has_option("topogen", "frrtest_log_dir"): frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp) print("frrtest_log_file..", frrtest_log_file) - logger = logger_config.get_logger(name="test_execution_logs", - log_level=loglevel, - target=frrtest_log_file) + logger = logger_config.get_logger( + name="test_execution_logs", log_level=loglevel, target=frrtest_log_file + ) print("Logs will be sent to logfile: {}".format(frrtest_log_file)) if config.has_option("topogen", "show_router_config"): @@ -94,10 +94,7 @@ ADDRESS_TYPES = os.environ.get("ADDRESS_TYPES") # Saves sequence id numbers -SEQ_ID = { - "prefix_lists": {}, - "route_maps": {} -} +SEQ_ID = {"prefix_lists": {}, "route_maps": {}} def get_seq_id(obj_type, router, obj_name): @@ -145,6 +142,7 @@ def set_seq_id(obj_type, router, id, obj_name): class InvalidCLIError(Exception): """Raise when the CLI command is wrong""" + pass @@ -169,16 +167,19 @@ def run_frr_cmd(rnode, cmd, isjson=False): else: print_data = ret_data - logger.info('Output for command [ %s] on router %s:\n%s', - cmd.rstrip("json"), rnode.name, print_data) + logger.info( + "Output for command [ %s] on router %s:\n%s", + 
cmd.rstrip("json"), + rnode.name, + print_data, + ) return ret_data else: - raise InvalidCLIError('No actual cmd passed') + raise InvalidCLIError("No actual cmd passed") -def create_common_configuration(tgen, router, data, config_type=None, - build=False): +def create_common_configuration(tgen, router, data, config_type=None, build=False): """ API to create object of class FRRConfig and also create frr_json.conf file. It will create interface and common configurations and save it to @@ -201,15 +202,17 @@ def create_common_configuration(tgen, router, data, config_type=None, fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE) - config_map = OrderedDict({ - "general_config": "! FRR General Config\n", - "interface_config": "! Interfaces Config\n", - "static_route": "! Static Route Config\n", - "prefix_list": "! Prefix List Config\n", - "bgp_community_list": "! Community List Config\n", - "route_maps": "! Route Maps Config\n", - "bgp": "! BGP Config\n" - }) + config_map = OrderedDict( + { + "general_config": "! FRR General Config\n", + "interface_config": "! Interfaces Config\n", + "static_route": "! Static Route Config\n", + "prefix_list": "! Prefix List Config\n", + "bgp_community_list": "! Community List Config\n", + "route_maps": "! Route Maps Config\n", + "bgp": "! BGP Config\n", + } + ) if build: mode = "a" @@ -225,8 +228,9 @@ def create_common_configuration(tgen, router, data, config_type=None, frr_cfg_fd.write("\n") except IOError as err: - logger.error("Unable to open FRR Config File. error(%s): %s" % - (err.errno, err.strerror)) + logger.error( + "Unable to open FRR Config File. 
error(%s): %s" % (err.errno, err.strerror) + ) return False finally: frr_cfg_fd.close() @@ -257,8 +261,7 @@ def reset_config_on_routers(tgen, routerName=None): continue router = router_list[rname] - logger.info("Configuring router %s to initial test configuration", - rname) + logger.info("Configuring router %s to initial test configuration", rname) cfg = router.run("vtysh -c 'show running'") fname = "{}/{}/frr.sav".format(TMPDIR, rname) dname = "{}/{}/delta.conf".format(TMPDIR, rname) @@ -266,9 +269,11 @@ def reset_config_on_routers(tgen, routerName=None): for line in cfg.split("\n"): line = line.strip() - if (line == "Building configuration..." or - line == "Current configuration:" or - not line): + if ( + line == "Building configuration..." + or line == "Current configuration:" + or not line + ): continue f.write(line) f.write("\n") @@ -279,37 +284,39 @@ def reset_config_on_routers(tgen, routerName=None): init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname) tempdir = mkdtemp() - with open(os.path.join(tempdir, 'vtysh.conf'), 'w') as fd: + with open(os.path.join(tempdir, "vtysh.conf"), "w") as fd: pass - command = "/usr/lib/frr/frr-reload.py --confdir {} --input {} --test {} > {}". \ - format(tempdir, run_cfg_file, init_cfg_file, dname) - result = call(command, shell=True, stderr=SUB_STDOUT, - stdout=SUB_PIPE) + command = "/usr/lib/frr/frr-reload.py --confdir {} --input {} --test {} > {}".format( + tempdir, run_cfg_file, init_cfg_file, dname + ) + result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE) - os.unlink(os.path.join(tempdir, 'vtysh.conf')) + os.unlink(os.path.join(tempdir, "vtysh.conf")) os.rmdir(tempdir) # Assert if command fail if result > 0: - logger.error("Delta file creation failed. 
Command executed %s", - command) - with open(run_cfg_file, 'r') as fd: - logger.info('Running configuration saved in %s is:\n%s', - run_cfg_file, fd.read()) - with open(init_cfg_file, 'r') as fd: - logger.info('Test configuration saved in %s is:\n%s', - init_cfg_file, fd.read()) - - err_cmd = ['/usr/bin/vtysh', '-m', '-f', run_cfg_file] + logger.error("Delta file creation failed. Command executed %s", command) + with open(run_cfg_file, "r") as fd: + logger.info( + "Running configuration saved in %s is:\n%s", run_cfg_file, fd.read() + ) + with open(init_cfg_file, "r") as fd: + logger.info( + "Test configuration saved in %s is:\n%s", init_cfg_file, fd.read() + ) + + err_cmd = ["/usr/bin/vtysh", "-m", "-f", run_cfg_file] result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE) output = result.communicate() for out_data in output: - temp_data = out_data.decode('utf-8').lower() + temp_data = out_data.decode("utf-8").lower() for out_err in ERROR_LIST: if out_err.lower() in temp_data: - logger.error("Found errors while validating data in" - " %s", run_cfg_file) + logger.error( + "Found errors while validating data in" " %s", run_cfg_file + ) raise InvalidCLIError(out_data) raise InvalidCLIError("Unknown error in %s", output) @@ -319,18 +326,19 @@ def reset_config_on_routers(tgen, routerName=None): t_delta = f.read() for line in t_delta.split("\n"): line = line.strip() - if (line == "Lines To Delete" or - line == "===============" or - line == "Lines To Add" or - line == "============" or - not line): + if ( + line == "Lines To Delete" + or line == "===============" + or line == "Lines To Add" + or line == "============" + or not line + ): continue delta.write(line) delta.write("\n") delta.write("end\n") - output = router.vtysh_multicmd(delta.getvalue(), - pretty_output=False) + output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False) delta.close() delta = StringIO.StringIO() @@ -343,8 +351,7 @@ def reset_config_on_routers(tgen, routerName=None): # Router 
current configuration to log file or console if # "show_router_config" is defined in "pytest.ini" if show_router_config: - logger.info("Configuration on router {} after config reset:". - format(rname)) + logger.info("Configuration on router {} after config reset:".format(rname)) logger.info(delta.getvalue()) delta.close() @@ -373,12 +380,13 @@ def load_config_to_router(tgen, routerName, save_bkup=False): router = router_list[rname] try: frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE) - frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, - FRRCFG_BKUP_FILE) + frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_BKUP_FILE) with open(frr_cfg_file, "r+") as cfg: data = cfg.read() - logger.info("Applying following configuration on router" - " {}:\n{}".format(rname, data)) + logger.info( + "Applying following configuration on router" + " {}:\n{}".format(rname, data) + ) if save_bkup: with open(frr_cfg_bkup, "w") as bkup: bkup.write(data) @@ -390,8 +398,10 @@ def load_config_to_router(tgen, routerName, save_bkup=False): cfg.truncate(0) except IOError as err: - errormsg = ("Unable to open config File. error(%s):" - " %s", (err.errno, err.strerror)) + errormsg = ( + "Unable to open config File. 
error(%s):" " %s", + (err.errno, err.strerror), + ) return errormsg # Router current configuration to log file or console if @@ -418,8 +428,9 @@ def start_topology(tgen): # Starting deamons router_list = tgen.routers() - ROUTER_LIST = sorted(router_list.keys(), - key=lambda x: int(re_search('\d+', x).group(0))) + ROUTER_LIST = sorted( + router_list.keys(), key=lambda x: int(re_search("\d+", x).group(0)) + ) TMPDIR = os.path.join(LOGDIR, tgen.modname) router_list = tgen.routers() @@ -430,31 +441,27 @@ def start_topology(tgen): # Creating router named dir and empty zebra.conf bgpd.conf files # inside the current directory - if os.path.isdir('{}'.format(rname)): + if os.path.isdir("{}".format(rname)): os.system("rm -rf {}".format(rname)) - os.mkdir('{}'.format(rname)) - os.system('chmod -R go+rw {}'.format(rname)) - os.chdir('{}/{}'.format(TMPDIR, rname)) - os.system('touch zebra.conf bgpd.conf') + os.mkdir("{}".format(rname)) + os.system("chmod -R go+rw {}".format(rname)) + os.chdir("{}/{}".format(TMPDIR, rname)) + os.system("touch zebra.conf bgpd.conf") else: - os.mkdir('{}'.format(rname)) - os.system('chmod -R go+rw {}'.format(rname)) - os.chdir('{}/{}'.format(TMPDIR, rname)) - os.system('touch zebra.conf bgpd.conf') + os.mkdir("{}".format(rname)) + os.system("chmod -R go+rw {}".format(rname)) + os.chdir("{}/{}".format(TMPDIR, rname)) + os.system("touch zebra.conf bgpd.conf") except IOError as (errno, strerror): logger.error("I/O error({0}): {1}".format(errno, strerror)) # Loading empty zebra.conf file to router, to start the zebra deamon router.load_config( - TopoRouter.RD_ZEBRA, - '{}/{}/zebra.conf'.format(TMPDIR, rname) + TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname) ) # Loading empty bgpd.conf file to router, to start the bgp deamon - router.load_config( - TopoRouter.RD_BGP, - '{}/{}/bgpd.conf'.format(TMPDIR, rname) - ) + router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname)) # Starting routers logger.info("Starting all 
routers once topology is created") @@ -483,6 +490,7 @@ def number_to_column(routerName): # Common APIs, will be used by all protocols ############################################# + def validate_ip_address(ip_address): """ Validates the type of ip address @@ -518,8 +526,9 @@ def validate_ip_address(ip_address): return "ipv6" if not v4 and not v6: - raise Exception("InvalidIpAddr", "%s is neither valid IPv4 or IPv6" - " address" % ip_address) + raise Exception( + "InvalidIpAddr", "%s is neither valid IPv4 or IPv6" " address" % ip_address + ) def check_address_types(addr_type=None): @@ -542,8 +551,11 @@ def check_address_types(addr_type=None): return addr_types if addr_type not in addr_types: - logger.error("{} not in supported/configured address types {}". - format(addr_type, addr_types)) + logger.error( + "{} not in supported/configured address types {}".format( + addr_type, addr_types + ) + ) return False return True @@ -589,8 +601,7 @@ def generate_ips(network, no_of_ips): return ipaddress_list -def find_interface_with_greater_ip(topo, router, loopback=True, - interface=True): +def find_interface_with_greater_ip(topo, router, loopback=True, interface=True): """ Returns highest interface ip for ipv4/ipv6. 
If loopback is there then it will return highest IP from loopback IPs otherwise from physical @@ -608,12 +619,14 @@ def find_interface_with_greater_ip(topo, router, loopback=True, if loopback: if "type" in data and data["type"] == "loopback": lo_exists = True - ip_address = topo["routers"][router]["links"][ - destRouterLink]["ipv4"].split("/")[0] + ip_address = topo["routers"][router]["links"][destRouterLink][ + "ipv4" + ].split("/")[0] lo_list.append(ip_address) if interface: - ip_address = topo["routers"][router]["links"][ - destRouterLink]["ipv4"].split("/")[0] + ip_address = topo["routers"][router]["links"][destRouterLink]["ipv4"].split( + "/" + )[0] interfaces_list.append(ip_address) if lo_exists: @@ -625,17 +638,17 @@ def find_interface_with_greater_ip(topo, router, loopback=True, def write_test_header(tc_name): """ Display message at beginning of test case""" count = 20 - logger.info("*"*(len(tc_name)+count)) + logger.info("*" * (len(tc_name) + count)) step("START -> Testcase : %s" % tc_name, reset=True) - logger.info("*"*(len(tc_name)+count)) + logger.info("*" * (len(tc_name) + count)) def write_test_footer(tc_name): """ Display message at end of test case""" count = 21 - logger.info("="*(len(tc_name)+count)) + logger.info("=" * (len(tc_name) + count)) logger.info("Testcase : %s -> PASSED", tc_name) - logger.info("="*(len(tc_name)+count)) + logger.info("=" * (len(tc_name) + count)) def interface_status(tgen, topo, input_dict): @@ -664,8 +677,8 @@ def interface_status(tgen, topo, input_dict): global frr_cfg for router in input_dict.keys(): - interface_list = input_dict[router]['interface_list'] - status = input_dict[router].setdefault('status', 'up') + interface_list = input_dict[router]["interface_list"] + status = input_dict[router].setdefault("status", "up") for intf in interface_list: rnode = tgen.routers()[router] interface_set_status(rnode, intf, status) @@ -698,11 +711,10 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0): """ def 
_retry(func): - @wraps(func) def func_retry(*args, **kwargs): - _wait = kwargs.pop('wait', wait) - _attempts = kwargs.pop('attempts', attempts) + _wait = kwargs.pop("wait", wait) + _attempts = kwargs.pop("attempts", attempts) _attempts = int(_attempts) if _attempts < 0: raise ValueError("attempts must be 0 or greater") @@ -711,11 +723,11 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0): logger.info("Waiting for [%s]s as initial delay", initial_wait) sleep(initial_wait) - _return_is_str = kwargs.pop('return_is_str', return_is_str) + _return_is_str = kwargs.pop("return_is_str", return_is_str) for i in range(1, _attempts + 1): try: - _expected = kwargs.setdefault('expected', True) - kwargs.pop('expected') + _expected = kwargs.setdefault("expected", True) + kwargs.pop("expected") ret = func(*args, **kwargs) logger.debug("Function returned %s" % ret) if return_is_str and isinstance(ret, bool) and _expected: @@ -727,17 +739,17 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0): return ret except Exception as err: if _attempts == i: - logger.info("Max number of attempts (%r) reached", - _attempts) + logger.info("Max number of attempts (%r) reached", _attempts) raise else: logger.info("Function returned %s", err) if i < _attempts: - logger.info("Retry [#%r] after sleeping for %ss" - % (i, _wait)) + logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait)) sleep(_wait) + func_retry._original = func return func_retry + return _retry @@ -745,6 +757,7 @@ class Stepper: """ Prints step number for the test case step being executed """ + count = 1 def __call__(self, msg, reset): @@ -795,24 +808,17 @@ def create_interfaces_cfg(tgen, topo, build=False): interface_name = destRouterLink else: interface_name = data["interface"] - interface_data.append("interface {}".format( - str(interface_name) - )) + interface_data.append("interface {}".format(str(interface_name))) if "ipv4" in data: intf_addr = c_data["links"][destRouterLink]["ipv4"] - 
interface_data.append("ip address {}".format( - intf_addr - )) + interface_data.append("ip address {}".format(intf_addr)) if "ipv6" in data: intf_addr = c_data["links"][destRouterLink]["ipv6"] - interface_data.append("ipv6 address {}".format( - intf_addr - )) - - result = create_common_configuration(tgen, c_router, - interface_data, - "interface_config", - build=build) + interface_data.append("ipv6 address {}".format(intf_addr)) + + result = create_common_configuration( + tgen, c_router, interface_data, "interface_config", build=build + ) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -880,13 +886,10 @@ def create_static_routes(tgen, input_dict, build=False): del_action = static_route.setdefault("delete", False) # No of IPs no_of_ip = static_route.setdefault("no_of_ip", 1) - admin_distance = static_route.setdefault("admin_distance", - None) + admin_distance = static_route.setdefault("admin_distance", None) tag = static_route.setdefault("tag", None) - if "next_hop" not in static_route or \ - "network" not in static_route: - errormsg = "'next_hop' or 'network' missing in" \ - " input_dict" + if "next_hop" not in static_route or "network" not in static_route: + errormsg = "'next_hop' or 'network' missing in" " input_dict" return errormsg next_hop = static_route["next_hop"] @@ -914,10 +917,9 @@ def create_static_routes(tgen, input_dict, build=False): static_routes_list.append(cmd) - result = create_common_configuration(tgen, router, - static_routes_list, - "static_route", - build=build) + result = create_common_configuration( + tgen, router, static_routes_list, "static_route", build=build + ) except InvalidCLIError: # Traceback @@ -992,10 +994,8 @@ def create_prefix_lists(tgen, input_dict, build=False): for prefix_name, prefix_list in prefix_data.iteritems(): for prefix_dict in prefix_list: - if "action" not in prefix_dict or \ - "network" not in prefix_dict: - errormsg = "'action' or network' missing in" \ - " input_dict" + if "action" not in 
prefix_dict or "network" not in prefix_dict: + errormsg = "'action' or network' missing in" " input_dict" return errormsg network_addr = prefix_dict["network"] @@ -1005,11 +1005,9 @@ def create_prefix_lists(tgen, input_dict, build=False): seqid = prefix_dict.setdefault("seqid", None) del_action = prefix_dict.setdefault("delete", False) if seqid is None: - seqid = get_seq_id("prefix_lists", router, - prefix_name) + seqid = get_seq_id("prefix_lists", router, prefix_name) else: - set_seq_id("prefix_lists", router, seqid, - prefix_name) + set_seq_id("prefix_lists", router, seqid, prefix_name) if addr_type == "ipv4": protocol = "ip" @@ -1028,10 +1026,9 @@ def create_prefix_lists(tgen, input_dict, build=False): cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration(tgen, router, - config_data, - "prefix_list", - build=build) + result = create_common_configuration( + tgen, router, config_data, "prefix_list", build=build + ) except InvalidCLIError: # Traceback @@ -1101,9 +1098,9 @@ def create_route_maps(tgen, input_dict, build=False): "tag": "tag_id" }, "set": { - "localpref": 150, - "med": 30, - "aspath": { + "locPrf": 150, + "metric": 30, + "path": { "num": 20000, "action": "prepend", }, @@ -1137,8 +1134,7 @@ def create_route_maps(tgen, input_dict, build=False): logger.debug("route_maps not present in input_dict") continue rmap_data = [] - for rmap_name, rmap_value in \ - input_dict[router]["route_maps"].iteritems(): + for rmap_name, rmap_value in input_dict[router]["route_maps"].iteritems(): for rmap_dict in rmap_value: del_action = rmap_dict.setdefault("delete", False) @@ -1160,38 +1156,39 @@ def create_route_maps(tgen, input_dict, build=False): else: set_seq_id("route_maps", router, seq_id, rmap_name) - rmap_data.append("route-map {} {} {}".format( - rmap_name, rmap_action, seq_id - )) + rmap_data.append( + "route-map {} {} {}".format(rmap_name, rmap_action, seq_id) + ) if "continue" in rmap_dict: continue_to = rmap_dict["continue"] 
if continue_to: - rmap_data.append("on-match goto {}". - format(continue_to)) + rmap_data.append("on-match goto {}".format(continue_to)) else: - logger.error("In continue, 'route-map entry " - "sequence number' is not provided") + logger.error( + "In continue, 'route-map entry " + "sequence number' is not provided" + ) return False if "goto" in rmap_dict: go_to = rmap_dict["goto"] if go_to: - rmap_data.append("on-match goto {}". - format(go_to)) + rmap_data.append("on-match goto {}".format(go_to)) else: - logger.error("In goto, 'Goto Clause number' is not" - " provided") + logger.error( + "In goto, 'Goto Clause number' is not" " provided" + ) return False if "call" in rmap_dict: call_rmap = rmap_dict["call"] if call_rmap: - rmap_data.append("call {}". - format(call_rmap)) + rmap_data.append("call {}".format(call_rmap)) else: - logger.error("In call, 'destination Route-Map' is" - " not provided") + logger.error( + "In call, 'destination Route-Map' is" " not provided" + ) return False # Verifying if SET criteria is defined @@ -1199,24 +1196,22 @@ def create_route_maps(tgen, input_dict, build=False): set_data = rmap_dict["set"] ipv4_data = set_data.setdefault("ipv4", {}) ipv6_data = set_data.setdefault("ipv6", {}) - local_preference = set_data.setdefault("localpref", - None) - metric = set_data.setdefault("med", None) - as_path = set_data.setdefault("aspath", {}) + local_preference = set_data.setdefault("locPrf", None) + metric = set_data.setdefault("metric", None) + as_path = set_data.setdefault("path", {}) weight = set_data.setdefault("weight", None) community = set_data.setdefault("community", {}) - large_community = set_data.setdefault( - "large_community", {}) - large_comm_list = set_data.setdefault( - "large_comm_list", {}) + large_community = set_data.setdefault("large_community", {}) + large_comm_list = set_data.setdefault("large_comm_list", {}) set_action = set_data.setdefault("set_action", None) nexthop = set_data.setdefault("nexthop", None) origin = 
set_data.setdefault("origin", None) # Local Preference if local_preference: - rmap_data.append("set local-preference {}". - format(local_preference)) + rmap_data.append( + "set local-preference {}".format(local_preference) + ) # Metric if metric: @@ -1231,8 +1226,9 @@ def create_route_maps(tgen, input_dict, build=False): as_num = as_path.setdefault("as_num", None) as_action = as_path.setdefault("as_action", None) if as_action and as_num: - rmap_data.append("set as-path {} {}". - format(as_action, as_num)) + rmap_data.append( + "set as-path {} {}".format(as_action, as_num) + ) # Community if community: @@ -1244,14 +1240,12 @@ def create_route_maps(tgen, input_dict, build=False): cmd = "{} {}".format(cmd, comm_action) rmap_data.append(cmd) else: - logger.error("In community, AS Num not" - " provided") + logger.error("In community, AS Num not" " provided") return False if large_community: num = large_community.setdefault("num", None) - comm_action = large_community.setdefault("action", - None) + comm_action = large_community.setdefault("action", None) if num: cmd = "set large-community {}".format(num) if comm_action: @@ -1259,13 +1253,13 @@ def create_route_maps(tgen, input_dict, build=False): rmap_data.append(cmd) else: - logger.error("In large_community, AS Num not" - " provided") + logger.error( + "In large_community, AS Num not" " provided" + ) return False if large_comm_list: id = large_comm_list.setdefault("id", None) - del_comm = large_comm_list.setdefault("delete", - None) + del_comm = large_comm_list.setdefault("delete", None) if id: cmd = "set large-comm-list {}".format(id) if del_comm: @@ -1273,43 +1267,36 @@ def create_route_maps(tgen, input_dict, build=False): rmap_data.append(cmd) else: - logger.error("In large_comm_list 'id' not" - " provided") + logger.error("In large_comm_list 'id' not" " provided") return False # Weight if weight: - rmap_data.append("set weight {}".format( - weight)) + rmap_data.append("set weight {}".format(weight)) if ipv6_data: 
nexthop = ipv6_data.setdefault("nexthop", None) if nexthop: - rmap_data.append("set ipv6 next-hop {}".format( - nexthop - )) + rmap_data.append("set ipv6 next-hop {}".format(nexthop)) # Adding MATCH and SET sequence to RMAP if defined if "match" in rmap_dict: match_data = rmap_dict["match"] ipv4_data = match_data.setdefault("ipv4", {}) ipv6_data = match_data.setdefault("ipv6", {}) - community = match_data.setdefault( - "community_list",{}) - large_community = match_data.setdefault( - "large_community", {} - ) + community = match_data.setdefault("community_list", {}) + large_community = match_data.setdefault("large_community", {}) large_community_list = match_data.setdefault( "large_community_list", {} ) if ipv4_data: # fetch prefix list data from rmap - prefix_name = \ - ipv4_data.setdefault("prefix_lists", - None) + prefix_name = ipv4_data.setdefault("prefix_lists", None) if prefix_name: - rmap_data.append("match ip address" - " prefix-list {}".format(prefix_name)) + rmap_data.append( + "match ip address" + " prefix-list {}".format(prefix_name) + ) # fetch tag data from rmap tag = ipv4_data.setdefault("tag", None) @@ -1318,16 +1305,19 @@ def create_route_maps(tgen, input_dict, build=False): # fetch large community data from rmap large_community_list = ipv4_data.setdefault( - "large_community_list",{}) + "large_community_list", {} + ) large_community = match_data.setdefault( - "large_community", {}) + "large_community", {} + ) if ipv6_data: - prefix_name = ipv6_data.setdefault("prefix_lists", - None) + prefix_name = ipv6_data.setdefault("prefix_lists", None) if prefix_name: - rmap_data.append("match ipv6 address" - " prefix-list {}".format(prefix_name)) + rmap_data.append( + "match ipv6 address" + " prefix-list {}".format(prefix_name) + ) # fetch tag data from rmap tag = ipv6_data.setdefault("tag", None) @@ -1336,54 +1326,64 @@ def create_route_maps(tgen, input_dict, build=False): # fetch large community data from rmap large_community_list = ipv6_data.setdefault( - 
"large_community_list",{}) + "large_community_list", {} + ) large_community = match_data.setdefault( - "large_community", {}) + "large_community", {} + ) if community: if "id" not in community: - logger.error("'id' is mandatory for " - "community-list in match" - " criteria") + logger.error( + "'id' is mandatory for " + "community-list in match" + " criteria" + ) return False cmd = "match community {}".format(community["id"]) - exact_match = community.setdefault("exact_match", - False) + exact_match = community.setdefault("exact_match", False) if exact_match: cmd = "{} exact-match".format(cmd) rmap_data.append(cmd) if large_community: if "id" not in large_community: - logger.error("'id' is mandatory for " - "large-community-list in match " - "criteria") + logger.error( + "'id' is mandatory for " + "large-community-list in match " + "criteria" + ) return False cmd = "match large-community {}".format( - large_community["id"]) + large_community["id"] + ) exact_match = large_community.setdefault( - "exact_match", False) + "exact_match", False + ) if exact_match: cmd = "{} exact-match".format(cmd) rmap_data.append(cmd) if large_community_list: if "id" not in large_community_list: - logger.error("'id' is mandatory for " - "large-community-list in match " - "criteria") + logger.error( + "'id' is mandatory for " + "large-community-list in match " + "criteria" + ) return False cmd = "match large-community {}".format( - large_community_list["id"]) + large_community_list["id"] + ) exact_match = large_community_list.setdefault( - "exact_match", False) + "exact_match", False + ) if exact_match: cmd = "{} exact-match".format(cmd) rmap_data.append(cmd) - result = create_common_configuration(tgen, router, - rmap_data, - "route_maps", - build=build) + result = create_common_configuration( + tgen, router, rmap_data, "route_maps", build=build + ) except InvalidCLIError: # Traceback @@ -1424,12 +1424,7 @@ def delete_route_maps(tgen, input_dict): rmap_data = input_dict[router] 
rmap_data["route_maps"] = {} for route_map_name in route_maps: - rmap_data["route_maps"].update({ - route_map_name: - [{ - "delete": True - }] - }) + rmap_data["route_maps"].update({route_map_name: [{"delete": True}]}) return create_route_maps(tgen, input_dict) @@ -1478,10 +1473,9 @@ def create_bgp_community_lists(tgen, input_dict, build=False): community_list = input_dict[router]["bgp_community_lists"] for community_dict in community_list: del_action = community_dict.setdefault("delete", False) - community_type = community_dict.setdefault("community_type", - None) + community_type = community_dict.setdefault("community_type", None) action = community_dict.setdefault("action", None) - value = community_dict.setdefault("value", '') + value = community_dict.setdefault("value", "") large = community_dict.setdefault("large", None) name = community_dict.setdefault("name", None) if large: @@ -1490,28 +1484,30 @@ def create_bgp_community_lists(tgen, input_dict, build=False): cmd = "bgp community-list" if not large and not (community_type and action and value): - errormsg = "community_type, action and value are " \ - "required in bgp_community_list" + errormsg = ( + "community_type, action and value are " + "required in bgp_community_list" + ) logger.error(errormsg) return False try: community_type = int(community_type) - cmd = "{} {} {} {}".format(cmd, community_type, action, - value) + cmd = "{} {} {} {}".format(cmd, community_type, action, value) except ValueError: cmd = "{} {} {} {} {}".format( - cmd, community_type, name, action, value) + cmd, community_type, name, action, value + ) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration(tgen, router, config_data, - "bgp_community_list", - build=build) + result = create_common_configuration( + tgen, router, config_data, "bgp_community_list", build=build + ) except InvalidCLIError: # Traceback @@ -1634,8 +1630,9 @@ def verify_rib(tgen, addr_type, dut, input_dict, 
next_hop=None, protocol=None): # Verifying output dictionary rib_routes_json is not empty if bool(rib_routes_json) is False: - errormsg = "No {} route found in rib of router {}..". \ - format(protocol, router) + errormsg = "No {} route found in rib of router {}..".format( + protocol, router + ) return errormsg if "static_routes" in input_dict[routerInput]: @@ -1665,47 +1662,62 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None): if type(next_hop) is not list: next_hop = [next_hop] - found_hops = [rib_r["ip"] for rib_r in - rib_routes_json[st_rt][0][ - "nexthops"]] + found_hops = [ + rib_r["ip"] + for rib_r in rib_routes_json[st_rt][0]["nexthops"] + ] for nh in found_hops: nh_found = False if nh and nh in next_hop: nh_found = True else: - errormsg = ("Nexthop {} is Missing for {}" - " route {} in RIB of router" - " {}\n".format(next_hop, - protocol, - st_rt, dut)) + errormsg = ( + "Nexthop {} is Missing for {}" + " route {} in RIB of router" + " {}\n".format( + next_hop, protocol, st_rt, dut + ) + ) return errormsg else: missing_routes.append(st_rt) if nh_found: - logger.info("Found next_hop %s for all routes in RIB of" - " router %s\n", next_hop, dut) + logger.info( + "Found next_hop %s for all routes in RIB of" " router %s\n", + next_hop, + dut, + ) if not st_found and len(missing_routes) > 0: - errormsg = "Missing route in RIB of router {}, routes: " \ - "{}\n".format(dut, missing_routes) + errormsg = ( + "Missing route in RIB of router {}, routes: " + "{}\n".format(dut, missing_routes) + ) return errormsg - logger.info("Verified routes in router %s RIB, found routes" - " are: %s\n", dut, found_routes) + logger.info( + "Verified routes in router %s RIB, found routes" " are: %s\n", + dut, + found_routes, + ) continue if "bgp" in input_dict[routerInput]: - if 'advertise_networks' in input_dict[routerInput]["bgp"]\ - ["address_family"][addr_type]["unicast"]: + if ( + "advertise_networks" + in 
input_dict[routerInput]["bgp"]["address_family"][addr_type][ + "unicast" + ] + ): found_routes = [] missing_routes = [] - advertise_network = input_dict[routerInput]["bgp"]\ - ["address_family"][addr_type]["unicast"]\ - ["advertise_networks"] + advertise_network = input_dict[routerInput]["bgp"][ + "address_family" + ][addr_type]["unicast"]["advertise_networks"] for advertise_network_dict in advertise_network: start_ip = advertise_network_dict["network"] @@ -1730,34 +1742,43 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None): next_hop = [next_hop] for index, nh in enumerate(next_hop): - if rib_routes_json[st_rt][0]\ - ['nexthops'][index]['ip'] == nh: + if ( + rib_routes_json[st_rt][0]["nexthops"][ + index + ]["ip"] + == nh + ): nh_found = True else: - errormsg=("Nexthop {} is Missing" - " for {} route {} in " - "RIB of router {}\n".\ - format(next_hop, - protocol, - st_rt, dut)) + errormsg = ( + "Nexthop {} is Missing" + " for {} route {} in " + "RIB of router {}\n".format( + next_hop, protocol, st_rt, dut + ) + ) return errormsg else: missing_routes.append(st_rt) if nh_found: - logger.info("Found next_hop {} for all routes in RIB" - " of router {}\n".format(next_hop, dut)) + logger.info( + "Found next_hop {} for all routes in RIB" + " of router {}\n".format(next_hop, dut) + ) if not found and len(missing_routes) > 0: - errormsg = ("Missing {} route in RIB of router {}, " - "routes: {} \n".\ - format(addr_type, dut, missing_routes)) + errormsg = ( + "Missing {} route in RIB of router {}, " + "routes: {} \n".format(addr_type, dut, missing_routes) + ) return errormsg - logger.info("Verified {} routes in router {} RIB, found" - " routes are: {}\n".\ - format(addr_type, dut, found_routes)) + logger.info( + "Verified {} routes in router {} RIB, found" + " routes are: {}\n".format(addr_type, dut, found_routes) + ) logger.debug("Exiting lib API: verify_rib()") return True @@ -1810,8 +1831,11 @@ def 
verify_admin_distance_for_static_routes(tgen, input_dict): command = "show ipv6 route json" show_ip_route_json = run_frr_cmd(rnode, command, isjson=True) - logger.info("Verifying admin distance for static route %s" - " under dut %s:", static_route, router) + logger.info( + "Verifying admin distance for static route %s" " under dut %s:", + static_route, + router, + ) network = static_route["network"] next_hop = static_route["next_hop"] admin_distance = static_route["admin_distance"] @@ -1819,23 +1843,32 @@ def verify_admin_distance_for_static_routes(tgen, input_dict): if network in show_ip_route_json: if route_data["nexthops"][0]["ip"] == next_hop: if route_data["distance"] != admin_distance: - errormsg = ("Verification failed: admin distance" - " for static route {} under dut {}," - " found:{} but expected:{}". - format(static_route, router, - route_data["distance"], - admin_distance)) + errormsg = ( + "Verification failed: admin distance" + " for static route {} under dut {}," + " found:{} but expected:{}".format( + static_route, + router, + route_data["distance"], + admin_distance, + ) + ) return errormsg else: - logger.info("Verification successful: admin" - " distance for static route %s under" - " dut %s, found:%s", static_route, - router, route_data["distance"]) + logger.info( + "Verification successful: admin" + " distance for static route %s under" + " dut %s, found:%s", + static_route, + router, + route_data["distance"], + ) else: - errormsg = ("Static route {} not found in " - "show_ip_route_json for dut {}". 
- format(network, router)) + errormsg = ( + "Static route {} not found in " + "show_ip_route_json for dut {}".format(network, router) + ) return errormsg logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()") @@ -1885,12 +1918,17 @@ def verify_prefix_lists(tgen, input_dict): for prefix_list in prefix_lists_addr[addr_type].keys(): if prefix_list in show_prefix_list: - errormsg = ("Prefix list {} is/are present in the router" - " {}".format(prefix_list, router)) + errormsg = ( + "Prefix list {} is/are present in the router" + " {}".format(prefix_list, router) + ) return errormsg - logger.info("Prefix list %s is/are not present in the router" - " from router %s", prefix_list, router) + logger.info( + "Prefix list %s is/are not present in the router" " from router %s", + prefix_list, + router, + ) logger.debug("Exiting lib API: verify_prefix_lists()") return True @@ -1933,12 +1971,16 @@ def verify_route_maps(tgen, input_dict): route_maps = input_dict[router]["route_maps"] for route_map in route_maps: if route_map in show_route_maps: - errormsg = ("Route map {} is not deleted from router" - " {}".format(route_map, router)) + errormsg = "Route map {} is not deleted from router" " {}".format( + route_map, router + ) return errormsg - logger.info("Route map %s is/are deleted successfully from" - " router %s", route_maps, router) + logger.info( + "Route map %s is/are deleted successfully from" " router %s", + route_maps, + router, + ) logger.debug("Exiting lib API: verify_route_maps()") return True @@ -1977,47 +2019,60 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None): rnode = tgen.routers()[router] - logger.debug("Verifying BGP community attributes on dut %s: for %s " - "network %s", router, addr_type, network) + logger.debug( + "Verifying BGP community attributes on dut %s: for %s " "network %s", + router, + addr_type, + network, + ) for net in network: cmd = "show bgp {} {} json".format(addr_type, net) show_bgp_json = 
rnode.vtysh_cmd(cmd, isjson=True) logger.info(show_bgp_json) if "paths" not in show_bgp_json: - return "Prefix {} not found in BGP table of router: {}". \ - format(net, router) + return "Prefix {} not found in BGP table of router: {}".format(net, router) as_paths = show_bgp_json["paths"] found = False for i in range(len(as_paths)): - if "largeCommunity" in show_bgp_json["paths"][i] or \ - "community" in show_bgp_json["paths"][i]: + if ( + "largeCommunity" in show_bgp_json["paths"][i] + or "community" in show_bgp_json["paths"][i] + ): found = True - logger.info("Large Community attribute is found for route:" - " %s in router: %s", net, router) + logger.info( + "Large Community attribute is found for route:" " %s in router: %s", + net, + router, + ) if input_dict is not None: for criteria, comm_val in input_dict.items(): - show_val = show_bgp_json["paths"][i][criteria][ - "string"] + show_val = show_bgp_json["paths"][i][criteria]["string"] if comm_val == show_val: - logger.info("Verifying BGP %s for prefix: %s" - " in router: %s, found expected" - " value: %s", criteria, net, router, - comm_val) + logger.info( + "Verifying BGP %s for prefix: %s" + " in router: %s, found expected" + " value: %s", + criteria, + net, + router, + comm_val, + ) else: - errormsg = "Failed: Verifying BGP attribute" \ - " {} for route: {} in router: {}" \ - ", expected value: {} but found" \ - ": {}".format( - criteria, net, router, comm_val, - show_val) + errormsg = ( + "Failed: Verifying BGP attribute" + " {} for route: {} in router: {}" + ", expected value: {} but found" + ": {}".format(criteria, net, router, comm_val, show_val) + ) return errormsg if not found: errormsg = ( "Large Community attribute is not found for route: " - "{} in router: {} ".format(net, router)) + "{} in router: {} ".format(net, router) + ) return errormsg logger.debug("Exiting lib API: verify_bgp_community()") @@ -2057,25 +2112,24 @@ def verify_create_community_list(tgen, input_dict): rnode = 
tgen.routers()[router] - logger.info("Verifying large-community is created for dut %s:", - router) + logger.info("Verifying large-community is created for dut %s:", router) for comm_data in input_dict[router]["bgp_community_lists"]: comm_name = comm_data["name"] comm_type = comm_data["community_type"] - show_bgp_community = \ - run_frr_cmd(rnode, - "show bgp large-community-list {} detail". - format(comm_name)) + show_bgp_community = run_frr_cmd( + rnode, "show bgp large-community-list {} detail".format(comm_name) + ) # Verify community list and type - if comm_name in show_bgp_community and comm_type in \ - show_bgp_community: - logger.info("BGP %s large-community-list %s is" - " created", comm_type, comm_name) + if comm_name in show_bgp_community and comm_type in show_bgp_community: + logger.info( + "BGP %s large-community-list %s is" " created", comm_type, comm_name + ) else: - errormsg = "BGP {} large-community-list {} is not" \ - " created".format(comm_type, comm_name) + errormsg = "BGP {} large-community-list {} is not" " created".format( + comm_type, comm_name + ) return errormsg logger.debug("Exiting lib API: verify_create_community_list()") diff --git a/tests/topotests/lib/test/test_json.py b/tests/topotests/lib/test/test_json.py index 3927ba095d..ba118d607a 100755 --- a/tests/topotests/lib/test/test_json.py +++ b/tests/topotests/lib/test/test_json.py @@ -32,36 +32,37 @@ import pytest # Save the Current Working Directory to find lib files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topotest import json_cmp + def test_json_intersect_true(): "Test simple correct JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': 'item3', - 'i100': 'item4', + "i1": "item1", + "i2": "item2", + "i3": "item3", + "i100": "item4", } dsub1 = { - 'i1': 'item1', - 'i3': 'item3', + "i1": "item1", + "i3": "item3", } dsub2 = { - 'i1': 'item1', - 'i2': 'item2', + "i1": "item1", + "i2": "item2", } dsub3 = { - 'i100': 'item4', - 'i2': 'item2', + "i100": "item4", + "i2": "item2", } dsub4 = { - 'i50': None, - 'i100': 'item4', + "i50": None, + "i100": "item4", } assert json_cmp(dcomplete, dsub1) is None @@ -69,99 +70,66 @@ def test_json_intersect_true(): assert json_cmp(dcomplete, dsub3) is None assert json_cmp(dcomplete, dsub4) is None + def test_json_intersect_false(): "Test simple incorrect JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': 'item3', - 'i100': 'item4', + "i1": "item1", + "i2": "item2", + "i3": "item3", + "i100": "item4", } # Incorrect value for 'i1' dsub1 = { - 'i1': 'item3', - 'i3': 'item3', + "i1": "item3", + "i3": "item3", } # Non-existing key 'i5' dsub2 = { - 'i1': 'item1', - 'i5': 'item2', + "i1": "item1", + "i5": "item2", } # Key should not exist dsub3 = { - 'i100': None, + "i100": None, } assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None assert json_cmp(dcomplete, dsub3) is not None + def test_json_intersect_multilevel_true(): "Test multi level correct JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': { - 'i100': 'item100', + "i1": "item1", + "i2": "item2", + "i3": {"i100": "item100",}, + "i4": { + "i41": {"i411": "item411",}, + "i42": {"i421": "item421", "i422": "item422",}, }, - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i421': 'item421', - 'i422': 
'item422', - } - } } dsub1 = { - 'i1': 'item1', - 'i3': { - 'i100': 'item100', - }, - 'i10': None, + "i1": "item1", + "i3": {"i100": "item100",}, + "i10": None, } dsub2 = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': {}, + "i1": "item1", + "i2": "item2", + "i3": {}, } dsub3 = { - 'i2': 'item2', - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i422': 'item422', - 'i450': None, - } - } - } - dsub4 = { - 'i2': 'item2', - 'i4': { - 'i41': {}, - 'i42': { - 'i450': None, - } - } - } - dsub5 = { - 'i2': 'item2', - 'i3': { - 'i100': 'item100', - }, - 'i4': { - 'i42': { - 'i450': None, - } - } + "i2": "item2", + "i4": {"i41": {"i411": "item411",}, "i42": {"i422": "item422", "i450": None,}}, } + dsub4 = {"i2": "item2", "i4": {"i41": {}, "i42": {"i450": None,}}} + dsub5 = {"i2": "item2", "i3": {"i100": "item100",}, "i4": {"i42": {"i450": None,}}} assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -169,78 +137,43 @@ def test_json_intersect_multilevel_true(): assert json_cmp(dcomplete, dsub4) is None assert json_cmp(dcomplete, dsub5) is None + def test_json_intersect_multilevel_false(): "Test multi level incorrect JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': { - 'i100': 'item100', + "i1": "item1", + "i2": "item2", + "i3": {"i100": "item100",}, + "i4": { + "i41": {"i411": "item411",}, + "i42": {"i421": "item421", "i422": "item422",}, }, - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i421': 'item421', - 'i422': 'item422', - } - } } # Incorrect sub-level value dsub1 = { - 'i1': 'item1', - 'i3': { - 'i100': 'item00', - }, - 'i10': None, + "i1": "item1", + "i3": {"i100": "item00",}, + "i10": None, } # Inexistent sub-level dsub2 = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': None, + "i1": "item1", + "i2": "item2", + "i3": None, } # Inexistent sub-level value dsub3 = { - 'i1': 'item1', - 'i3': { - 'i100': None, - }, + "i1": "item1", + "i3": {"i100": None,}, } # Inexistent sub-sub-level value - 
dsub4 = { - 'i4': { - 'i41': { - 'i412': 'item412', - }, - 'i42': { - 'i421': 'item421', - } - } - } + dsub4 = {"i4": {"i41": {"i412": "item412",}, "i42": {"i421": "item421",}}} # Invalid sub-sub-level value - dsub5 = { - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i421': 'item420000', - } - } - } + dsub5 = {"i4": {"i41": {"i411": "item411",}, "i42": {"i421": "item420000",}}} # sub-sub-level should be value - dsub6 = { - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': 'foobar', - } - } + dsub6 = {"i4": {"i41": {"i411": "item411",}, "i42": "foobar",}} assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None @@ -249,80 +182,54 @@ def test_json_intersect_multilevel_false(): assert json_cmp(dcomplete, dsub5) is not None assert json_cmp(dcomplete, dsub6) is not None + def test_json_with_list_sucess(): "Test successful json comparisons that have lists." dcomplete = { - 'list': [ - { - 'i1': 'item 1', - 'i2': 'item 2', - }, - { - 'i10': 'item 10', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "i100": "item 100", } # Test list type dsub1 = { - 'list': [], + "list": [], } # Test list correct list items dsub2 = { - 'list': [ - { - 'i1': 'item 1', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 1",},], + "i100": "item 100", } # Test list correct list size dsub3 = { - 'list': [ - {}, {}, - ], + "list": [{}, {},], } assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None assert json_cmp(dcomplete, dsub3) is None + def test_json_with_list_failure(): "Test failed json comparisons that have lists." 
dcomplete = { - 'list': [ - { - 'i1': 'item 1', - 'i2': 'item 2', - }, - { - 'i10': 'item 10', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "i100": "item 100", } # Test list type dsub1 = { - 'list': {}, + "list": {}, } # Test list incorrect list items dsub2 = { - 'list': [ - { - 'i1': 'item 2', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 2",},], + "i100": "item 100", } # Test list correct list size dsub3 = { - 'list': [ - {}, {}, {}, - ], + "list": [{}, {}, {},], } assert json_cmp(dcomplete, dsub1) is not None @@ -334,53 +241,20 @@ def test_json_list_start_success(): "Test JSON encoded data that starts with a list that should succeed." dcomplete = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 200, - "value": "abcd", - }, - { - "id": 300, - "value": "abcde", - }, + {"id": 100, "value": "abc",}, + {"id": 200, "value": "abcd",}, + {"id": 300, "value": "abcde",}, ] - dsub1 = [ - { - "id": 100, - "value": "abc", - } - ] + dsub1 = [{"id": 100, "value": "abc",}] - dsub2 = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 200, - "value": "abcd", - } - ] + dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abcd",}] - dsub3 = [ - { - "id": 300, - "value": "abcde", - } - ] + dsub3 = [{"id": 300, "value": "abcde",}] - dsub4 = [ - ] + dsub4 = [] - dsub5 = [ - { - "id": 100, - } - ] + dsub5 = [{"id": 100,}] assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -393,58 +267,18 @@ def test_json_list_start_failure(): "Test JSON encoded data that starts with a list that should fail." 
dcomplete = [ - { - "id": 100, - "value": "abc" - }, - { - "id": 200, - "value": "abcd" - }, - { - "id": 300, - "value": "abcde" - }, + {"id": 100, "value": "abc"}, + {"id": 200, "value": "abcd"}, + {"id": 300, "value": "abcde"}, ] - dsub1 = [ - { - "id": 100, - "value": "abcd", - } - ] + dsub1 = [{"id": 100, "value": "abcd",}] - dsub2 = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 200, - "value": "abc", - } - ] + dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abc",}] - dsub3 = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 350, - "value": "abcde", - } - ] + dsub3 = [{"id": 100, "value": "abc",}, {"id": 350, "value": "abcde",}] - dsub4 = [ - { - "value": "abcx", - }, - { - "id": 300, - "value": "abcde", - } - ] + dsub4 = [{"value": "abcx",}, {"id": 300, "value": "abcde",}] assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None @@ -452,5 +286,5 @@ def test_json_list_start_failure(): assert json_cmp(dcomplete, dsub4) is not None -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(pytest.main()) diff --git a/tests/topotests/lib/test/test_run_and_expect.py b/tests/topotests/lib/test/test_run_and_expect.py index 3c22c20e7b..d65d5baf37 100755 --- a/tests/topotests/lib/test/test_run_and_expect.py +++ b/tests/topotests/lib/test/test_run_and_expect.py @@ -32,11 +32,12 @@ import pytest # Save the Current Working Directory to find lib files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topotest import run_and_expect_type + def test_run_and_expect_type(): "Test basic `run_and_expect_type` functionality." @@ -45,12 +46,16 @@ def test_run_and_expect_type(): return True # Test value success. 
- success, value = run_and_expect_type(return_true, bool, count=1, wait=0, avalue=True) + success, value = run_and_expect_type( + return_true, bool, count=1, wait=0, avalue=True + ) assert success is True assert value is True # Test value failure. - success, value = run_and_expect_type(return_true, bool, count=1, wait=0, avalue=False) + success, value = run_and_expect_type( + return_true, bool, count=1, wait=0, avalue=False + ) assert success is False assert value is True @@ -70,5 +75,5 @@ def test_run_and_expect_type(): assert value is True -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(pytest.main()) diff --git a/tests/topotests/lib/test/test_version.py b/tests/topotests/lib/test/test_version.py index 9204ac2084..7c2df00337 100755 --- a/tests/topotests/lib/test/test_version.py +++ b/tests/topotests/lib/test/test_version.py @@ -32,21 +32,22 @@ import pytest # Save the Current Working Directory to find lib files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topotest import version_cmp + def test_valid_versions(): "Test valid version compare results" - curver = '3.0' - samever = '3' - oldver = '2.0' - newver = '3.0.1' - newerver = '3.0.11' - vercustom = '3.0-dev' - verysmallinc = '3.0.0.0.0.0.0.1' + curver = "3.0" + samever = "3" + oldver = "2.0" + newver = "3.0.1" + newerver = "3.0.11" + vercustom = "3.0-dev" + verysmallinc = "3.0.0.0.0.0.0.1" assert version_cmp(curver, oldver) == 1 assert version_cmp(curver, newver) == -1 @@ -64,14 +65,15 @@ def test_valid_versions(): assert version_cmp(verysmallinc, verysmallinc) == 0 assert version_cmp(vercustom, verysmallinc) == -1 + def test_invalid_versions(): "Test invalid version strings" - curver = '3.0' - badver1 = '.1' - badver2 = '-1.0' - badver3 = '.' - badver4 = '3.-0.3' + curver = "3.0" + badver1 = ".1" + badver2 = "-1.0" + badver3 = "." 
+ badver4 = "3.-0.3" with pytest.raises(ValueError): assert version_cmp(curver, badver1) @@ -79,9 +81,10 @@ def test_invalid_versions(): assert version_cmp(curver, badver3) assert version_cmp(curver, badver4) + def test_regression_1(): """ Test regression on the following type of comparison: '3.0.2' > '3' Expected result is 1. """ - assert version_cmp('3.0.2', '3') == 1 + assert version_cmp("3.0.2", "3") == 1 diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 6859f5a076..6a6bbc7c78 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -70,6 +70,7 @@ CWD = os.path.dirname(os.path.realpath(__file__)) # all test functions without declaring a test local variable. global_tgen = None + def get_topogen(topo=None): """ Helper function to retrieve Topogen. Must be called with `topo` when called @@ -79,31 +80,34 @@ def get_topogen(topo=None): global_tgen.topo = topo return global_tgen + def set_topogen(tgen): "Helper function to set Topogen" # pylint: disable=W0603 global global_tgen global_tgen = tgen + # # Main class: topology builder # # Topogen configuration defaults tgen_defaults = { - 'verbosity': 'info', - 'frrdir': '/usr/lib/frr', - 'quaggadir': '/usr/lib/quagga', - 'routertype': 'frr', - 'memleak_path': None, + "verbosity": "info", + "frrdir": "/usr/lib/frr", + "quaggadir": "/usr/lib/quagga", + "routertype": "frr", + "memleak_path": None, } + class Topogen(object): "A topology test builder helper." 
- CONFIG_SECTION = 'topogen' + CONFIG_SECTION = "topogen" - def __init__(self, cls, modname='unnamed'): + def __init__(self, cls, modname="unnamed"): """ Topogen initialization function, takes the following arguments: * `cls`: the topology class that is child of mininet.topo @@ -117,16 +121,16 @@ class Topogen(object): self.switchn = 1 self.modname = modname self.errorsd = {} - self.errors = '' + self.errors = "" self.peern = 1 self._init_topo(cls) - logger.info('loading topology: {}'.format(self.modname)) + logger.info("loading topology: {}".format(self.modname)) @staticmethod def _mininet_reset(): "Reset the mininet environment" # Clean up the mininet environment - os.system('mn -c > /dev/null 2>&1') + os.system("mn -c > /dev/null 2>&1") def _init_topo(self, cls): """ @@ -138,10 +142,10 @@ class Topogen(object): # Test for MPLS Kernel modules available self.hasmpls = False - if not topotest.module_present('mpls-router'): - logger.info('MPLS tests will not run (missing mpls-router kernel module)') - elif not topotest.module_present('mpls-iptunnel'): - logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)') + if not topotest.module_present("mpls-router"): + logger.info("MPLS tests will not run (missing mpls-router kernel module)") + elif not topotest.module_present("mpls-iptunnel"): + logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)") else: self.hasmpls = True # Load the default topology configurations @@ -160,7 +164,7 @@ class Topogen(object): topotests. """ self.config = configparser.ConfigParser(tgen_defaults) - pytestini_path = os.path.join(CWD, '../pytest.ini') + pytestini_path = os.path.join(CWD, "../pytest.ini") self.config.read(pytestini_path) def add_router(self, name=None, cls=topotest.Router, **params): @@ -173,15 +177,15 @@ class Topogen(object): Returns a TopoRouter. 
""" if name is None: - name = 'r{}'.format(self.routern) + name = "r{}".format(self.routern) if name in self.gears: - raise KeyError('router already exists') + raise KeyError("router already exists") - params['frrdir'] = self.config.get(self.CONFIG_SECTION, 'frrdir') - params['quaggadir'] = self.config.get(self.CONFIG_SECTION, 'quaggadir') - params['memleak_path'] = self.config.get(self.CONFIG_SECTION, 'memleak_path') - if not params.has_key('routertype'): - params['routertype'] = self.config.get(self.CONFIG_SECTION, 'routertype') + params["frrdir"] = self.config.get(self.CONFIG_SECTION, "frrdir") + params["quaggadir"] = self.config.get(self.CONFIG_SECTION, "quaggadir") + params["memleak_path"] = self.config.get(self.CONFIG_SECTION, "memleak_path") + if not params.has_key("routertype"): + params["routertype"] = self.config.get(self.CONFIG_SECTION, "routertype") self.gears[name] = TopoRouter(self, cls, name, **params) self.routern += 1 @@ -195,9 +199,9 @@ class Topogen(object): Returns the switch name and number. """ if name is None: - name = 's{}'.format(self.switchn) + name = "s{}".format(self.switchn) if name in self.gears: - raise KeyError('switch already exists') + raise KeyError("switch already exists") self.gears[name] = TopoSwitch(self, cls, name) self.switchn += 1 @@ -211,9 +215,9 @@ class Topogen(object): * `defaultRoute`: the peer default route (e.g. 
'via 1.2.3.1') """ if name is None: - name = 'peer{}'.format(self.peern) + name = "peer{}".format(self.peern) if name in self.gears: - raise KeyError('exabgp peer already exists') + raise KeyError("exabgp peer already exists") self.gears[name] = TopoExaBGP(self, name, ip=ip, defaultRoute=defaultRoute) self.peern += 1 @@ -228,9 +232,9 @@ class Topogen(object): * TopoSwitch """ if not isinstance(node1, TopoGear): - raise ValueError('invalid node1 type') + raise ValueError("invalid node1 type") if not isinstance(node2, TopoGear): - raise ValueError('invalid node2 type') + raise ValueError("invalid node2 type") if ifname1 is None: ifname1 = node1.new_link() @@ -239,8 +243,7 @@ class Topogen(object): node1.register_link(ifname1, node2, ifname2) node2.register_link(ifname2, node1, ifname1) - self.topo.addLink(node1.name, node2.name, - intfName1=ifname1, intfName2=ifname2) + self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2) def get_gears(self, geartype): """ @@ -262,8 +265,11 @@ class Topogen(object): # Do stuff ``` """ - return dict((name, gear) for name, gear in self.gears.iteritems() - if isinstance(gear, geartype)) + return dict( + (name, gear) + for name, gear in self.gears.iteritems() + if isinstance(gear, geartype) + ) def routers(self): """ @@ -291,16 +297,16 @@ class Topogen(object): """ # If log_level is not specified use the configuration. 
if log_level is None: - log_level = self.config.get(self.CONFIG_SECTION, 'verbosity') + log_level = self.config.get(self.CONFIG_SECTION, "verbosity") # Set python logger level logger_config.set_log_level(log_level) # Run mininet - if log_level == 'debug': + if log_level == "debug": setLogLevel(log_level) - logger.info('starting topology: {}'.format(self.modname)) + logger.info("starting topology: {}".format(self.modname)) self.net.start() def start_router(self, router=None): @@ -326,7 +332,7 @@ class Topogen(object): first is a simple kill with no sleep, the second will sleep if not killed and try with a different signal. """ - logger.info('stopping topology: {}'.format(self.modname)) + logger.info("stopping topology: {}".format(self.modname)) errors = "" for gear in self.gears.values(): gear.stop(False, False) @@ -344,7 +350,8 @@ class Topogen(object): """ if not sys.stdin.isatty(): raise EnvironmentError( - 'you must run pytest with \'-s\' in order to use mininet CLI') + "you must run pytest with '-s' in order to use mininet CLI" + ) CLI(self.net) @@ -354,8 +361,9 @@ class Topogen(object): if self.routers_have_failure(): return False - memleak_file = (os.environ.get('TOPOTESTS_CHECK_MEMLEAK') or - self.config.get(self.CONFIG_SECTION, 'memleak_path')) + memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get( + self.CONFIG_SECTION, "memleak_path" + ) if memleak_file is None: return False return True @@ -382,7 +390,7 @@ class Topogen(object): code = len(self.errorsd) self.errorsd[code] = message - self.errors += '\n{}: {}'.format(code, message) + self.errors += "\n{}: {}".format(code, message) def has_errors(self): "Returns whether errors exist or not." 
@@ -393,23 +401,25 @@ class Topogen(object): if self.has_errors(): return True - errors = '' + errors = "" router_list = self.routers().values() for router in router_list: result = router.check_router_running() - if result != '': - errors += result + '\n' + if result != "": + errors += result + "\n" - if errors != '': - self.set_error(errors, 'router_error') + if errors != "": + self.set_error(errors, "router_error") assert False, errors return True return False + # # Topology gears (equipment) # + class TopoGear(object): "Abstract class for type checking" @@ -421,11 +431,11 @@ class TopoGear(object): self.linkn = 0 def __str__(self): - links = '' + links = "" for myif, dest in self.links.iteritems(): _, destif = dest - if links != '': - links += ',' + if links != "": + links += "," links += '"{}"<->"{}"'.format(myif, destif) return 'TopoGear<name="{}",links=[{}]>'.format(self.name, links) @@ -462,20 +472,22 @@ class TopoGear(object): enabled: whether we should enable or disable the interface """ if myif not in self.links.keys(): - raise KeyError('interface doesn\'t exists') + raise KeyError("interface doesn't exists") if enabled is True: - operation = 'up' + operation = "up" else: - operation = 'down' + operation = "down" - logger.info('setting node "{}" link "{}" to state "{}"'.format( - self.name, myif, operation - )) - extract='' + logger.info( + 'setting node "{}" link "{}" to state "{}"'.format( + self.name, myif, operation + ) + ) + extract = "" if netns is not None: - extract = 'ip netns exec {} '.format(netns) - return self.run('{}ip link set dev {} {}'.format(extract, myif, operation)) + extract = "ip netns exec {} ".format(netns) + return self.run("{}ip link set dev {} {}".format(extract, myif, operation)) def peer_link_enable(self, myif, enabled=True, netns=None): """ @@ -487,7 +499,7 @@ class TopoGear(object): peer disables their interface our interface status changes to no link. 
""" if myif not in self.links.keys(): - raise KeyError('interface doesn\'t exists') + raise KeyError("interface doesn't exists") node, nodeif = self.links[myif] node.link_enable(nodeif, enabled, netns) @@ -498,7 +510,7 @@ class TopoGear(object): NOTE: This function should only be called by Topogen. """ - ifname = '{}-eth{}'.format(self.name, self.linkn) + ifname = "{}-eth{}".format(self.name, self.linkn) self.linkn += 1 return ifname @@ -509,10 +521,11 @@ class TopoGear(object): NOTE: This function should only be called by Topogen. """ if myif in self.links.keys(): - raise KeyError('interface already exists') + raise KeyError("interface already exists") self.links[myif] = (node, nodeif) + class TopoRouter(TopoGear): """ Router abstraction. @@ -520,11 +533,11 @@ class TopoRouter(TopoGear): # The default required directories by Quagga/FRR PRIVATE_DIRS = [ - '/etc/frr', - '/etc/quagga', - '/var/run/frr', - '/var/run/quagga', - '/var/log' + "/etc/frr", + "/etc/quagga", + "/var/run/frr", + "/var/run/quagga", + "/var/log", ] # Router Daemon enumeration definition. 
@@ -543,20 +556,20 @@ class TopoRouter(TopoGear): RD_BFD = 13 RD_SHARP = 14 RD = { - RD_ZEBRA: 'zebra', - RD_RIP: 'ripd', - RD_RIPNG: 'ripngd', - RD_OSPF: 'ospfd', - RD_OSPF6: 'ospf6d', - RD_ISIS: 'isisd', - RD_BGP: 'bgpd', - RD_PIM: 'pimd', - RD_LDP: 'ldpd', - RD_EIGRP: 'eigrpd', - RD_NHRP: 'nhrpd', - RD_STATIC: 'staticd', - RD_BFD: 'bfdd', - RD_SHARP: 'sharpd', + RD_ZEBRA: "zebra", + RD_RIP: "ripd", + RD_RIPNG: "ripngd", + RD_OSPF: "ospfd", + RD_OSPF6: "ospf6d", + RD_ISIS: "isisd", + RD_BGP: "bgpd", + RD_PIM: "pimd", + RD_LDP: "ldpd", + RD_EIGRP: "eigrpd", + RD_NHRP: "nhrpd", + RD_STATIC: "staticd", + RD_BFD: "bfdd", + RD_SHARP: "sharpd", } def __init__(self, tgen, cls, name, **params): @@ -574,34 +587,34 @@ class TopoRouter(TopoGear): self.name = name self.cls = cls self.options = {} - self.routertype = params.get('routertype', 'frr') - if not params.has_key('privateDirs'): - params['privateDirs'] = self.PRIVATE_DIRS + self.routertype = params.get("routertype", "frr") + if not params.has_key("privateDirs"): + params["privateDirs"] = self.PRIVATE_DIRS - self.options['memleak_path'] = params.get('memleak_path', None) + self.options["memleak_path"] = params.get("memleak_path", None) # Create new log directory - self.logdir = '/tmp/topotests/{}'.format(self.tgen.modname) + self.logdir = "/tmp/topotests/{}".format(self.tgen.modname) # Clean up before starting new log files: avoids removing just created # log files. 
self._prepare_tmpfiles() # Propagate the router log directory - params['logdir'] = self.logdir + params["logdir"] = self.logdir - #setup the per node directory - dir = '{}/{}'.format(self.logdir, self.name) - os.system('mkdir -p ' + dir) - os.system('chmod -R go+rw /tmp/topotests') + # setup the per node directory + dir = "{}/{}".format(self.logdir, self.name) + os.system("mkdir -p " + dir) + os.system("chmod -R go+rw /tmp/topotests") # Open router log file - logfile = '{0}/{1}.log'.format(self.logdir, name) + logfile = "{0}/{1}.log".format(self.logdir, name) self.logger = logger_config.get_logger(name=name, target=logfile) self.tgen.topo.addNode(self.name, cls=self.cls, **params) def __str__(self): gear = super(TopoRouter, self).__str__() - gear += ' TopoRouter<>' + gear += " TopoRouter<>" return gear def _prepare_tmpfiles(self): @@ -622,9 +635,9 @@ class TopoRouter(TopoGear): os.chmod(self.logdir, 0o1777) # Try to find relevant old logfiles in /tmp and delete them - map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) # Remove old core files - map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) def check_capability(self, daemon, param): """ @@ -651,7 +664,7 @@ class TopoRouter(TopoGear): """ Run a series of checks and returns a status string. """ - self.logger.info('checking if daemons are running') + self.logger.info("checking if daemons are running") return self.tgen.net[self.name].checkRouterRunning() def start(self): @@ -663,7 +676,7 @@ class TopoRouter(TopoGear): * Start daemons (e.g. 
FRR/Quagga) * Configure daemon logging files """ - self.logger.debug('starting') + self.logger.debug("starting") nrouter = self.tgen.net[self.name] result = nrouter.startRouter(self.tgen) @@ -672,15 +685,17 @@ class TopoRouter(TopoGear): for daemon, enabled in nrouter.daemons.iteritems(): if enabled == 0: continue - self.vtysh_cmd('configure terminal\nlog commands\nlog file {}.log'.format( - daemon), daemon=daemon) + self.vtysh_cmd( + "configure terminal\nlog commands\nlog file {}.log".format(daemon), + daemon=daemon, + ) - if result != '': + if result != "": self.tgen.set_error(result) else: # Enable MPLS processing on all interfaces. for interface in self.links.keys(): - set_sysctl(nrouter, 'net.mpls.conf.{}.input'.format(interface), 1) + set_sysctl(nrouter, "net.mpls.conf.{}.input".format(interface), 1) return result @@ -689,7 +704,7 @@ class TopoRouter(TopoGear): Stop router: * Kill daemons """ - self.logger.debug('stopping') + self.logger.debug("stopping") return self.tgen.net[self.name].stopRouter(wait, assertOnError) def vtysh_cmd(self, command, isjson=False, daemon=None): @@ -701,25 +716,26 @@ class TopoRouter(TopoGear): return output for each command. See vtysh_multicmd() for more details. 
""" # Detect multi line commands - if command.find('\n') != -1: + if command.find("\n") != -1: return self.vtysh_multicmd(command, daemon=daemon) - dparam = '' + dparam = "" if daemon is not None: - dparam += '-d {}'.format(daemon) + dparam += "-d {}".format(daemon) vtysh_command = 'vtysh {} -c "{}" 2>/dev/null'.format(dparam, command) output = self.run(vtysh_command) - self.logger.info('\nvtysh command => {}\nvtysh output <= {}'.format( - command, output)) + self.logger.info( + "\nvtysh command => {}\nvtysh output <= {}".format(command, output) + ) if isjson is False: return output try: return json.loads(output) except ValueError: - logger.warning('vtysh_cmd: failed to convert json output') + logger.warning("vtysh_cmd: failed to convert json output") return {} def vtysh_multicmd(self, commands, pretty_output=True, daemon=None): @@ -734,21 +750,22 @@ class TopoRouter(TopoGear): # Prepare the temporary file that will hold the commands fname = topotest.get_file(commands) - dparam = '' + dparam = "" if daemon is not None: - dparam += '-d {}'.format(daemon) + dparam += "-d {}".format(daemon) # Run the commands and delete the temporary file if pretty_output: - vtysh_command = 'vtysh {} < {}'.format(dparam, fname) + vtysh_command = "vtysh {} < {}".format(dparam, fname) else: - vtysh_command = 'vtysh {} -f {}'.format(dparam, fname) + vtysh_command = "vtysh {} -f {}".format(dparam, fname) res = self.run(vtysh_command) os.unlink(fname) - self.logger.info('\nvtysh command => "{}"\nvtysh output <= "{}"'.format( - vtysh_command, res)) + self.logger.info( + '\nvtysh command => "{}"\nvtysh output <= "{}"'.format(vtysh_command, res) + ) return res @@ -760,27 +777,29 @@ class TopoRouter(TopoGear): NOTE: to run this you must have the environment variable TOPOTESTS_CHECK_MEMLEAK set or memleak_path configured in `pytest.ini`. 
""" - memleak_file = os.environ.get('TOPOTESTS_CHECK_MEMLEAK') or self.options['memleak_path'] + memleak_file = ( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.options["memleak_path"] + ) if memleak_file is None: return self.stop() - self.logger.info('running memory leak report') + self.logger.info("running memory leak report") self.tgen.net[self.name].report_memory_leaks(memleak_file, testname) def version_info(self): "Get equipment information from 'show version'." - output = self.vtysh_cmd('show version').split('\n')[0] - columns = topotest.normalize_text(output).split(' ') + output = self.vtysh_cmd("show version").split("\n")[0] + columns = topotest.normalize_text(output).split(" ") try: return { - 'type': columns[0], - 'version': columns[1], + "type": columns[0], + "version": columns[1], } except IndexError: return { - 'type': None, - 'version': None, + "type": None, + "version": None, } def has_version(self, cmpop, version): @@ -802,19 +821,21 @@ class TopoRouter(TopoGear): Compares router type with `rtype`. Returns `True` if the type matches, otherwise `false`. """ - curtype = self.version_info()['type'] + curtype = self.version_info()["type"] return rtype == curtype def has_mpls(self): nrouter = self.tgen.net[self.name] return nrouter.hasmpls + class TopoSwitch(TopoGear): """ Switch abstraction. Has the following properties: * cls: switch class that will be used to instantiate * name: switch name """ + # pylint: disable=too-few-public-methods def __init__(self, tgen, cls, name): @@ -827,9 +848,10 @@ class TopoSwitch(TopoGear): def __str__(self): gear = super(TopoSwitch, self).__str__() - gear += ' TopoSwitch<>' + gear += " TopoSwitch<>" return gear + class TopoHost(TopoGear): "Host abstraction." 
# pylint: disable=too-few-public-methods @@ -853,18 +875,21 @@ class TopoHost(TopoGear): def __str__(self): gear = super(TopoHost, self).__str__() gear += ' TopoHost<ip="{}",defaultRoute="{}",privateDirs="{}">'.format( - self.options['ip'], self.options['defaultRoute'], - str(self.options['privateDirs'])) + self.options["ip"], + self.options["defaultRoute"], + str(self.options["privateDirs"]), + ) return gear + class TopoExaBGP(TopoHost): "ExaBGP peer abstraction." # pylint: disable=too-few-public-methods PRIVATE_DIRS = [ - '/etc/exabgp', - '/var/run/exabgp', - '/var/log', + "/etc/exabgp", + "/var/run/exabgp", + "/var/log", ] def __init__(self, tgen, name, **params): @@ -878,13 +903,13 @@ class TopoExaBGP(TopoHost): has a privateDirs already defined and contains functions to handle ExaBGP things. """ - params['privateDirs'] = self.PRIVATE_DIRS + params["privateDirs"] = self.PRIVATE_DIRS super(TopoExaBGP, self).__init__(tgen, name, **params) self.tgen.topo.addHost(name, **params) def __str__(self): gear = super(TopoExaBGP, self).__str__() - gear += ' TopoExaBGP<>'.format() + gear += " TopoExaBGP<>".format() return gear def start(self, peer_dir, env_file=None): @@ -895,22 +920,22 @@ class TopoExaBGP(TopoHost): * Make all python files runnable * Run ExaBGP with env file `env_file` and configuration peer*/exabgp.cfg """ - self.run('mkdir /etc/exabgp') - self.run('chmod 755 /etc/exabgp') - self.run('cp {}/* /etc/exabgp/'.format(peer_dir)) + self.run("mkdir /etc/exabgp") + self.run("chmod 755 /etc/exabgp") + self.run("cp {}/* /etc/exabgp/".format(peer_dir)) if env_file is not None: - self.run('cp {} /etc/exabgp/exabgp.env'.format(env_file)) - self.run('chmod 644 /etc/exabgp/*') - self.run('chmod a+x /etc/exabgp/*.py') - self.run('chown -R exabgp:exabgp /etc/exabgp') - output = self.run('exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg') + self.run("cp {} /etc/exabgp/exabgp.env".format(env_file)) + self.run("chmod 644 /etc/exabgp/*") + self.run("chmod a+x 
/etc/exabgp/*.py") + self.run("chown -R exabgp:exabgp /etc/exabgp") + output = self.run("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg") if output == None or len(output) == 0: - output = '<none>' - logger.info('{} exabgp started, output={}'.format(self.name, output)) + output = "<none>" + logger.info("{} exabgp started, output={}".format(self.name, output)) def stop(self, wait=True, assertOnError=True): "Stop ExaBGP peer and kill the daemon" - self.run('kill `cat /var/run/exabgp/exabgp.pid`') + self.run("kill `cat /var/run/exabgp/exabgp.pid`") return "" @@ -928,160 +953,189 @@ def diagnose_env_linux(): ret = True # Test log path exists before installing handler. - if not os.path.isdir('/tmp'): - logger.warning('could not find /tmp for logs') + if not os.path.isdir("/tmp"): + logger.warning("could not find /tmp for logs") else: - os.system('mkdir /tmp/topotests') + os.system("mkdir /tmp/topotests") # Log diagnostics to file so it can be examined later. - fhandler = logging.FileHandler(filename='/tmp/topotests/diagnostics.txt') + fhandler = logging.FileHandler(filename="/tmp/topotests/diagnostics.txt") fhandler.setLevel(logging.DEBUG) fhandler.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) logger.addHandler(fhandler) - logger.info('Running environment diagnostics') + logger.info("Running environment diagnostics") # Load configuration config = configparser.ConfigParser(tgen_defaults) - pytestini_path = os.path.join(CWD, '../pytest.ini') + pytestini_path = os.path.join(CWD, "../pytest.ini") config.read(pytestini_path) # Assert that we are running as root if os.getuid() != 0: - logger.error('you must run topotest as root') + logger.error("you must run topotest as root") ret = False # Assert that we have mininet - if os.system('which mn >/dev/null 2>/dev/null') != 0: - logger.error('could not find mininet binary (mininet is not installed)') + if os.system("which 
mn >/dev/null 2>/dev/null") != 0: + logger.error("could not find mininet binary (mininet is not installed)") ret = False # Assert that we have iproute installed - if os.system('which ip >/dev/null 2>/dev/null') != 0: - logger.error('could not find ip binary (iproute is not installed)') + if os.system("which ip >/dev/null 2>/dev/null") != 0: + logger.error("could not find ip binary (iproute is not installed)") ret = False # Assert that we have gdb installed - if os.system('which gdb >/dev/null 2>/dev/null') != 0: - logger.error('could not find gdb binary (gdb is not installed)') + if os.system("which gdb >/dev/null 2>/dev/null") != 0: + logger.error("could not find gdb binary (gdb is not installed)") ret = False # Assert that FRR utilities exist - frrdir = config.get('topogen', 'frrdir') + frrdir = config.get("topogen", "frrdir") hasfrr = False if not os.path.isdir(frrdir): - logger.error('could not find {} directory'.format(frrdir)) + logger.error("could not find {} directory".format(frrdir)) ret = False else: hasfrr = True try: - pwd.getpwnam('frr')[2] + pwd.getpwnam("frr")[2] except KeyError: logger.warning('could not find "frr" user') try: - grp.getgrnam('frr')[2] + grp.getgrnam("frr")[2] except KeyError: logger.warning('could not find "frr" group') try: - if 'frr' not in grp.getgrnam('frrvty').gr_mem: - logger.error('"frr" user and group exist, but user is not under "frrvty"') + if "frr" not in grp.getgrnam("frrvty").gr_mem: + logger.error( + '"frr" user and group exist, but user is not under "frrvty"' + ) except KeyError: logger.warning('could not find "frrvty" group') - for fname in ['zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', - 'isisd', 'pimd', 'ldpd']: + for fname in [ + "zebra", + "ospfd", + "ospf6d", + "bgpd", + "ripd", + "ripngd", + "isisd", + "pimd", + "ldpd", + ]: path = os.path.join(frrdir, fname) if not os.path.isfile(path): # LDPd is an exception - if fname == 'ldpd': - logger.info('could not find {} in {}'.format(fname, frrdir) + - '(LDPd 
tests will not run)') + if fname == "ldpd": + logger.info( + "could not find {} in {}".format(fname, frrdir) + + "(LDPd tests will not run)" + ) continue - logger.warning('could not find {} in {}'.format(fname, frrdir)) + logger.warning("could not find {} in {}".format(fname, frrdir)) ret = False else: - if fname != 'zebra': + if fname != "zebra": continue - os.system( - '{} -v 2>&1 >/tmp/topotests/frr_zebra.txt'.format(path) - ) + os.system("{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path)) # Assert that Quagga utilities exist - quaggadir = config.get('topogen', 'quaggadir') + quaggadir = config.get("topogen", "quaggadir") if hasfrr: # if we have frr, don't check for quagga pass elif not os.path.isdir(quaggadir): - logger.info('could not find {} directory (quagga tests will not run)'.format(quaggadir)) + logger.info( + "could not find {} directory (quagga tests will not run)".format(quaggadir) + ) else: ret = True try: - pwd.getpwnam('quagga')[2] + pwd.getpwnam("quagga")[2] except KeyError: logger.info('could not find "quagga" user') try: - grp.getgrnam('quagga')[2] + grp.getgrnam("quagga")[2] except KeyError: logger.info('could not find "quagga" group') try: - if 'quagga' not in grp.getgrnam('quaggavty').gr_mem: - logger.error('"quagga" user and group exist, but user is not under "quaggavty"') + if "quagga" not in grp.getgrnam("quaggavty").gr_mem: + logger.error( + '"quagga" user and group exist, but user is not under "quaggavty"' + ) except KeyError: logger.warning('could not find "quaggavty" group') - for fname in ['zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', - 'isisd', 'pimd']: + for fname in [ + "zebra", + "ospfd", + "ospf6d", + "bgpd", + "ripd", + "ripngd", + "isisd", + "pimd", + ]: path = os.path.join(quaggadir, fname) if not os.path.isfile(path): - logger.warning('could not find {} in {}'.format(fname, quaggadir)) + logger.warning("could not find {} in {}".format(fname, quaggadir)) ret = False else: - if fname != 'zebra': + if fname != 
"zebra": continue - os.system( - '{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt'.format(path) - ) + os.system("{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt".format(path)) # Test MPLS availability krel = platform.release() - if topotest.version_cmp(krel, '4.5') < 0: - logger.info('LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format(krel)) + if topotest.version_cmp(krel, "4.5") < 0: + logger.info( + 'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format( + krel + ) + ) # Test for MPLS Kernel modules available - if not topotest.module_present('mpls-router', load=False) != 0: - logger.info('LDPd tests will not run (missing mpls-router kernel module)') - if not topotest.module_present('mpls-iptunnel', load=False) != 0: - logger.info('LDPd tests will not run (missing mpls-iptunnel kernel module)') + if not topotest.module_present("mpls-router", load=False) != 0: + logger.info("LDPd tests will not run (missing mpls-router kernel module)") + if not topotest.module_present("mpls-iptunnel", load=False) != 0: + logger.info("LDPd tests will not run (missing mpls-iptunnel kernel module)") # TODO remove me when we start supporting exabgp >= 4 try: - output = subprocess.check_output(['exabgp', '-v']) - line = output.split('\n')[0] - version = line.split(' ')[2] - if topotest.version_cmp(version, '4') >= 0: - logger.warning('BGP topologies are still using exabgp version 3, expect failures') + output = subprocess.check_output(["exabgp", "-v"]) + line = output.split("\n")[0] + version = line.split(" ")[2] + if topotest.version_cmp(version, "4") >= 0: + logger.warning( + "BGP topologies are still using exabgp version 3, expect failures" + ) # We want to catch all exceptions # pylint: disable=W0702 except: - logger.warning('failed to find exabgp or returned error') + logger.warning("failed to find exabgp or returned error") # After we logged the output to file, remove the handler. 
logger.removeHandler(fhandler) return ret + def diagnose_env_freebsd(): return True + def diagnose_env(): if sys.platform.startswith("linux"): return diagnose_env_linux() diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index fff5a1e82f..b25317ba7f 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -29,13 +29,14 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from lib.common_config import ( - number_to_row, number_to_column, + number_to_row, + number_to_column, load_config_to_router, create_interfaces_cfg, create_static_routes, create_prefix_lists, create_route_maps, - create_bgp_community_lists + create_bgp_community_lists, ) from lib.bgp import create_router_bgp @@ -53,56 +54,69 @@ def build_topo_from_json(tgen, topo): * `topo`: json file data """ - ROUTER_LIST = sorted(topo['routers'].keys(), - key=lambda x: int(re_search('\d+', x).group(0))) + ROUTER_LIST = sorted( + topo["routers"].keys(), key=lambda x: int(re_search("\d+", x).group(0)) + ) listRouters = ROUTER_LIST[:] for routerN in ROUTER_LIST: - logger.info('Topo: Add router {}'.format(routerN)) + logger.info("Topo: Add router {}".format(routerN)) tgen.add_router(routerN) listRouters.append(routerN) - if 'ipv4base' in topo: - ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4']) - ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask']) - if topo['link_ip_start']['v4mask'] < 32: + if "ipv4base" in topo: + ipv4Next = ipaddr.IPv4Address(topo["link_ip_start"]["ipv4"]) + ipv4Step = 2 ** (32 - topo["link_ip_start"]["v4mask"]) + if topo["link_ip_start"]["v4mask"] < 32: ipv4Next += 1 - if 'ipv6base' in topo: - ipv6Next = ipaddr.IPv6Address(topo['link_ip_start']['ipv6']) - ipv6Step = 2 ** (128 - topo['link_ip_start']['v6mask']) - if topo['link_ip_start']['v6mask'] < 127: + if "ipv6base" in topo: + ipv6Next = ipaddr.IPv6Address(topo["link_ip_start"]["ipv6"]) + ipv6Step = 2 ** (128 - 
topo["link_ip_start"]["v6mask"]) + if topo["link_ip_start"]["v6mask"] < 127: ipv6Next += 1 for router in listRouters: - topo['routers'][router]['nextIfname'] = 0 + topo["routers"][router]["nextIfname"] = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces - if 'links' in topo['routers'][curRouter]: + if "links" in topo["routers"][curRouter]: + def link_sort(x): - if x == 'lo': + if x == "lo": return 0 - elif 'link' in x: - return int(x.split('-link')[1]) + elif "link" in x: + return int(x.split("-link")[1]) else: - return int(re_search('\d+', x).group(0)) - for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \ - iteritems(), - key=lambda x: link_sort(x[0])): - currRouter_lo_json = \ - topo['routers'][curRouter]['links'][destRouterLink] + return int(re_search("\d+", x).group(0)) + + for destRouterLink, data in sorted( + topo["routers"][curRouter]["links"].iteritems(), + key=lambda x: link_sort(x[0]), + ): + currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink] # Loopback interfaces - if 'type' in data and data['type'] == 'loopback': - if 'ipv4' in currRouter_lo_json and \ - currRouter_lo_json['ipv4'] == 'auto': - currRouter_lo_json['ipv4'] = '{}{}.{}/{}'. \ - format(topo['lo_prefix']['ipv4'], number_to_row(curRouter), \ - number_to_column(curRouter), topo['lo_prefix']['v4mask']) - if 'ipv6' in currRouter_lo_json and \ - currRouter_lo_json['ipv6'] == 'auto': - currRouter_lo_json['ipv6'] = '{}{}:{}/{}'. 
\ - format(topo['lo_prefix']['ipv6'], number_to_row(curRouter), \ - number_to_column(curRouter), topo['lo_prefix']['v6mask']) + if "type" in data and data["type"] == "loopback": + if ( + "ipv4" in currRouter_lo_json + and currRouter_lo_json["ipv4"] == "auto" + ): + currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format( + topo["lo_prefix"]["ipv4"], + number_to_row(curRouter), + number_to_column(curRouter), + topo["lo_prefix"]["v4mask"], + ) + if ( + "ipv6" in currRouter_lo_json + and currRouter_lo_json["ipv6"] == "auto" + ): + currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format( + topo["lo_prefix"]["ipv6"], + number_to_row(curRouter), + number_to_column(curRouter), + topo["lo_prefix"]["v6mask"], + ) if "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList @@ -119,52 +133,63 @@ def build_topo_from_json(tgen, topo): curRouterLink = curRouter if destRouter in listRouters: - currRouter_link_json = \ - topo['routers'][curRouter]['links'][destRouterLink] - destRouter_link_json = \ - topo['routers'][destRouter]['links'][curRouterLink] + currRouter_link_json = topo["routers"][curRouter]["links"][ + destRouterLink + ] + destRouter_link_json = topo["routers"][destRouter]["links"][ + curRouterLink + ] # Assigning name to interfaces - currRouter_link_json['interface'] = \ - '{}-{}-eth{}'.format(curRouter, destRouter, topo['routers'] \ - [curRouter]['nextIfname']) - destRouter_link_json['interface'] = \ - '{}-{}-eth{}'.format(destRouter, curRouter, topo['routers'] \ - [destRouter]['nextIfname']) + currRouter_link_json["interface"] = "{}-{}-eth{}".format( + curRouter, destRouter, topo["routers"][curRouter]["nextIfname"] + ) + destRouter_link_json["interface"] = "{}-{}-eth{}".format( + destRouter, curRouter, topo["routers"][destRouter]["nextIfname"] + ) - topo['routers'][curRouter]['nextIfname'] += 1 - topo['routers'][destRouter]['nextIfname'] += 1 + topo["routers"][curRouter]["nextIfname"] += 1 + topo["routers"][destRouter]["nextIfname"] += 1 # Linking routers to 
each other as defined in JSON file - tgen.gears[curRouter].add_link(tgen.gears[destRouter], - topo['routers'][curRouter]['links'][destRouterLink] \ - ['interface'], topo['routers'][destRouter]['links'] \ - [curRouterLink]['interface']) + tgen.gears[curRouter].add_link( + tgen.gears[destRouter], + topo["routers"][curRouter]["links"][destRouterLink][ + "interface" + ], + topo["routers"][destRouter]["links"][curRouterLink][ + "interface" + ], + ) # IPv4 - if 'ipv4' in currRouter_link_json: - if currRouter_link_json['ipv4'] == 'auto': - currRouter_link_json['ipv4'] = \ - '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \ - 'v4mask']) - destRouter_link_json['ipv4'] = \ - '{}/{}'.format(ipv4Next + 1, topo['link_ip_start'][ \ - 'v4mask']) + if "ipv4" in currRouter_link_json: + if currRouter_link_json["ipv4"] == "auto": + currRouter_link_json["ipv4"] = "{}/{}".format( + ipv4Next, topo["link_ip_start"]["v4mask"] + ) + destRouter_link_json["ipv4"] = "{}/{}".format( + ipv4Next + 1, topo["link_ip_start"]["v4mask"] + ) ipv4Next += ipv4Step # IPv6 - if 'ipv6' in currRouter_link_json: - if currRouter_link_json['ipv6'] == 'auto': - currRouter_link_json['ipv6'] = \ - '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \ - 'v6mask']) - destRouter_link_json['ipv6'] = \ - '{}/{}'.format(ipv6Next + 1, topo['link_ip_start'][ \ - 'v6mask']) + if "ipv6" in currRouter_link_json: + if currRouter_link_json["ipv6"] == "auto": + currRouter_link_json["ipv6"] = "{}/{}".format( + ipv6Next, topo["link_ip_start"]["v6mask"] + ) + destRouter_link_json["ipv6"] = "{}/{}".format( + ipv6Next + 1, topo["link_ip_start"]["v6mask"] + ) ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step) - logger.debug("Generated link data for router: %s\n%s", curRouter, - json_dumps(topo["routers"][curRouter]["links"], - indent=4, sort_keys=True)) + logger.debug( + "Generated link data for router: %s\n%s", + curRouter, + json_dumps( + topo["routers"][curRouter]["links"], indent=4, sort_keys=True + ), + ) def 
build_config_from_json(tgen, topo, save_bkup=True): @@ -176,27 +201,27 @@ def build_config_from_json(tgen, topo, save_bkup=True): * `topo`: json file data """ - func_dict = OrderedDict([ - ("links", create_interfaces_cfg), - ("static_routes", create_static_routes), - ("prefix_lists", create_prefix_lists), - ("bgp_community_list", create_bgp_community_lists), - ("route_maps", create_route_maps), - ("bgp", create_router_bgp) - ]) + func_dict = OrderedDict( + [ + ("links", create_interfaces_cfg), + ("static_routes", create_static_routes), + ("prefix_lists", create_prefix_lists), + ("bgp_community_list", create_bgp_community_lists), + ("route_maps", create_route_maps), + ("bgp", create_router_bgp), + ] + ) data = topo["routers"] for func_type in func_dict.keys(): - logger.info('Checking for {} configuration in input data'.format( - func_type)) + logger.info("Checking for {} configuration in input data".format(func_type)) func_dict.get(func_type)(tgen, data, build=True) - for router in sorted(topo['routers'].keys()): - logger.debug('Configuring router {}...'.format(router)) + for router in sorted(topo["routers"].keys()): + logger.debug("Configuring router {}...".format(router)) result = load_config_to_router(tgen, router, save_bkup) if not result: logger.info("Failed while configuring {}".format(router)) pytest.exit(1) - diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py index f149f34eb3..0dfa870930 100644 --- a/tests/topotests/lib/topolog.py +++ b/tests/topotests/lib/topolog.py @@ -31,22 +31,25 @@ import logging # Helper dictionary to convert Topogen logging levels to Python's logging. 
DEBUG_TOPO2LOGGING = { - 'debug': logging.DEBUG, - 'info': logging.INFO, - 'output': logging.INFO, - 'warning': logging.WARNING, - 'error': logging.ERROR, - 'critical': logging.CRITICAL, + "debug": logging.DEBUG, + "info": logging.INFO, + "output": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, } + class InfoFilter(logging.Filter): def filter(self, rec): return rec.levelno in (logging.DEBUG, logging.INFO) + # # Logger class definition # + class Logger(object): """ Logger class that encapsulates logging functions, internaly it uses Python @@ -58,32 +61,32 @@ class Logger(object): def __init__(self): # Create default global logger self.log_level = logging.INFO - self.logger = logging.Logger('topolog', level=self.log_level) + self.logger = logging.Logger("topolog", level=self.log_level) handler_stdout = logging.StreamHandler(sys.stdout) handler_stdout.setLevel(logging.DEBUG) handler_stdout.addFilter(InfoFilter()) handler_stdout.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) handler_stderr = logging.StreamHandler() handler_stderr.setLevel(logging.WARNING) handler_stderr.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) self.logger.addHandler(handler_stdout) self.logger.addHandler(handler_stderr) # Handle more loggers - self.loggers = {'topolog': self.logger} + self.loggers = {"topolog": self.logger} def set_log_level(self, level): "Set the logging level" self.log_level = DEBUG_TOPO2LOGGING.get(level) self.logger.setLevel(self.log_level) - def get_logger(self, name='topolog', log_level=None, target=sys.stdout): + def get_logger(self, name="topolog", log_level=None, target=sys.stdout): """ Get a new logger entry. Allows creating different loggers for formating, filtering or handling (file, stream or stdout/stderr). 
@@ -100,12 +103,13 @@ class Logger(object): handler = logging.StreamHandler(stream=target) handler.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) nlogger.addHandler(handler) self.loggers[name] = nlogger return nlogger + # # Global variables # diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 9e1d344687..fab101cb25 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -50,6 +50,7 @@ from mininet.log import setLogLevel, info from mininet.cli import CLI from mininet.link import Intf + class json_cmp_result(object): "json_cmp result class for better assertion messages" @@ -66,7 +67,7 @@ class json_cmp_result(object): return len(self.errors) > 0 def __str__(self): - return '\n'.join(self.errors) + return "\n".join(self.errors) def json_diff(d1, d2): @@ -74,12 +75,12 @@ def json_diff(d1, d2): Returns a string with the difference between JSON data. 
""" json_format_opts = { - 'indent': 4, - 'sort_keys': True, + "indent": 4, + "sort_keys": True, } dstr1 = json.dumps(d1, **json_format_opts) dstr2 = json.dumps(d2, **json_format_opts) - return difflines(dstr2, dstr1, title1='Expected value', title2='Current value', n=0) + return difflines(dstr2, dstr1, title1="Expected value", title2="Current value", n=0) def _json_list_cmp(list1, list2, parent, result): @@ -87,18 +88,21 @@ def _json_list_cmp(list1, list2, parent, result): # Check second list2 type if not isinstance(list1, type([])) or not isinstance(list2, type([])): result.add_error( - '{} has different type than expected '.format(parent) + - '(have {}, expected {}):\n{}'.format( - type(list1), type(list2), json_diff(list1, list2))) + "{} has different type than expected ".format(parent) + + "(have {}, expected {}):\n{}".format( + type(list1), type(list2), json_diff(list1, list2) + ) + ) return # Check list size if len(list2) > len(list1): result.add_error( - '{} too few items '.format(parent) + - '(have {}, expected {}:\n {})'.format( - len(list1), len(list2), - json_diff(list1, list2))) + "{} too few items ".format(parent) + + "(have {}, expected {}:\n {})".format( + len(list1), len(list2), json_diff(list1, list2) + ) + ) return # List all unmatched items errors @@ -106,7 +110,7 @@ def _json_list_cmp(list1, list2, parent, result): for expected in list2: matched = False for value in list1: - if json_cmp({'json': value}, {'json': expected}) is None: + if json_cmp({"json": value}, {"json": expected}) is None: matched = True break @@ -116,8 +120,8 @@ def _json_list_cmp(list1, list2, parent, result): # If there are unmatched items, error out. if unmatched: result.add_error( - '{} value is different (\n{})'.format( - parent, json_diff(list1, list2))) + "{} value is different (\n{})".format(parent, json_diff(list1, list2)) + ) def json_cmp(d1, d2): @@ -131,7 +135,7 @@ def json_cmp(d1, d2): Note: key absence can be tested by adding a key with value `None`. 
""" - squeue = [(d1, d2, 'json')] + squeue = [(d1, d2, "json")] result = json_cmp_result() for s in squeue: @@ -150,23 +154,33 @@ def json_cmp(d1, d2): s2_req = set([key for key in nd2 if nd2[key] is not None]) diff = s2_req - s1 if diff != set({}): - result.add_error('expected key(s) {} in {} (have {}):\n{}'.format( - str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2))) + result.add_error( + "expected key(s) {} in {} (have {}):\n{}".format( + str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2) + ) + ) for key in s2.intersection(s1): # Test for non existence of key in d2 if nd2[key] is None: - result.add_error('"{}" should not exist in {} (have {}):\n{}'.format( - key, parent, str(s1), json_diff(nd1[key], nd2[key]))) + result.add_error( + '"{}" should not exist in {} (have {}):\n{}'.format( + key, parent, str(s1), json_diff(nd1[key], nd2[key]) + ) + ) continue # If nd1 key is a dict, we have to recurse in it later. if isinstance(nd2[key], type({})): if not isinstance(nd1[key], type({})): result.add_error( - '{}["{}"] has different type than expected '.format(parent, key) + - '(have {}, expected {}):\n{}'.format( - type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key]))) + '{}["{}"] has different type than expected '.format(parent, key) + + "(have {}, expected {}):\n{}".format( + type(nd1[key]), + type(nd2[key]), + json_diff(nd1[key], nd2[key]), + ) + ) continue nparent = '{}["{}"]'.format(parent, key) squeue.append((nd1[key], nd2[key], nparent)) @@ -181,7 +195,9 @@ def json_cmp(d1, d2): if nd1[key] != nd2[key]: result.add_error( '{}["{}"] value is different (\n{})'.format( - parent, key, json_diff(nd1[key], nd2[key]))) + parent, key, json_diff(nd1[key], nd2[key]) + ) + ) continue if result.has_errors(): @@ -194,10 +210,12 @@ def router_output_cmp(router, cmd, expected): """ Runs `cmd` in router and compares the output with `expected`. 
""" - return difflines(normalize_text(router.vtysh_cmd(cmd)), - normalize_text(expected), - title1="Current output", - title2="Expected output") + return difflines( + normalize_text(router.vtysh_cmd(cmd)), + normalize_text(expected), + title1="Current output", + title2="Expected output", + ) def router_json_cmp(router, cmd, data): @@ -232,7 +250,9 @@ def run_and_expect(func, what, count=20, wait=3): logger.info( "'{}' polling started (interval {} secs, maximum wait {} secs)".format( - func_name, wait, int(wait * count))) + func_name, wait, int(wait * count) + ) + ) while count > 0: result = func() @@ -242,13 +262,17 @@ def run_and_expect(func, what, count=20, wait=3): continue end_time = time.time() - logger.info("'{}' succeeded after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.info( + "'{}' succeeded after {:.2f} seconds".format( + func_name, end_time - start_time + ) + ) return (True, result) end_time = time.time() - logger.error("'{}' failed after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.error( + "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time) + ) return (False, result) @@ -273,12 +297,16 @@ def run_and_expect_type(func, etype, count=20, wait=3, avalue=None): logger.info( "'{}' polling started (interval {} secs, maximum wait {} secs)".format( - func_name, wait, int(wait * count))) + func_name, wait, int(wait * count) + ) + ) while count > 0: result = func() if not isinstance(result, etype): - logger.debug("Expected result type '{}' got '{}' instead".format(etype, type(result))) + logger.debug( + "Expected result type '{}' got '{}' instead".format(etype, type(result)) + ) time.sleep(wait) count -= 1 continue @@ -290,13 +318,17 @@ def run_and_expect_type(func, etype, count=20, wait=3, avalue=None): continue end_time = time.time() - logger.info("'{}' succeeded after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.info( + "'{}' succeeded after {:.2f} 
seconds".format( + func_name, end_time - start_time + ) + ) return (True, result) end_time = time.time() - logger.error("'{}' failed after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.error( + "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time) + ) return (False, result) @@ -305,12 +337,15 @@ def int2dpid(dpid): try: dpid = hex(dpid)[2:] - dpid = '0'*(16-len(dpid))+dpid + dpid = "0" * (16 - len(dpid)) + dpid return dpid except IndexError: - raise Exception('Unable to derive default datapath ID - ' - 'please either specify a dpid or use a ' - 'canonical switch name such as s23.') + raise Exception( + "Unable to derive default datapath ID - " + "please either specify a dpid or use a " + "canonical switch name such as s23." + ) + def pid_exists(pid): "Check whether pid exists in the current process table." @@ -333,70 +368,78 @@ def pid_exists(pid): else: return True + def get_textdiff(text1, text2, title1="", title2="", **opts): "Returns empty string if same or formatted diff" - diff = '\n'.join(difflib.unified_diff(text1, text2, - fromfile=title1, tofile=title2, **opts)) + diff = "\n".join( + difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts) + ) # Clean up line endings diff = os.linesep.join([s for s in diff.splitlines() if s]) return diff -def difflines(text1, text2, title1='', title2='', **opts): + +def difflines(text1, text2, title1="", title2="", **opts): "Wrapper for get_textdiff to avoid string transformations." - text1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1) - text2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1) + text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(1) + text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(1) return get_textdiff(text1, text2, title1, title2, **opts) + def get_file(content): """ Generates a temporary file in '/tmp' with `content` and returns the file name. 
""" - fde = tempfile.NamedTemporaryFile(mode='w', delete=False) + fde = tempfile.NamedTemporaryFile(mode="w", delete=False) fname = fde.name fde.write(content) fde.close() return fname + def normalize_text(text): """ Strips formating spaces/tabs, carriage returns and trailing whitespace. """ - text = re.sub(r'[ \t]+', ' ', text) - text = re.sub(r'\r', '', text) + text = re.sub(r"[ \t]+", " ", text) + text = re.sub(r"\r", "", text) # Remove whitespace in the middle of text. - text = re.sub(r'[ \t]+\n', '\n', text) + text = re.sub(r"[ \t]+\n", "\n", text) # Remove whitespace at the end of the text. text = text.rstrip() return text + def module_present_linux(module, load): """ Returns whether `module` is present. If `load` is true, it will try to load it via modprobe. """ - with open('/proc/modules', 'r') as modules_file: - if module.replace('-','_') in modules_file.read(): + with open("/proc/modules", "r") as modules_file: + if module.replace("-", "_") in modules_file.read(): return True - cmd = '/sbin/modprobe {}{}'.format('' if load else '-n ', - module) + cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module) if os.system(cmd) != 0: return False else: return True + def module_present_freebsd(module, load): return True + def module_present(module, load=True): if sys.platform.startswith("linux"): return module_present_linux(module, load) elif sys.platform.startswith("freebsd"): return module_present_freebsd(module, load) + def version_cmp(v1, v2): """ Compare two version strings and returns: @@ -407,15 +450,15 @@ def version_cmp(v1, v2): Raises `ValueError` if versions are not well formated. 
""" - vregex = r'(?P<whole>\d+(\.(\d+))*)' + vregex = r"(?P<whole>\d+(\.(\d+))*)" v1m = re.match(vregex, v1) v2m = re.match(vregex, v2) if v1m is None or v2m is None: raise ValueError("got a invalid version string") # Split values - v1g = v1m.group('whole').split('.') - v2g = v2m.group('whole').split('.') + v1g = v1m.group("whole").split(".") + v2g = v2m.group("whole").split(".") # Get the longest version string vnum = len(v1g) @@ -452,35 +495,42 @@ def version_cmp(v1, v2): return -1 return 0 + def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None): if ifaceaction: - str_ifaceaction = 'no shutdown' + str_ifaceaction = "no shutdown" else: - str_ifaceaction = 'shutdown' + str_ifaceaction = "shutdown" if vrf_name == None: - cmd = 'vtysh -c \"configure terminal\" -c \"interface {0}\" -c \"{1}\"'.format(ifacename, str_ifaceaction) + cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format( + ifacename, str_ifaceaction + ) else: - cmd = 'vtysh -c \"configure terminal\" -c \"interface {0} vrf {1}\" -c \"{2}\"'.format(ifacename, vrf_name, str_ifaceaction) + cmd = 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( + ifacename, vrf_name, str_ifaceaction + ) node.run(cmd) + def ip4_route_zebra(node, vrf_name=None): """ Gets an output of 'show ip route' command. 
It can be used with comparing the output to a reference """ if vrf_name == None: - tmp = node.vtysh_cmd('show ip route') + tmp = node.vtysh_cmd("show ip route") else: - tmp = node.vtysh_cmd('show ip route vrf {0}'.format(vrf_name)) + tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name)) output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp) lines = output.splitlines() header_found = False while lines and (not lines[0].strip() or not header_found): - if '> - selected route' in lines[0]: + if "> - selected route" in lines[0]: header_found = True lines = lines[1:] - return '\n'.join(lines) + return "\n".join(lines) + def ip6_route_zebra(node, vrf_name=None): """ @@ -489,40 +539,42 @@ def ip6_route_zebra(node, vrf_name=None): """ if vrf_name == None: - tmp = node.vtysh_cmd('show ipv6 route') + tmp = node.vtysh_cmd("show ipv6 route") else: - tmp = node.vtysh_cmd('show ipv6 route vrf {0}'.format(vrf_name)) + tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name)) # Mask out timestamp output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp) # Mask out the link-local addresses - output = re.sub(r'fe80::[^ ]+,', 'fe80::XXXX:XXXX:XXXX:XXXX,', output) + output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output) lines = output.splitlines() header_found = False while lines and (not lines[0].strip() or not header_found): - if '> - selected route' in lines[0]: + if "> - selected route" in lines[0]: header_found = True lines = lines[1:] - return '\n'.join(lines) + return "\n".join(lines) def proto_name_to_number(protocol): return { - 'bgp': '186', - 'isis': '187', - 'ospf': '188', - 'rip': '189', - 'ripng': '190', - 'nhrp': '191', - 'eigrp': '192', - 'ldp': '193', - 'sharp': '194', - 'pbr': '195', - 'static': '196' - }.get(protocol, protocol) # default return same as input + "bgp": "186", + "isis": "187", + "ospf": "188", + "rip": "189", + "ripng": "190", + "nhrp": "191", + "eigrp": "192", + "ldp": "193", + "sharp": 
"194", + "pbr": "195", + "static": "196", + }.get( + protocol, protocol + ) # default return same as input def ip4_route(node): @@ -543,28 +595,29 @@ def ip4_route(node): } } """ - output = normalize_text(node.run('ip route')).splitlines() + output = normalize_text(node.run("ip route")).splitlines() result = {} for line in output: - columns = line.split(' ') + columns = line.split(" ") route = result[columns[0]] = {} prev = None for column in columns: - if prev == 'dev': - route['dev'] = column - if prev == 'via': - route['via'] = column - if prev == 'proto': + if prev == "dev": + route["dev"] = column + if prev == "via": + route["via"] = column + if prev == "proto": # translate protocol names back to numbers - route['proto'] = proto_name_to_number(column) - if prev == 'metric': - route['metric'] = column - if prev == 'scope': - route['scope'] = column + route["proto"] = proto_name_to_number(column) + if prev == "metric": + route["metric"] = column + if prev == "scope": + route["scope"] = column prev = column return result + def ip6_route(node): """ Gets a structured return of the command 'ip -6 route'. 
It can be used in @@ -582,80 +635,103 @@ def ip6_route(node): } } """ - output = normalize_text(node.run('ip -6 route')).splitlines() + output = normalize_text(node.run("ip -6 route")).splitlines() result = {} for line in output: - columns = line.split(' ') + columns = line.split(" ") route = result[columns[0]] = {} prev = None for column in columns: - if prev == 'dev': - route['dev'] = column - if prev == 'via': - route['via'] = column - if prev == 'proto': + if prev == "dev": + route["dev"] = column + if prev == "via": + route["via"] = column + if prev == "proto": # translate protocol names back to numbers - route['proto'] = proto_name_to_number(column) - if prev == 'metric': - route['metric'] = column - if prev == 'pref': - route['pref'] = column + route["proto"] = proto_name_to_number(column) + if prev == "metric": + route["metric"] = column + if prev == "pref": + route["pref"] = column prev = column return result + def sleep(amount, reason=None): """ Sleep wrapper that registers in the log the amount of sleep """ if reason is None: - logger.info('Sleeping for {} seconds'.format(amount)) + logger.info("Sleeping for {} seconds".format(amount)) else: - logger.info(reason + ' ({} seconds)'.format(amount)) + logger.info(reason + " ({} seconds)".format(amount)) time.sleep(amount) + def checkAddressSanitizerError(output, router, component): "Checks for AddressSanitizer in output. 
If found, then logs it and returns true, false otherwise" - addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ', output) + addressSantizerError = re.search( + "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output + ) if addressSantizerError: - sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component)) + sys.stderr.write( + "%s: %s triggered an exception by AddressSanitizer\n" % (router, component) + ) # Sanitizer Error found in log pidMark = addressSantizerError.group(1) - addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL) + addressSantizerLog = re.search( + "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL + ) if addressSantizerLog: - callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__']) + callingTest = os.path.basename( + sys._current_frames().values()[0].f_back.f_back.f_globals["__file__"] + ) callingProc = sys._getframe(2).f_code.co_name with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile: - sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n') + sys.stderr.write( + "\n".join(addressSantizerLog.group(1).splitlines()) + "\n" + ) addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2)) - addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router)) - addrSanFile.write(' '+ '\n '.join(addressSantizerLog.group(1).splitlines()) + '\n') + addrSanFile.write( + "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" + % (callingTest, callingProc, router) + ) + addrSanFile.write( + " " + + "\n ".join(addressSantizerLog.group(1).splitlines()) + + "\n" + ) addrSanFile.write("\n---------------\n") return True return False + def addRouter(topo, name): "Adding a FRRouter (or Quagga) to Topology" - MyPrivateDirs = ['/etc/frr', - '/etc/quagga', - '/var/run/frr', - '/var/run/quagga', - '/var/log'] + 
MyPrivateDirs = [ + "/etc/frr", + "/etc/quagga", + "/var/run/frr", + "/var/run/quagga", + "/var/log", + ] if sys.platform.startswith("linux"): return topo.addNode(name, cls=LinuxRouter, privateDirs=MyPrivateDirs) elif sys.platform.startswith("freebsd"): return topo.addNode(name, cls=FreeBSDRouter, privateDirs=MyPrivateDirs) + def set_sysctl(node, sysctl, value): "Set a sysctl value and return None on success or an error string" - valuestr = '{}'.format(value) + valuestr = "{}".format(value) command = "sysctl {0}={1}".format(sysctl, valuestr) cmdret = node.cmd(command) - matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret) + matches = re.search(r"([^ ]+) = ([^\s]+)", cmdret) if matches is None: return cmdret if matches.group(1) != sysctl: @@ -665,6 +741,7 @@ def set_sysctl(node, sysctl, value): return None + def assert_sysctl(node, sysctl, value): "Set and assert that the sysctl is set with the specified value." assert set_sysctl(node, sysctl, value) is None @@ -675,65 +752,81 @@ class Router(Node): def __init__(self, name, **params): super(Router, self).__init__(name, **params) - self.logdir = params.get('logdir') + self.logdir = params.get("logdir") # Backward compatibility: # Load configuration defaults like topogen. - self.config_defaults = configparser.ConfigParser({ - 'verbosity': 'info', - 'frrdir': '/usr/lib/frr', - 'quaggadir': '/usr/lib/quagga', - 'routertype': 'frr', - 'memleak_path': None, - }) + self.config_defaults = configparser.ConfigParser( + { + "verbosity": "info", + "frrdir": "/usr/lib/frr", + "quaggadir": "/usr/lib/quagga", + "routertype": "frr", + "memleak_path": None, + } + ) self.config_defaults.read( - os.path.join(os.path.dirname(os.path.realpath(__file__)), - '../pytest.ini') + os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini") ) # If this topology is using old API and doesn't have logdir # specified, then attempt to generate an unique logdir. 
if self.logdir is None: - cur_test = os.environ['PYTEST_CURRENT_TEST'] - self.logdir = ('/tmp/topotests/' + - cur_test[0:cur_test.find(".py")].replace('/', '.')) + cur_test = os.environ["PYTEST_CURRENT_TEST"] + self.logdir = "/tmp/topotests/" + cur_test[ + 0 : cur_test.find(".py") + ].replace("/", ".") # If the logdir is not created, then create it and set the # appropriated permissions. if not os.path.isdir(self.logdir): - os.system('mkdir -p ' + self.logdir + '/' + name) - os.system('chmod -R go+rw /tmp/topotests') + os.system("mkdir -p " + self.logdir + "/" + name) + os.system("chmod -R go+rw /tmp/topotests") self.daemondir = None self.hasmpls = False - self.routertype = 'frr' - self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0, - 'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0, - 'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0, 'staticd': 0, - 'bfdd': 0, 'sharpd': 0} - self.daemons_options = {'zebra': ''} + self.routertype = "frr" + self.daemons = { + "zebra": 0, + "ripd": 0, + "ripngd": 0, + "ospfd": 0, + "ospf6d": 0, + "isisd": 0, + "bgpd": 0, + "pimd": 0, + "ldpd": 0, + "eigrpd": 0, + "nhrpd": 0, + "staticd": 0, + "bfdd": 0, + "sharpd": 0, + } + self.daemons_options = {"zebra": ""} self.reportCores = True self.version = None def _config_frr(self, **params): "Configure FRR binaries" - self.daemondir = params.get('frrdir') + self.daemondir = params.get("frrdir") if self.daemondir is None: - self.daemondir = self.config_defaults.get('topogen', 'frrdir') + self.daemondir = self.config_defaults.get("topogen", "frrdir") - zebra_path = os.path.join(self.daemondir, 'zebra') + zebra_path = os.path.join(self.daemondir, "zebra") if not os.path.isfile(zebra_path): raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path)) def _config_quagga(self, **params): "Configure Quagga binaries" - self.daemondir = params.get('quaggadir') + self.daemondir = params.get("quaggadir") if self.daemondir is None: - self.daemondir = self.config_defaults.get('topogen', 
'quaggadir') + self.daemondir = self.config_defaults.get("topogen", "quaggadir") - zebra_path = os.path.join(self.daemondir, 'zebra') + zebra_path = os.path.join(self.daemondir, "zebra") if not os.path.isfile(zebra_path): - raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path)) + raise Exception( + "Quagga zebra binary doesn't exist at {}".format(zebra_path) + ) # pylint: disable=W0221 # Some params are only meaningful for the parent class. @@ -741,28 +834,27 @@ class Router(Node): super(Router, self).config(**params) # User did not specify the daemons directory, try to autodetect it. - self.daemondir = params.get('daemondir') + self.daemondir = params.get("daemondir") if self.daemondir is None: - self.routertype = params.get('routertype', - self.config_defaults.get( - 'topogen', - 'routertype')) - if self.routertype == 'quagga': + self.routertype = params.get( + "routertype", self.config_defaults.get("topogen", "routertype") + ) + if self.routertype == "quagga": self._config_quagga(**params) else: self._config_frr(**params) else: # Test the provided path - zpath = os.path.join(self.daemondir, 'zebra') + zpath = os.path.join(self.daemondir, "zebra") if not os.path.isfile(zpath): - raise Exception('No zebra binary found in {}'.format(zpath)) + raise Exception("No zebra binary found in {}".format(zpath)) # Allow user to specify routertype when the path was specified. 
- if params.get('routertype') is not None: - self.routertype = params.get('routertype') + if params.get("routertype") is not None: + self.routertype = params.get("routertype") - self.cmd('ulimit -c unlimited') + self.cmd("ulimit -c unlimited") # Set ownership of config files - self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype)) + self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype)) def terminate(self): # Delete Running Quagga or FRR Daemons @@ -772,62 +864,66 @@ class Router(Node): # self.cmd('kill -7 `cat %s`' % d.rstrip()) # self.waitOutput() # Disable forwarding - set_sysctl(self, 'net.ipv4.ip_forward', 0) - set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0) + set_sysctl(self, "net.ipv4.ip_forward", 0) + set_sysctl(self, "net.ipv6.conf.all.forwarding", 0) super(Router, self).terminate() - os.system('chmod -R go+rw /tmp/topotests') + os.system("chmod -R go+rw /tmp/topotests") - def stopRouter(self, wait=True, assertOnError=True, minErrorVersion='5.1'): + def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"): # Stop Running Quagga or FRR Daemons - rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype) + rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype) errors = "" if re.search(r"No such file or directory", rundaemons): return errors if rundaemons is not None: numRunning = 0 for d in StringIO.StringIO(rundaemons): - daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip() - if (daemonpid.isdigit() and pid_exists(int(daemonpid))): - logger.info('{}: stopping {}'.format( - self.name, - os.path.basename(d.rstrip().rsplit(".", 1)[0]) - )) - self.cmd('kill -TERM %s' % daemonpid) + daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() + if daemonpid.isdigit() and pid_exists(int(daemonpid)): + logger.info( + "{}: stopping {}".format( + self.name, os.path.basename(d.rstrip().rsplit(".", 1)[0]) + ) + ) + self.cmd("kill -TERM %s" % daemonpid) self.waitOutput() if pid_exists(int(daemonpid)): numRunning += 1 if 
wait and numRunning > 0: - sleep(2, '{}: waiting for daemons stopping'.format(self.name)) + sleep(2, "{}: waiting for daemons stopping".format(self.name)) # 2nd round of kill if daemons didn't exit for d in StringIO.StringIO(rundaemons): - daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip() - if (daemonpid.isdigit() and pid_exists(int(daemonpid))): - logger.info('{}: killing {}'.format( - self.name, - os.path.basename(d.rstrip().rsplit(".", 1)[0]) - )) - self.cmd('kill -7 %s' % daemonpid) + daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() + if daemonpid.isdigit() and pid_exists(int(daemonpid)): + logger.info( + "{}: killing {}".format( + self.name, + os.path.basename(d.rstrip().rsplit(".", 1)[0]), + ) + ) + self.cmd("kill -7 %s" % daemonpid) self.waitOutput() - self.cmd('rm -- {}'.format(d.rstrip())) + self.cmd("rm -- {}".format(d.rstrip())) if wait: - errors = self.checkRouterCores(reportOnce=True) - if self.checkRouterVersion('<', minErrorVersion): - #ignore errors in old versions - errors = "" - if assertOnError and len(errors) > 0: - assert "Errors found - details follow:" == 0, errors + errors = self.checkRouterCores(reportOnce=True) + if self.checkRouterVersion("<", minErrorVersion): + # ignore errors in old versions + errors = "" + if assertOnError and len(errors) > 0: + assert "Errors found - details follow:" == 0, errors return errors def removeIPs(self): for interface in self.intfNames(): - self.cmd('ip address flush', interface) + self.cmd("ip address flush", interface) def checkCapability(self, daemon, param): if param is not None: daemon_path = os.path.join(self.daemondir, daemon) - daemon_search_option = param.replace('-','') - output = self.cmd('{0} -h | grep {1}'.format( - daemon_path, daemon_search_option)) + daemon_search_option = param.replace("-", "") + output = self.cmd( + "{0} -h | grep {1}".format(daemon_path, daemon_search_option) + ) if daemon_search_option not in output: return False return True @@ -839,74 +935,89 @@ class 
Router(Node): if param is not None: self.daemons_options[daemon] = param if source is None: - self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon)) + self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon)) self.waitOutput() else: - self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon)) + self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon)) self.waitOutput() - self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon)) + self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon)) self.waitOutput() - self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon)) + self.cmd( + "chown %s:%s /etc/%s/%s.conf" + % (self.routertype, self.routertype, self.routertype, daemon) + ) self.waitOutput() - if (daemon == 'zebra') and (self.daemons['staticd'] == 0): + if (daemon == "zebra") and (self.daemons["staticd"] == 0): # Add staticd with zebra - if it exists - staticd_path = os.path.join(self.daemondir, 'staticd') + staticd_path = os.path.join(self.daemondir, "staticd") if os.path.isfile(staticd_path): - self.daemons['staticd'] = 1 - self.daemons_options['staticd'] = '' + self.daemons["staticd"] = 1 + self.daemons_options["staticd"] = "" # Auto-Started staticd has no config, so it will read from zebra config else: - logger.info('No daemon {} known'.format(daemon)) + logger.info("No daemon {} known".format(daemon)) # print "Daemons after:", self.daemons def startRouter(self, tgen=None): # Disable integrated-vtysh-config - self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype) - self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype)) + self.cmd( + 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' + % self.routertype + ) + self.cmd( + "chown %s:%svty /etc/%s/vtysh.conf" + % (self.routertype, self.routertype, self.routertype) + ) # TODO remove the following lines after all tests are 
migrated to Topogen. # Try to find relevant old logfiles in /tmp and delete them - map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) # Remove old core files - map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) # Remove IP addresses from OS first - we have them in zebra.conf self.removeIPs() # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher # No error - but return message and skip all the tests - if self.daemons['ldpd'] == 1: - ldpd_path = os.path.join(self.daemondir, 'ldpd') + if self.daemons["ldpd"] == 1: + ldpd_path = os.path.join(self.daemondir, "ldpd") if not os.path.isfile(ldpd_path): logger.info("LDP Test, but no ldpd compiled or installed") return "LDP Test, but no ldpd compiled or installed" - if version_cmp(platform.release(), '4.5') < 0: + if version_cmp(platform.release(), "4.5") < 0: logger.info("LDP Test need Linux Kernel 4.5 minimum") return "LDP Test need Linux Kernel 4.5 minimum" # Check if have mpls if tgen != None: self.hasmpls = tgen.hasmpls if self.hasmpls != True: - logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)") + logger.info( + "LDP/MPLS Tests will be skipped, platform missing module(s)" + ) else: # Test for MPLS Kernel modules available self.hasmpls = False - if not module_present('mpls-router'): - logger.info('MPLS tests will not run (missing mpls-router kernel module)') - elif not module_present('mpls-iptunnel'): - logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)') + if not module_present("mpls-router"): + logger.info( + "MPLS tests will not run (missing mpls-router kernel module)" + ) + elif not module_present("mpls-iptunnel"): + logger.info( + "MPLS tests will not run (missing mpls-iptunnel kernel module)" + ) else: self.hasmpls = True if self.hasmpls != True: 
return "LDP/MPLS Tests need mpls kernel modules" - self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels') + self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels") - if self.daemons['eigrpd'] == 1: - eigrpd_path = os.path.join(self.daemondir, 'eigrpd') + if self.daemons["eigrpd"] == 1: + eigrpd_path = os.path.join(self.daemondir, "eigrpd") if not os.path.isfile(eigrpd_path): logger.info("EIGRP Test, but no eigrpd compiled or installed") return "EIGRP Test, but no eigrpd compiled or installed" - if self.daemons['bfdd'] == 1: - bfdd_path = os.path.join(self.daemondir, 'bfdd') + if self.daemons["bfdd"] == 1: + bfdd_path = os.path.join(self.daemondir, "bfdd") if not os.path.isfile(bfdd_path): logger.info("BFD Test, but no bfdd compiled or installed") return "BFD Test, but no bfdd compiled or installed" @@ -917,52 +1028,65 @@ class Router(Node): def restartRouter(self): # Starts actual daemons without init (ie restart) # cd to per node directory - self.cmd('cd {}/{}'.format(self.logdir, self.name)) - self.cmd('umask 000') - #Re-enable to allow for report per run + self.cmd("cd {}/{}".format(self.logdir, self.name)) + self.cmd("umask 000") + # Re-enable to allow for report per run self.reportCores = True if self.version == None: - self.version = self.cmd(os.path.join(self.daemondir, 'bgpd')+' -v').split()[2] - logger.info('{}: running version: {}'.format(self.name,self.version)) + self.version = self.cmd( + os.path.join(self.daemondir, "bgpd") + " -v" + ).split()[2] + logger.info("{}: running version: {}".format(self.name, self.version)) # Start Zebra first - if self.daemons['zebra'] == 1: - zebra_path = os.path.join(self.daemondir, 'zebra') - zebra_option = self.daemons_options['zebra'] - self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format( - zebra_path, zebra_option, self.logdir, self.name - )) + if self.daemons["zebra"] == 1: + zebra_path = os.path.join(self.daemondir, "zebra") + zebra_option = self.daemons_options["zebra"] + self.cmd( + "{0} {1} > 
zebra.out 2> zebra.err &".format( + zebra_path, zebra_option, self.logdir, self.name + ) + ) self.waitOutput() - logger.debug('{}: {} zebra started'.format(self, self.routertype)) - sleep(1, '{}: waiting for zebra to start'.format(self.name)) + logger.debug("{}: {} zebra started".format(self, self.routertype)) + sleep(1, "{}: waiting for zebra to start".format(self.name)) # Start staticd next if required - if self.daemons['staticd'] == 1: - staticd_path = os.path.join(self.daemondir, 'staticd') - staticd_option = self.daemons_options['staticd'] - self.cmd('{0} {1} > staticd.out 2> staticd.err &'.format( - staticd_path, staticd_option, self.logdir, self.name - )) + if self.daemons["staticd"] == 1: + staticd_path = os.path.join(self.daemondir, "staticd") + staticd_option = self.daemons_options["staticd"] + self.cmd( + "{0} {1} > staticd.out 2> staticd.err &".format( + staticd_path, staticd_option, self.logdir, self.name + ) + ) self.waitOutput() - logger.debug('{}: {} staticd started'.format(self, self.routertype)) - # Fix Link-Local Addresses + logger.debug("{}: {} staticd started".format(self, self.routertype)) + # Fix Link-Local Addresses # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. 
Fix this - self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done') + self.cmd( + "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done" + ) # Now start all the other daemons for daemon in self.daemons: # Skip disabled daemons and zebra - if self.daemons[daemon] == 0 or daemon == 'zebra' or daemon == 'staticd': + if self.daemons[daemon] == 0 or daemon == "zebra" or daemon == "staticd": continue daemon_path = os.path.join(self.daemondir, daemon) - self.cmd('{0} {1} > {2}.out 2> {2}.err &'.format( - daemon_path, self.daemons_options.get(daemon, ''), daemon - )) + self.cmd( + "{0} {1} > {2}.out 2> {2}.err &".format( + daemon_path, self.daemons_options.get(daemon, ""), daemon + ) + ) self.waitOutput() - logger.debug('{}: {} {} started'.format(self, self.routertype, daemon)) + logger.debug("{}: {} {} started".format(self, self.routertype, daemon)) + def getStdErr(self, daemon): - return self.getLog('err', daemon) + return self.getLog("err", daemon) + def getStdOut(self, daemon): - return self.getLog('out', daemon) + return self.getLog("out", daemon) + def getLog(self, log, daemon): - return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log)) + return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log)) def checkRouterCores(self, reportLeaks=True, reportOnce=False): if reportOnce and not self.reportCores: @@ -970,33 +1094,62 @@ class Router(Node): reportMade = False traces = "" for daemon in self.daemons: - if (self.daemons[daemon] == 1): + if self.daemons[daemon] == 1: # Look for core file - corefiles = glob.glob('{}/{}/{}_core*.dmp'.format( - self.logdir, self.name, daemon)) - if (len(corefiles) > 0): + corefiles = glob.glob( + 
"{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon) + ) + if len(corefiles) > 0: daemon_path = os.path.join(self.daemondir, daemon) - backtrace = subprocess.check_output([ - "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0]) - ], shell=True) - sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon)) + backtrace = subprocess.check_output( + [ + "gdb {} {} --batch -ex bt 2> /dev/null".format( + daemon_path, corefiles[0] + ) + ], + shell=True, + ) + sys.stderr.write( + "\n%s: %s crashed. Core file found - Backtrace follows:\n" + % (self.name, daemon) + ) sys.stderr.write("%s" % backtrace) - traces = traces + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" % (self.name, daemon, backtrace) + traces = ( + traces + + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" + % (self.name, daemon, backtrace) + ) reportMade = True elif reportLeaks: log = self.getStdErr(daemon) if "memstats" in log: - sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon)) - traces = traces + "\n%s: %s has memory leaks:\n" % (self.name, daemon) + sys.stderr.write( + "%s: %s has memory leaks:\n" % (self.name, daemon) + ) + traces = traces + "\n%s: %s has memory leaks:\n" % ( + self.name, + daemon, + ) log = re.sub("core_handler: ", "", log) - log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n ## \1", log) + log = re.sub( + r"(showing active allocations in memory group [a-zA-Z0-9]+)", + r"\n ## \1", + log, + ) log = re.sub("memstats: ", " ", log) sys.stderr.write(log) reportMade = True # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found - if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon): - sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)) - traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon) + if checkAddressSanitizerError( + 
self.getStdErr(daemon), self.name, daemon + ): + sys.stderr.write( + "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon) + ) + traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % ( + self.name, + daemon, + ) reportMade = True if reportMade: self.reportCores = False @@ -1007,7 +1160,9 @@ class Router(Node): global fatal_error - daemonsRunning = self.cmd('vtysh -c "show logging" | grep "Logging configuration for"') + daemonsRunning = self.cmd( + 'vtysh -c "show logging" | grep "Logging configuration for"' + ) # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"): return "%s: vtysh killed by AddressSanitizer" % (self.name) @@ -1016,32 +1171,59 @@ class Router(Node): if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning): sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon)) if daemon is "staticd": - sys.stderr.write("You may have a copy of staticd installed but are attempting to test against\n") - sys.stderr.write("a version of FRR that does not have staticd, please cleanup the install dir\n") + sys.stderr.write( + "You may have a copy of staticd installed but are attempting to test against\n" + ) + sys.stderr.write( + "a version of FRR that does not have staticd, please cleanup the install dir\n" + ) # Look for core file - corefiles = glob.glob('{}/{}/{}_core*.dmp'.format( - self.logdir, self.name, daemon)) - if (len(corefiles) > 0): + corefiles = glob.glob( + "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon) + ) + if len(corefiles) > 0: daemon_path = os.path.join(self.daemondir, daemon) - backtrace = subprocess.check_output([ - "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0]) - ], shell=True) - sys.stderr.write("\n%s: %s crashed. 
Core file found - Backtrace follows:\n" % (self.name, daemon)) + backtrace = subprocess.check_output( + [ + "gdb {} {} --batch -ex bt 2> /dev/null".format( + daemon_path, corefiles[0] + ) + ], + shell=True, + ) + sys.stderr.write( + "\n%s: %s crashed. Core file found - Backtrace follows:\n" + % (self.name, daemon) + ) sys.stderr.write("%s\n" % backtrace) else: # No core found - If we find matching logfile in /tmp, then print last 20 lines from it. - if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)): - log_tail = subprocess.check_output([ - "tail -n20 {}/{}/{}.log 2> /dev/null".format( - self.logdir, self.name, daemon) - ], shell=True) - sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon)) + if os.path.isfile( + "{}/{}/{}.log".format(self.logdir, self.name, daemon) + ): + log_tail = subprocess.check_output( + [ + "tail -n20 {}/{}/{}.log 2> /dev/null".format( + self.logdir, self.name, daemon + ) + ], + shell=True, + ) + sys.stderr.write( + "\nFrom %s %s %s log file:\n" + % (self.routertype, self.name, daemon) + ) sys.stderr.write("%s\n" % log_tail) # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found - if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon): - return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon) + if checkAddressSanitizerError( + self.getStdErr(daemon), self.name, daemon + ): + return "%s: Daemon %s not running - killed by AddressSanitizer" % ( + self.name, + daemon, + ) return "%s: Daemon %s not running" % (self.name, daemon) return "" @@ -1061,25 +1243,27 @@ class Router(Node): # Make sure we have version information first if self.version == None: - self.version = self.cmd(os.path.join(self.daemondir, 'bgpd')+' -v').split()[2] - logger.info('{}: running version: {}'.format(self.name,self.version)) + self.version = self.cmd( + os.path.join(self.daemondir, "bgpd") + " -v" + ).split()[2] + logger.info("{}: running 
version: {}".format(self.name, self.version)) rversion = self.version if rversion is None: return False result = version_cmp(rversion, version) - if cmpop == '>=': + if cmpop == ">=": return result >= 0 - if cmpop == '>': + if cmpop == ">": return result > 0 - if cmpop == '=': + if cmpop == "=": return result == 0 - if cmpop == '<': + if cmpop == "<": return result < 0 - if cmpop == '<': + if cmpop == "<": return result < 0 - if cmpop == '<=': + if cmpop == "<=": return result <= 0 def get_ipv6_linklocal(self): @@ -1087,37 +1271,41 @@ class Router(Node): linklocal = [] - ifaces = self.cmd('ip -6 address') + ifaces = self.cmd("ip -6 address") # Fix newlines (make them all the same) - ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines() - interface="" - ll_per_if_count=0 + ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines() + interface = "" + ll_per_if_count = 0 for line in ifaces: - m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line) + m = re.search("[0-9]+: ([^:@]+)[@if0-9:]+ <", line) if m: interface = m.group(1) ll_per_if_count = 0 - m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line) + m = re.search( + "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link", + line, + ) if m: local = m.group(1) ll_per_if_count += 1 - if (ll_per_if_count > 1): + if ll_per_if_count > 1: linklocal += [["%s-%s" % (interface, ll_per_if_count), local]] else: linklocal += [[interface, local]] return linklocal + def daemon_available(self, daemon): "Check if specified daemon is installed (and for ldp if kernel supports MPLS)" daemon_path = os.path.join(self.daemondir, daemon) if not os.path.isfile(daemon_path): return False - if (daemon == 'ldpd'): - if version_cmp(platform.release(), '4.5') < 0: + if daemon == "ldpd": + if version_cmp(platform.release(), "4.5") < 0: return False - if not module_present('mpls-router', load=False): + if not module_present("mpls-router", load=False): return False - if not 
module_present('mpls-iptunnel', load=False): + if not module_present("mpls-iptunnel", load=False): return False return True @@ -1125,18 +1313,20 @@ class Router(Node): "Return the type of Router (frr or quagga)" return self.routertype + def report_memory_leaks(self, filename_prefix, testscript): "Report Memory Leaks to file prefixed with given string" leakfound = False filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt" for daemon in self.daemons: - if (self.daemons[daemon] == 1): + if self.daemons[daemon] == 1: log = self.getStdErr(daemon) if "memstats" in log: # Found memory leak - logger.info('\nRouter {} {} StdErr Log:\n{}'.format( - self.name, daemon, log)) + logger.info( + "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log) + ) if not leakfound: leakfound = True # Check if file already exists @@ -1144,17 +1334,25 @@ class Router(Node): leakfile = open(filename, "a") if not fileexists: # New file - add header - leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript) + leakfile.write( + "# Memory Leak Detection for topotest %s\n\n" + % testscript + ) leakfile.write("## Router %s\n" % self.name) leakfile.write("### Process %s\n" % daemon) log = re.sub("core_handler: ", "", log) - log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log) + log = re.sub( + r"(showing active allocations in memory group [a-zA-Z0-9]+)", + r"\n#### \1\n", + log, + ) log = re.sub("memstats: ", " ", log) leakfile.write(log) leakfile.write("\n") if leakfound: leakfile.close() + class LinuxRouter(Router): "A Linux Router Node with IPv4/IPv6 forwarding enabled." 
@@ -1164,25 +1362,26 @@ class LinuxRouter(Router): def config(self, **params): Router.config(self, **params) # Enable forwarding on the router - assert_sysctl(self, 'net.ipv4.ip_forward', 1) - assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1) + assert_sysctl(self, "net.ipv4.ip_forward", 1) + assert_sysctl(self, "net.ipv6.conf.all.forwarding", 1) # Enable coredumps - assert_sysctl(self, 'kernel.core_uses_pid', 1) - assert_sysctl(self, 'fs.suid_dumpable', 1) - #this applies to the kernel not the namespace... - #original on ubuntu 17.x, but apport won't save as in namespace + assert_sysctl(self, "kernel.core_uses_pid", 1) + assert_sysctl(self, "fs.suid_dumpable", 1) + # this applies to the kernel not the namespace... + # original on ubuntu 17.x, but apport won't save as in namespace # |/usr/share/apport/apport %p %s %c %d %P - corefile = '%e_core-sig_%s-pid_%p.dmp' - assert_sysctl(self, 'kernel.core_pattern', corefile) + corefile = "%e_core-sig_%s-pid_%p.dmp" + assert_sysctl(self, "kernel.core_pattern", corefile) def terminate(self): """ Terminate generic LinuxRouter Mininet instance """ - set_sysctl(self, 'net.ipv4.ip_forward', 0) - set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0) + set_sysctl(self, "net.ipv4.ip_forward", 0) + set_sysctl(self, "net.ipv6.conf.all.forwarding", 0) Router.terminate(self) + class FreeBSDRouter(Router): "A FreeBSD Router Node with IPv4/IPv6 forwarding enabled." 
@@ -1194,5 +1393,5 @@ class LegacySwitch(OVSSwitch): "A Legacy Switch without OpenFlow" def __init__(self, name, **params): - OVSSwitch.__init__(self, name, failMode='standalone', **params) + OVSSwitch.__init__(self, name, failMode="standalone", **params) self.switchIP = None diff --git a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py index 56cd42ea57..92cebfe0b6 100755 --- a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py +++ b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py @@ -32,53 +32,56 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Required to instantiate the topology builder class. from mininet.topo import Topo + # Import topogen and topotest helpers from lib import topotest from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger + # and Finally pytest import pytest class OspfSrTopo(Topo): "Test topology builder" + def build(self): "Build function" tgen = get_topogen(self) # Check for mpls if tgen.hasmpls is not True: - tgen.set_error('MPLS not available, tests will be skipped') + tgen.set_error("MPLS not available, tests will be skipped") # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Interconect router 1 and 2 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) # Interconect router 3 and 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r2"]) # Interconect 
router 4 and 2 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r4']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r4"]) + switch.add_link(tgen.gears["r2"]) def setup_module(mod): "Sets up the pytest environment" - logger.info('\n\n---- Starting OSPF Segment Routing tests ----\n') + logger.info("\n\n---- Starting OSPF Segment Routing tests ----\n") tgen = Topogen(OspfSrTopo, mod.__name__) tgen.start_topology() @@ -87,12 +90,10 @@ def setup_module(mod): for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) # Initialize all routers. @@ -101,14 +102,15 @@ def setup_module(mod): # Verify that version, MPLS and Segment Routing are OK for router in router_list.values(): # Check for Version - if router.has_version('<', '4'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "4"): + tgen.set_error("Unsupported FRR version") break # Check that Segment Routing is available output = tgen.gears[router.name].vtysh_cmd( - "show ip ospf database segment-routing json") + "show ip ospf database segment-routing json" + ) if output.find("Unknown") != -1: - tgen.set_error('Segment Routing is not available') + tgen.set_error("Segment Routing is not available") def teardown_module(mod): @@ -117,7 +119,8 @@ def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() - logger.info('\n\n---- OSPF Segment Routing tests End ----\n') + logger.info("\n\n---- OSPF Segment Routing tests End ----\n") + # Shared test function to validate expected output. 
def compare_ospf_srdb(rname, expected): @@ -126,11 +129,10 @@ def compare_ospf_srdb(rname, expected): and compare the obtained result with the expected output. """ tgen = get_topogen() - current = tgen.gears[rname].vtysh_cmd( - 'show ip ospf database segment-routing json') - return topotest.difflines(current, expected, - title1="Current output", - title2="Expected output") + current = tgen.gears[rname].vtysh_cmd("show ip ospf database segment-routing json") + return topotest.difflines( + current, expected, title1="Current output", title2="Expected output" + ) def compare_mpls_table(rname, expected): @@ -139,10 +141,10 @@ def compare_mpls_table(rname, expected): result with the expected output. """ tgen = get_topogen() - current = tgen.gears[rname].vtysh_cmd('show mpls table json') - return topotest.difflines(current, expected, - title1="Current output", - title2="Expected output") + current = tgen.gears[rname].vtysh_cmd("show mpls table json") + return topotest.difflines( + current, expected, title1="Current output", title2="Expected output" + ) def test_ospf_sr(): @@ -151,24 +153,23 @@ def test_ospf_sr(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('--- test OSPF Segment Routing Data Base ---') + logger.info("--- test OSPF Segment Routing Data Base ---") for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) logger.info('\tRouter "%s"', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospf_srdb.json'.format(router)) + reffile = os.path.join(CWD, "{}/ospf_srdb.json".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_ospf_srdb, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) - assert result, ( - 'OSPF did not start Segment Routing on {}:\n{}' - ).format(router, diff) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) + assert result, ("OSPF did not start Segment Routing on {}:\n{}").format( + router, diff + ) def test_ospf_kernel_route(): @@ -177,34 +178,34 @@ def test_ospf_kernel_route(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('--- test OSPF Segment Routing MPLS tables ---') + logger.info("--- test OSPF Segment Routing MPLS tables ---") for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) logger.info('\tRouter "%s"', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/zebra_mpls.json'.format(router)) + reffile = os.path.join(CWD, "{}/zebra_mpls.json".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. test_func = partial(compare_mpls_table, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) - assert result, ( - 'OSPF did not properly instal MPLS table on {}:\n{}' - ).format(router, diff) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) + assert result, ("OSPF did not properly instal MPLS table on {}:\n{}").format( + router, diff + ) def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt b/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt index 973db543fa..d72aa3b8e5 100644 --- a/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt +++ b/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt @@ -1,8 +1,8 @@ VRF r1-cust1: -O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX +O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX -O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, XX:XX:XX +O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r1-eth1, XX:XX:XX -O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, XX:XX:XX +O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt b/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt index 7bdccd0909..5ea6bdc04d 100644 --- a/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt +++ b/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt @@ -1,7 +1,7 @@ VRF r1-cust1: -O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX +O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX -O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, XX:XX:XX +O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, 
XX:XX:XX C>* 10.0.3.0/24 is directly connected, r1-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt b/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt index 2916cb9274..ce5e5f3bab 100644 --- a/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt +++ b/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt @@ -1,8 +1,8 @@ VRF r2-cust1: -O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, XX:XX:XX -O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, XX:XX:XX +O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX +O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r2-eth1, XX:XX:XX -O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, XX:XX:XX +O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt b/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt index ccaf9abc31..157811ec77 100644 --- a/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt +++ b/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt @@ -1,7 +1,7 @@ VRF r2-cust1: -O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, XX:XX:XX -O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, XX:XX:XX +O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX +O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r2-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt b/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt index 70eae0a9fb..f40b7b09af 100644 --- 
a/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt +++ b/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt @@ -1,8 +1,8 @@ VRF r3-cust1: -O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, XX:XX:XX -O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, XX:XX:XX +O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, weight 1, XX:XX:XX +O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, weight 1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r3-eth0, XX:XX:XX -O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, XX:XX:XX +O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.10.0/24 is directly connected, r3-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt b/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt index 6d54782eff..89cd6f56c4 100644 --- a/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt +++ b/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt @@ -1,4 +1,4 @@ VRF r3-cust1: -O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, XX:XX:XX +O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.10.0/24 is directly connected, r3-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py index fc4854454c..130d0c85f9 100755 --- a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py +++ b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,33 +45,35 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class OSPFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 3 routers for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create a empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) # Interconect router 1, 2 and 3 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) # Create empty netowrk for router3 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r3"]) def setup_module(mod): @@ -83,23 +85,26 @@ def setup_module(mod): # check for zebra capability for rname, router in router_list.iteritems(): - if router.check_capability( - TopoRouter.RD_ZEBRA, - '--vrfwnetns' - ) == False: - return pytest.skip('Skipping OSPF VRF NETNS feature. VRF NETNS backend not available on FRR') - - if os.system('ip netns list') != 0: - return pytest.skip('Skipping OSPF VRF NETNS Test. NETNS not available on System') + if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False: + return pytest.skip( + "Skipping OSPF VRF NETNS feature. VRF NETNS backend not available on FRR" + ) + + if os.system("ip netns list") != 0: + return pytest.skip( + "Skipping OSPF VRF NETNS Test. 
NETNS not available on System" + ) - logger.info('Testing with VRF Namespace support') + logger.info("Testing with VRF Namespace support") - cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi', - 'ip netns add {0}-cust1', - 'ip link set dev {0}-eth0 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up', - 'ip link set dev {0}-eth1 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth1 up'] + cmds = [ + "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi", + "ip netns add {0}-cust1", + "ip link set dev {0}-eth0 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth0 up", + "ip link set dev {0}-eth1 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth1 up", + ] for rname, router in router_list.iteritems(): @@ -109,19 +114,18 @@ def setup_module(mod): router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), - '--vrfwnetns' + os.path.join(CWD, "{}/zebra.conf".format(rname)), + "--vrfwnetns", ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) # Initialize all routers. 
tgen.start_router() for router in router_list.values(): - if router.has_version('<', '4.0'): - tgen.set_error('unsupported version') + if router.has_version("<", "4.0"): + tgen.set_error("unsupported version") def teardown_module(mod): @@ -130,16 +134,19 @@ def teardown_module(mod): # move back rx-eth0 to default VRF # delete rx-vrf - cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1', - 'ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1', - 'ip netns delete {0}-cust1'] - + cmds = [ + "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1", + "ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1", + "ip netns delete {0}-cust1", + ] + router_list = tgen.routers() for rname, router in router_list.iteritems(): for cmd in cmds: tgen.net[rname].cmd(cmd.format(rname)) tgen.stop_topology() + # Shared test function to validate expected output. def compare_show_ip_route_vrf(rname, expected): """ @@ -147,35 +154,37 @@ def compare_show_ip_route_vrf(rname, expected): result with the expected output. """ tgen = get_topogen() - vrf_name = '{0}-cust1'.format(rname) + vrf_name = "{0}-cust1".format(rname) current = topotest.ip4_route_zebra(tgen.gears[rname], vrf_name) - ret = topotest.difflines(current, expected, - title1="Current output", - title2="Expected output") + ret = topotest.difflines( + current, expected, title1="Current output", title2="Expected output" + ) return ret + def test_ospf_convergence(): "Test OSPF daemon convergence" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rname, router in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence', rname) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute.txt'.format(rname)) + reffile = os.path.join(CWD, "{}/ospfroute.txt".format(rname)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
- test_func = partial(topotest.router_output_cmp, - router, - 'show ip ospf vrf {0}-cust1 route'.format(rname), - expected) - result, diff = topotest.run_and_expect(test_func, '', - count=160, wait=0.5) - assertmsg = 'OSPF did not converge on {}:\n{}'.format(rname, diff) + test_func = partial( + topotest.router_output_cmp, + router, + "show ip ospf vrf {0}-cust1 route".format(rname), + expected, + ) + result, diff = topotest.run_and_expect(test_func, "", count=160, wait=0.5) + assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff) assert result, assertmsg @@ -184,19 +193,19 @@ def test_ospf_kernel_route(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: logger.info('Checking OSPF IPv4 kernel routes in "%s"', router.name) - reffile = os.path.join(CWD, '{}/zebraroute.txt'.format(router.name)) + reffile = os.path.join(CWD, "{}/zebraroute.txt".format(router.name)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_show_ip_route_vrf, router.name, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) assertmsg = 'OSPF IPv4 route mismatch in router "{}": {}'.format( - router.name, diff) + router.name, diff + ) assert result, assertmsg @@ -205,52 +214,57 @@ def test_ospf_json(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rname, router in tgen.routers().iteritems(): - logger.info('Comparing router "%s" "show ip ospf vrf %s-cust1 json" output', router.name, router.name) + logger.info( + 'Comparing router "%s" "show ip ospf vrf %s-cust1 json" output', + router.name, + router.name, + ) expected = { - '{}-cust1'.format(router.name) : { - 'vrfName': '{}-cust1'.format(router.name), - 'routerId': '10.0.255.{}'.format(rname[1:]), - 'tosRoutesOnly': True, - 'rfc2328Conform': True, - 'spfScheduleDelayMsecs': 0, - 'holdtimeMinMsecs': 50, - 'holdtimeMaxMsecs': 5000, - 'lsaMinIntervalMsecs': 5000, - 'lsaMinArrivalMsecs': 1000, - 'writeMultiplier': 20, - 'refreshTimerMsecs': 10000, - 'asbrRouter': 'injectingExternalRoutingInformation', - 'attachedAreaCounter': 1, - 'areas': {} - } + "{}-cust1".format(router.name): { + "vrfName": "{}-cust1".format(router.name), + "routerId": "10.0.255.{}".format(rname[1:]), + "tosRoutesOnly": True, + "rfc2328Conform": True, + "spfScheduleDelayMsecs": 0, + "holdtimeMinMsecs": 50, + "holdtimeMaxMsecs": 5000, + "lsaMinIntervalMsecs": 5000, + "lsaMinArrivalMsecs": 1000, + "writeMultiplier": 20, + "refreshTimerMsecs": 10000, + "asbrRouter": "injectingExternalRoutingInformation", + "attachedAreaCounter": 1, + "areas": {}, } + } # Area specific additional checks - if router.name == 'r1' or router.name == 'r2' or router.name == 'r3': - expected['{}-cust1'.format(router.name)]['areas']['0.0.0.0'] = { - 
'areaIfActiveCounter': 2, - 'areaIfTotalCounter': 2, - 'authentication': 'authenticationNone', - 'backbone': True, - 'lsaAsbrNumber': 0, - 'lsaNetworkNumber': 1, - 'lsaNssaNumber': 0, - 'lsaNumber': 4, - 'lsaOpaqueAreaNumber': 0, - 'lsaOpaqueLinkNumber': 0, - 'lsaRouterNumber': 3, - 'lsaSummaryNumber': 0, - 'nbrFullAdjacentCounter': 2, + if router.name == "r1" or router.name == "r2" or router.name == "r3": + expected["{}-cust1".format(router.name)]["areas"]["0.0.0.0"] = { + "areaIfActiveCounter": 2, + "areaIfTotalCounter": 2, + "authentication": "authenticationNone", + "backbone": True, + "lsaAsbrNumber": 0, + "lsaNetworkNumber": 1, + "lsaNssaNumber": 0, + "lsaNumber": 4, + "lsaOpaqueAreaNumber": 0, + "lsaOpaqueLinkNumber": 0, + "lsaRouterNumber": 3, + "lsaSummaryNumber": 0, + "nbrFullAdjacentCounter": 2, } - test_func = partial(topotest.router_json_cmp, - router, - 'show ip ospf vrf {0}-cust1 json'.format(rname), - expected) - _, diff = topotest.run_and_expect(test_func, None, - count=10, wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip ospf vrf {0}-cust1 json".format(rname), + expected, + ) + _, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(rname) assert diff is None, assertmsg @@ -260,27 +274,30 @@ def test_ospf_link_down(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # Simulate a network down event on router3 switch3 interface. 
- router3 = tgen.gears['r3'] - topotest.interface_set_status(router3, 'r3-eth0', ifaceaction=False, vrf_name='r3-cust1') + router3 = tgen.gears["r3"] + topotest.interface_set_status( + router3, "r3-eth0", ifaceaction=False, vrf_name="r3-cust1" + ) # Expect convergence on all routers for rname, router in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence after link failure', rname) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute_down.txt'.format(rname)) + reffile = os.path.join(CWD, "{}/ospfroute_down.txt".format(rname)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. - test_func = partial(topotest.router_output_cmp, - router, - 'show ip ospf vrf {0}-cust1 route'.format(rname), - expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) - assertmsg = 'OSPF did not converge on {}:\n{}'.format(rname, diff) + test_func = partial( + topotest.router_output_cmp, + router, + "show ip ospf vrf {0}-cust1 route".format(rname), + expected, + ) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) + assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff) assert result, assertmsg @@ -289,21 +306,23 @@ def test_ospf_link_down_kernel_route(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: - logger.info('Checking OSPF IPv4 kernel routes in "%s" after link down', router.name) + logger.info( + 'Checking OSPF IPv4 kernel routes in "%s" after link down', router.name + ) - str='{0}-cust1'.format(router.name) - reffile = os.path.join(CWD, '{}/zebraroutedown.txt'.format(router.name)) + str = "{0}-cust1".format(router.name) + reffile = os.path.join(CWD, "{}/zebraroutedown.txt".format(router.name)) expected = open(reffile).read() # Run test function 
until we get an result. Wait at most 60 seconds. test_func = partial(compare_show_ip_route_vrf, router.name, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down: {}'.format( - router.name, diff) + router.name, diff + ) assert result, assertmsg @@ -311,10 +330,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-topo1/test_ospf_topo1.py b/tests/topotests/ospf-topo1/test_ospf_topo1.py index 638e394153..d734f378e7 100755 --- a/tests/topotests/ospf-topo1/test_ospf_topo1.py +++ b/tests/topotests/ospf-topo1/test_ospf_topo1.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,70 +45,71 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class OSPFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create a empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) # Interconect router 1, 2 and 3 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) # Create empty netowrk for router3 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r3"]) # Interconect router 3 and 4 - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) # Create a empty network for router 4 - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r4"]) + def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(OSPFTopo, mod.__name__) tgen.start_topology() - ospf6_config = 'ospf6d.conf' - if tgen.gears['r1'].has_version('<', '4.0'): - ospf6_config = 'ospf6d.conf-pre-v4' + ospf6_config = "ospf6d.conf" + if tgen.gears["r1"].has_version("<", "4.0"): + ospf6_config = "ospf6d.conf-pre-v4" router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - 
TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF6, - os.path.join(CWD, '{}/{}'.format(rname, ospf6_config)) + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/{}".format(rname, ospf6_config)) ) # Initialize all routers. tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -121,46 +122,50 @@ def compare_show_ipv6_ospf6(rname, expected): result with the expected output. """ tgen = get_topogen() - current = tgen.gears[rname].vtysh_cmd('show ipv6 ospf6 route') + current = tgen.gears[rname].vtysh_cmd("show ipv6 ospf6 route") # Remove the link addresses - current = re.sub(r'fe80::[^ ]+', 'fe80::xxxx:xxxx:xxxx:xxxx', current) - expected = re.sub(r'fe80::[^ ]+', 'fe80::xxxx:xxxx:xxxx:xxxx', expected) + current = re.sub(r"fe80::[^ ]+", "fe80::xxxx:xxxx:xxxx:xxxx", current) + expected = re.sub(r"fe80::[^ ]+", "fe80::xxxx:xxxx:xxxx:xxxx", expected) # Remove the time - current = re.sub(r'\d+:\d{2}:\d{2}', '', current) - expected = re.sub(r'\d+:\d{2}:\d{2}', '', expected) + current = re.sub(r"\d+:\d{2}:\d{2}", "", current) + expected = re.sub(r"\d+:\d{2}:\d{2}", "", expected) + + return topotest.difflines( + topotest.normalize_text(current), + topotest.normalize_text(expected), + title1="Current output", + title2="Expected output", + ) - return topotest.difflines(topotest.normalize_text(current), - topotest.normalize_text(expected), - title1="Current output", - title2="Expected output") def test_ospf_convergence(): "Test OSPF daemon convergence" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for router, rnode in 
tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute.txt'.format(router)) + reffile = os.path.join(CWD, "{}/ospfroute.txt".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 80 seconds. test_func = partial( - topotest.router_output_cmp, rnode, 'show ip ospf route', expected) - result, diff = topotest.run_and_expect(test_func, '', - count=160, wait=0.5) - assert result, 'OSPF did not converge on {}:\n{}'.format(router, diff) + topotest.router_output_cmp, rnode, "show ip ospf route", expected + ) + result, diff = topotest.run_and_expect(test_func, "", count=160, wait=0.5) + assert result, "OSPF did not converge on {}:\n{}".format(router, diff) + def test_ospf_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: @@ -168,25 +173,26 @@ def test_ospf_kernel_route(): routes = topotest.ip4_route(router) expected = { - '10.0.1.0/24': {}, - '10.0.2.0/24': {}, - '10.0.3.0/24': {}, - '10.0.10.0/24': {}, - '172.16.0.0/24': {}, - '172.16.1.0/24': {}, + "10.0.1.0/24": {}, + "10.0.2.0/24": {}, + "10.0.3.0/24": {}, + "10.0.10.0/24": {}, + "172.16.0.0/24": {}, + "172.16.1.0/24": {}, } assertmsg = 'OSPF IPv4 route mismatch in router "{}"'.format(router.name) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_ospf6_convergence(): "Test OSPF6 daemon convergence" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - ospf6route_file = '{}/ospf6route_ecmp.txt' + ospf6route_file = "{}/ospf6route_ecmp.txt" for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = 
"r{}".format(rnum) logger.info('Waiting for router "%s" IPv6 OSPF convergence', router) @@ -196,39 +202,37 @@ def test_ospf6_convergence(): # Run test function until we get an result. Wait at most 60 seconds. test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) if (not result) and (rnum == 1): # Didn't match the new ECMP version - try the old pre-ECMP format - ospf6route_file = '{}/ospf6route.txt' + ospf6route_file = "{}/ospf6route.txt" # Load expected results from the command reffile = os.path.join(CWD, ospf6route_file.format(router)) expected = open(reffile).read() test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=1, wait=3) + result, diff = topotest.run_and_expect(test_func, "", count=1, wait=3) if not result: # Didn't match the old version - switch back to new ECMP version # and fail - ospf6route_file = '{}/ospf6route_ecmp.txt' + ospf6route_file = "{}/ospf6route_ecmp.txt" # Load expected results from the command reffile = os.path.join(CWD, ospf6route_file.format(router)) expected = open(reffile).read() test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=1, wait=3) + result, diff = topotest.run_and_expect(test_func, "", count=1, wait=3) + + assert result, "OSPF6 did not converge on {}:\n{}".format(router, diff) - assert result, 'OSPF6 did not converge on {}:\n{}'.format(router, diff) def test_ospf6_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: @@ -236,216 +240,231 @@ def test_ospf6_kernel_route(): routes = topotest.ip6_route(router) 
expected = { - '2001:db8:1::/64': {}, - '2001:db8:2::/64': {}, - '2001:db8:3::/64': {}, - '2001:db8:100::/64': {}, - '2001:db8:200::/64': {}, - '2001:db8:300::/64': {}, + "2001:db8:1::/64": {}, + "2001:db8:2::/64": {}, + "2001:db8:3::/64": {}, + "2001:db8:100::/64": {}, + "2001:db8:200::/64": {}, + "2001:db8:300::/64": {}, } assertmsg = 'OSPF IPv6 route mismatch in router "{}"'.format(router.name) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_ospf_json(): "Test 'show ip ospf json' output for coherency." tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rnum in range(1, 5): - router = tgen.gears['r{}'.format(rnum)] + router = tgen.gears["r{}".format(rnum)] logger.info('Comparing router "%s" "show ip ospf json" output', router.name) expected = { - 'routerId': '10.0.255.{}'.format(rnum), - 'tosRoutesOnly': True, - 'rfc2328Conform': True, - 'spfScheduleDelayMsecs': 0, - 'holdtimeMinMsecs': 50, - 'holdtimeMaxMsecs': 5000, - 'lsaMinIntervalMsecs': 5000, - 'lsaMinArrivalMsecs': 1000, - 'writeMultiplier': 20, - 'refreshTimerMsecs': 10000, - 'asbrRouter': 'injectingExternalRoutingInformation', - 'attachedAreaCounter': 1, - 'areas': {} + "routerId": "10.0.255.{}".format(rnum), + "tosRoutesOnly": True, + "rfc2328Conform": True, + "spfScheduleDelayMsecs": 0, + "holdtimeMinMsecs": 50, + "holdtimeMaxMsecs": 5000, + "lsaMinIntervalMsecs": 5000, + "lsaMinArrivalMsecs": 1000, + "writeMultiplier": 20, + "refreshTimerMsecs": 10000, + "asbrRouter": "injectingExternalRoutingInformation", + "attachedAreaCounter": 1, + "areas": {}, } # Area specific additional checks - if router.name == 'r1' or router.name == 'r2' or router.name == 'r3': - expected['areas']['0.0.0.0'] = { - 'areaIfActiveCounter': 2, - 'areaIfTotalCounter': 2, - 'authentication': 'authenticationNone', - 'backbone': True, - 'lsaAsbrNumber': 1, - 'lsaNetworkNumber': 1, - 
'lsaNssaNumber': 0, - 'lsaNumber': 7, - 'lsaOpaqueAreaNumber': 0, - 'lsaOpaqueLinkNumber': 0, - 'lsaRouterNumber': 3, - 'lsaSummaryNumber': 2, - 'nbrFullAdjacentCounter': 2, + if router.name == "r1" or router.name == "r2" or router.name == "r3": + expected["areas"]["0.0.0.0"] = { + "areaIfActiveCounter": 2, + "areaIfTotalCounter": 2, + "authentication": "authenticationNone", + "backbone": True, + "lsaAsbrNumber": 1, + "lsaNetworkNumber": 1, + "lsaNssaNumber": 0, + "lsaNumber": 7, + "lsaOpaqueAreaNumber": 0, + "lsaOpaqueLinkNumber": 0, + "lsaRouterNumber": 3, + "lsaSummaryNumber": 2, + "nbrFullAdjacentCounter": 2, } - if router.name == 'r3' or router.name == 'r4': - expected['areas']['0.0.0.1'] = { - 'areaIfActiveCounter': 1, - 'areaIfTotalCounter': 1, - 'authentication': 'authenticationNone', - 'lsaAsbrNumber': 2, - 'lsaNetworkNumber': 1, - 'lsaNssaNumber': 0, - 'lsaNumber': 9, - 'lsaOpaqueAreaNumber': 0, - 'lsaOpaqueLinkNumber': 0, - 'lsaRouterNumber': 2, - 'lsaSummaryNumber': 4, - 'nbrFullAdjacentCounter': 1, + if router.name == "r3" or router.name == "r4": + expected["areas"]["0.0.0.1"] = { + "areaIfActiveCounter": 1, + "areaIfTotalCounter": 1, + "authentication": "authenticationNone", + "lsaAsbrNumber": 2, + "lsaNetworkNumber": 1, + "lsaNssaNumber": 0, + "lsaNumber": 9, + "lsaOpaqueAreaNumber": 0, + "lsaOpaqueLinkNumber": 0, + "lsaRouterNumber": 2, + "lsaSummaryNumber": 4, + "nbrFullAdjacentCounter": 1, } # r4 has more interfaces for area 0.0.0.1 - if router.name == 'r4': - expected['areas']['0.0.0.1'].update({ - 'areaIfActiveCounter': 2, - 'areaIfTotalCounter': 2, - }) + if router.name == "r4": + expected["areas"]["0.0.0.1"].update( + {"areaIfActiveCounter": 2, "areaIfTotalCounter": 2,} + ) # router 3 has an additional area - if router.name == 'r3': - expected['attachedAreaCounter'] = 2 + if router.name == "r3": + expected["attachedAreaCounter"] = 2 - output = router.vtysh_cmd('show ip ospf json', isjson=True) + output = router.vtysh_cmd("show ip ospf json", 
isjson=True) result = topotest.json_cmp(output, expected) - assert result is None, '"{}" JSON output mismatches the expected result'.format(router.name) + assert result is None, '"{}" JSON output mismatches the expected result'.format( + router.name + ) + def test_ospf_link_down(): "Test OSPF convergence after a link goes down" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # Simulate a network down event on router3 switch3 interface. - router3 = tgen.gears['r3'] - router3.peer_link_enable('r3-eth0', False) + router3 = tgen.gears["r3"] + router3.peer_link_enable("r3-eth0", False) # Expect convergence on all routers for router, rnode in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence after link failure', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute_down.txt'.format(router)) + reffile = os.path.join(CWD, "{}/ospfroute_down.txt".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 80 seconds. 
test_func = partial( - topotest.router_output_cmp, rnode, 'show ip ospf route', expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) - assert result, 'OSPF did not converge on {}:\n{}'.format(router, diff) + topotest.router_output_cmp, rnode, "show ip ospf route", expected + ) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) + assert result, "OSPF did not converge on {}:\n{}".format(router, diff) + def test_ospf_link_down_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: - logger.info('Checking OSPF IPv4 kernel routes in "%s" after link down', router.name) + logger.info( + 'Checking OSPF IPv4 kernel routes in "%s" after link down', router.name + ) routes = topotest.ip4_route(router) expected = { - '10.0.1.0/24': {}, - '10.0.2.0/24': {}, - '10.0.3.0/24': {}, - '10.0.10.0/24': {}, - '172.16.0.0/24': {}, - '172.16.1.0/24': {}, + "10.0.1.0/24": {}, + "10.0.2.0/24": {}, + "10.0.3.0/24": {}, + "10.0.10.0/24": {}, + "172.16.0.0/24": {}, + "172.16.1.0/24": {}, } - if router.name == 'r1' or router.name == 'r2': - expected.update({ - '10.0.10.0/24': None, - '172.16.0.0/24': None, - '172.16.1.0/24': None, - }) - elif router.name == 'r3' or router.name == 'r4': - expected.update({ - '10.0.1.0/24': None, - '10.0.2.0/24': None, - }) + if router.name == "r1" or router.name == "r2": + expected.update( + {"10.0.10.0/24": None, "172.16.0.0/24": None, "172.16.1.0/24": None,} + ) + elif router.name == "r3" or router.name == "r4": + expected.update( + {"10.0.1.0/24": None, "10.0.2.0/24": None,} + ) # Route '10.0.3.0' is no longer available for r4 since it is down. 
- if router.name == 'r4': - expected.update({ - '10.0.3.0/24': None, - }) - assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format(router.name) + if router.name == "r4": + expected.update( + {"10.0.3.0/24": None,} + ) + assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format( + router.name + ) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_ospf6_link_down(): "Test OSPF6 daemon convergence after link goes down" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) - logger.info('Waiting for router "%s" IPv6 OSPF convergence after link down', router) + logger.info( + 'Waiting for router "%s" IPv6 OSPF convergence after link down', router + ) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospf6route_down.txt'.format(router)) + reffile = os.path.join(CWD, "{}/ospf6route_down.txt".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) - assert result, 'OSPF6 did not converge on {}:\n{}'.format(router, diff) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) + assert result, "OSPF6 did not converge on {}:\n{}".format(router, diff) + def test_ospf6_link_down_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: - logger.info('Checking OSPF IPv6 kernel routes in "%s" after link down', router.name) + logger.info( + 'Checking OSPF IPv6 kernel routes in "%s" after link down', router.name + ) routes = topotest.ip6_route(router) expected = { - '2001:db8:1::/64': {}, - '2001:db8:2::/64': {}, - '2001:db8:3::/64': {}, - '2001:db8:100::/64': {}, - '2001:db8:200::/64': {}, - '2001:db8:300::/64': {}, + "2001:db8:1::/64": {}, + "2001:db8:2::/64": {}, + "2001:db8:3::/64": {}, + "2001:db8:100::/64": {}, + "2001:db8:200::/64": {}, + "2001:db8:300::/64": {}, } - if router.name == 'r1' or router.name == 'r2': - expected.update({ - '2001:db8:100::/64': None, - '2001:db8:200::/64': None, - '2001:db8:300::/64': None, - }) - elif router.name == 'r3' or router.name == 'r4': - expected.update({ - '2001:db8:1::/64': None, - '2001:db8:2::/64': None, - }) + if router.name == "r1" or router.name == "r2": + expected.update( + { + "2001:db8:100::/64": None, + "2001:db8:200::/64": None, + "2001:db8:300::/64": None, + } + ) + elif router.name == "r3" or router.name == "r4": + expected.update( + {"2001:db8:1::/64": None, "2001:db8:2::/64": None,} + ) # Route '2001:db8:3::/64' is no longer available for r4 since it is down. 
- if router.name == 'r4': - expected.update({ - '2001:db8:3::/64': None, - }) - assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format(router.name) + if router.name == "r4": + expected.update( + {"2001:db8:3::/64": None,} + ) + assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format( + router.name + ) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref index 2db6f620f9..a2ddf7c5ae 100644 --- a/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref @@ -1,9 +1,9 @@ -O fc00:1:1:1::/64 [110/10] is directly connected, r1-stubnet, XX:XX:XX -O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, XX:XX:XX -O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX +O fc00:1:1:1::/64 [110/10] is directly connected, r1-stubnet, weight 1, XX:XX:XX +O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:3:3:3::/64 [110/20] via 
fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, weight 1, XX:XX:XX +O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref index 9060b0739f..1f642b1b22 100644 --- a/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref @@ -1,10 +1,10 @@ -O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O fc00:2:2:2::/64 [110/10] is directly connected, r2-stubnet, XX:XX:XX -O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, XX:XX:XX -O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX +O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O fc00:2:2:2::/64 [110/10] is directly connected, r2-stubnet, weight 1, XX:XX:XX +O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O fc00:a:a:a::/64 [110/10] is directly connected, 
r2-sw5, weight 1, XX:XX:XX +O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref index 9406f41e94..8e3afa583a 100644 --- a/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref @@ -1,10 +1,10 @@ -O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, XX:XX:XX -O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, XX:XX:XX -O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, XX:XX:XX -O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, XX:XX:XX +O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, weight 1, XX:XX:XX +O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX +O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, weight 1, XX:XX:XX +O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* 
fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref index 9bf032b5e7..0df652ffb3 100644 --- a/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref @@ -1,9 +1,9 @@ -O>* fc00:1:1:1::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:2:2:2::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O fc00:4:4:4::/64 [110/10] is directly connected, r4-stubnet, XX:XX:XX -O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX +O>* fc00:1:1:1::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:2:2:2::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O fc00:4:4:4::/64 [110/10] is directly connected, r4-stubnet, weight 1, XX:XX:XX +O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX diff --git 
a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py index 2f7a4ce4e3..30c09ea606 100755 --- a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py +++ b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py @@ -82,7 +82,7 @@ from mininet.topo import Topo # Save the Current Working Directory to find configuration files later. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -97,6 +97,7 @@ import platform ## ##################################################### + class NetworkTopo(Topo): "OSPFv3 (IPv6) Test Topology 1" @@ -107,7 +108,7 @@ class NetworkTopo(Topo): # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # # Wire up the switches and routers @@ -115,31 +116,31 @@ class NetworkTopo(Topo): # # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1'], nodeif='r1-stubnet') + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet") # Create a empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2'], nodeif='r2-stubnet') + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet") # Create a empty network for router 3 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r3'], nodeif='r3-stubnet') + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet") # Create a empty network for router 4 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r4'], nodeif='r4-stubnet') + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet") # Interconnect routers 1, 2, and 3 - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['r1'], nodeif='r1-sw5') - 
switch.add_link(tgen.gears['r2'], nodeif='r2-sw5') - switch.add_link(tgen.gears['r3'], nodeif='r3-sw5') + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r1"], nodeif="r1-sw5") + switch.add_link(tgen.gears["r2"], nodeif="r2-sw5") + switch.add_link(tgen.gears["r3"], nodeif="r3-sw5") # Interconnect routers 3 and 4 - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['r3'], nodeif='r3-sw6') - switch.add_link(tgen.gears['r4'], nodeif='r4-sw6') + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r3"], nodeif="r3-sw6") + switch.add_link(tgen.gears["r4"], nodeif="r4-sw6") ##################################################### @@ -148,6 +149,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(mod): "Sets up the pytest environment" @@ -164,12 +166,10 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF6, - os.path.join(CWD, '{}/ospf6d.conf'.format(rname)) + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname)) ) # Initialize all routers. 
@@ -194,14 +194,14 @@ def test_ospf6_converged(): pytest.skip(tgen.errors) # For debugging, uncomment the next line - #tgen.mininet_cli() + # tgen.mininet_cli() # Wait for OSPF6 to converge (All Neighbors in either Full or TwoWay State) logger.info("Waiting for OSPF6 convergence") # Set up for regex - pat1 = re.compile('^[0-9]') - pat2 = re.compile('Full') + pat1 = re.compile("^[0-9]") + pat2 = re.compile("Full") timeout = 60 while timeout > 0: @@ -210,7 +210,7 @@ def test_ospf6_converged(): # Look for any node not yet converged for router, rnode in tgen.routers().iteritems(): - resStr = rnode.vtysh_cmd('show ipv6 ospf neigh') + resStr = rnode.vtysh_cmd("show ipv6 ospf neigh") isConverged = False @@ -225,12 +225,12 @@ def test_ospf6_converged(): break if isConverged == False: - logger.info('Waiting for {}'.format(router)) + logger.info("Waiting for {}".format(router)) sys.stdout.flush() break if isConverged: - logger.info('Done') + logger.info("Done") break else: sleep(5) @@ -238,7 +238,7 @@ def test_ospf6_converged(): if timeout == 0: # Bail out with error if a router fails to converge - ospfStatus = rnode.vtysh_cmd('show ipv6 ospf neigh') + ospfStatus = rnode.vtysh_cmd("show ipv6 ospf neigh") assert False, "OSPFv6 did not converge:\n{}".format(ospfStatus) logger.info("OSPFv3 converged.") @@ -250,6 +250,7 @@ def test_ospf6_converged(): if tgen.routers_have_failure(): assert tgen.errors == "", tgen.errors + def compare_show_ipv6(rname, expected): """ Calls 'show ipv6 route' for router `rname` and compare the obtained @@ -263,21 +264,24 @@ def compare_show_ipv6(rname, expected): # Use just the 'O'spf lines of the output linearr = [] for line in current.splitlines(): - if re.match('^O', line): + if re.match("^O", line): linearr.append(line) - current = '\n'.join(linearr) + current = "\n".join(linearr) + + return topotest.difflines( + topotest.normalize_text(current), + topotest.normalize_text(expected), + title1="Current output", + title2="Expected output", + ) - 
return topotest.difflines(topotest.normalize_text(current), - topotest.normalize_text(expected), - title1="Current output", - title2="Expected output") def test_ospfv3_routingTable(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # For debugging, uncomment the next line # tgen.mininet_cli() @@ -287,15 +291,13 @@ def test_ospfv3_routingTable(): logger.info('Waiting for router "%s" convergence', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/show_ipv6_route.ref'.format(router)) + reffile = os.path.join(CWD, "{}/show_ipv6_route.ref".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. - test_func = partial( - compare_show_ipv6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=120, wait=0.5) - assert result, 'OSPFv3 did not converge on {}:\n{}'.format(router, diff) + test_func = partial(compare_show_ipv6, router, expected) + result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5) + assert result, "OSPFv3 did not converge on {}:\n{}".format(router, diff) def test_linux_ipv6_kernel_routingTable(): @@ -303,7 +305,7 @@ def test_linux_ipv6_kernel_routingTable(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # Verify Linux Kernel Routing Table logger.info("Verifying Linux IPv6 Kernel Routing Table") @@ -314,22 +316,22 @@ def test_linux_ipv6_kernel_routingTable(): # each run and we need to translate them linklocals = [] for i in range(1, 5): - linklocals += tgen.net['r{}'.format(i)].get_ipv6_linklocal() + linklocals += tgen.net["r{}".format(i)].get_ipv6_linklocal() # Now compare the routing tables (after substituting link-local addresses) for i in range(1, 5): # Actual output from router - actual = 
tgen.gears['r{}'.format(i)].run('ip -6 route').rstrip() + actual = tgen.gears["r{}".format(i)].run("ip -6 route").rstrip() if "nhid" in actual: - refTableFile = os.path.join(CWD, 'r{}/ip_6_address.nhg.ref'.format(i)) + refTableFile = os.path.join(CWD, "r{}/ip_6_address.nhg.ref".format(i)) else: - refTableFile = os.path.join(CWD, 'r{}/ip_6_address.ref'.format(i)) + refTableFile = os.path.join(CWD, "r{}/ip_6_address.ref".format(i)) if os.path.isfile(refTableFile): expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines())).splitlines(1) + expected = ("\n".join(expected.splitlines())).splitlines(1) # Mask out Link-Local mac addresses for ll in linklocals: @@ -338,20 +340,21 @@ def test_linux_ipv6_kernel_routingTable(): actual = re.sub(r"[ ]+proto [0-9a-z]+ +", " proto XXXX ", actual) actual = re.sub(r"[ ]+nhid [0-9]+ +", " nhid XXXX ", actual) # Remove ff00::/8 routes (seen on some kernels - not from FRR) - actual = re.sub(r'ff00::/8.*', '', actual) + actual = re.sub(r"ff00::/8.*", "", actual) # Strip empty lines actual = actual.lstrip() actual = actual.rstrip() - actual = re.sub(r' +', ' ', actual) + actual = re.sub(r" +", " ", actual) filtered_lines = [] for line in sorted(actual.splitlines()): - if line.startswith('fe80::/64 ') \ - or line.startswith('unreachable fe80::/64 '): + if line.startswith("fe80::/64 ") or line.startswith( + "unreachable fe80::/64 " + ): continue filtered_lines.append(line) - actual = '\n'.join(filtered_lines).splitlines(1) + actual = "\n".join(filtered_lines).splitlines(1) # Print Actual table # logger.info("Router r%s table" % i) @@ -359,18 +362,27 @@ def test_linux_ipv6_kernel_routingTable(): # logger.info(line.rstrip()) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual OSPFv3 IPv6 routing table", - title2="expected OSPFv3 IPv6 routing table") + title2="expected OSPFv3 IPv6 routing 
table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Linux IPv6 Kernel Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Linux IPv6 Kernel Routing Table Check:\n%s\n" + % (i, diff) + ) failures += 1 else: logger.info("r%s ok" % i) - assert failures == 0, "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) def test_shutdown_check_stderr(): @@ -378,11 +390,13 @@ def test_shutdown_check_stderr(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - logger.info("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + logger.info( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") net = tgen.net @@ -390,11 +404,11 @@ def test_shutdown_check_stderr(): logger.info("******************************************") for i in range(1, 5): - net['r%s' % i].stopRouter() - log = net['r%s' % i].getStdErr('ospf6d') + net["r%s" % i].stopRouter() + log = net["r%s" % i].getStdErr("ospf6d") if log: logger.info("\nRouter r%s OSPF6d StdErr Log:\n%s" % (i, log)) - log = net['r%s' % i].getStdErr('zebra') + log = net["r%s" % i].getStdErr("zebra") if log: logger.info("\nRouter r%s Zebra StdErr Log:\n%s" % (i, log)) @@ -402,22 +416,24 @@ def test_shutdown_check_stderr(): def test_shutdown_check_memleak(): "Run the memory leak test and report results." 
- if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - logger.info("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)") - pytest.skip('Skipping test for memory leaks') + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + logger.info( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)" + ) + pytest.skip("Skipping test for memory leaks") tgen = get_topogen() net = tgen.net for i in range(1, 5): - net['r%s' % i].stopRouter() - net['r%s' % i].report_memory_leaks( - os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), - os.path.basename(__file__)) + net["r%s" % i].stopRouter() + net["r%s" % i].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": # To suppress tracebacks, either use the following pytest call or # add "--tb=no" to cli diff --git a/tests/topotests/pim-basic/mcast-rx.py b/tests/topotests/pim-basic/mcast-rx.py index 9e3484e12a..8a3a44ecb1 100755 --- a/tests/topotests/pim-basic/mcast-rx.py +++ b/tests/topotests/pim-basic/mcast-rx.py @@ -36,8 +36,8 @@ import time def ifname_to_ifindex(ifname): output = subprocess.check_output("ip link show %s" % ifname, shell=True) - first_line = output.split('\n')[0] - re_index = re.search('^(\d+):', first_line) + first_line = output.split("\n")[0] + re_index = re.search("^(\d+):", first_line) if re_index: return int(re_index.group(1)) @@ -48,24 +48,28 @@ def ifname_to_ifindex(ifname): # Thou shalt be root if os.geteuid() != 0: - sys.stderr.write('ERROR: You must have root privileges\n') + sys.stderr.write("ERROR: You must have root privileges\n") sys.exit(1) -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)5s: %(message)s') +logging.basicConfig( + level=logging.DEBUG, format="%(asctime)s %(levelname)5s: %(message)s" +) # Color the errors and warnings in red -logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % 
logging.getLevelName(logging.ERROR)) -logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)) +logging.addLevelName( + logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR) +) +logging.addLevelName( + logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING) +) log = logging.getLogger(__name__) -parser = argparse.ArgumentParser(description='Multicast RX utility', - version='1.0.0') -parser.add_argument('group', help='Multicast IP') -parser.add_argument('ifname', help='Interface name') -parser.add_argument('--port', help='UDP port', default=1000) -parser.add_argument('--sleep', help='Time to sleep before we stop waiting', - default = 5) +parser = argparse.ArgumentParser(description="Multicast RX utility", version="1.0.0") +parser.add_argument("group", help="Multicast IP") +parser.add_argument("ifname", help="Interface name") +parser.add_argument("--port", help="UDP port", default=1000) +parser.add_argument("--sleep", help="Time to sleep before we stop waiting", default=5) args = parser.parse_args() # Create the datagram socket @@ -77,7 +81,9 @@ newpid = os.fork() if newpid == 0: ifindex = ifname_to_ifindex(args.ifname) - mreq = struct.pack("=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex) + mreq = struct.pack( + "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex + ) sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) time.sleep(float(args.sleep)) sock.close() diff --git a/tests/topotests/pim-basic/mcast-tx.py b/tests/topotests/pim-basic/mcast-tx.py index c469e47d4c..ad6fdc1062 100755 --- a/tests/topotests/pim-basic/mcast-tx.py +++ b/tests/topotests/pim-basic/mcast-tx.py @@ -26,20 +26,28 @@ import struct import time -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)5s: %(message)s') +logging.basicConfig( + level=logging.DEBUG, format="%(asctime)s %(levelname)5s: %(message)s" +) # Color the errors and warnings in red 
-logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR)) -logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)) +logging.addLevelName( + logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR) +) +logging.addLevelName( + logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING) +) log = logging.getLogger(__name__) -parser = argparse.ArgumentParser(description='Multicast packet generator', version='1.0.0') -parser.add_argument('group', help='Multicast IP') -parser.add_argument('ifname', help='Interface name') -parser.add_argument('--port', type=int, help='UDP port number', default=1000) -parser.add_argument('--ttl', type=int, help='time-to-live', default=20) -parser.add_argument('--count', type=int, help='Packets to send', default=1) -parser.add_argument('--interval', type=int, help='ms between packets', default=100) +parser = argparse.ArgumentParser( + description="Multicast packet generator", version="1.0.0" +) +parser.add_argument("group", help="Multicast IP") +parser.add_argument("ifname", help="Interface name") +parser.add_argument("--port", type=int, help="UDP port number", default=1000) +parser.add_argument("--ttl", type=int, help="time-to-live", default=20) +parser.add_argument("--count", type=int, help="Packets to send", default=1) +parser.add_argument("--interval", type=int, help="ms between packets", default=100) args = parser.parse_args() # Create the datagram socket @@ -49,22 +57,24 @@ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # https://github.com/sivel/bonding/issues/10 # # Bind our socket to ifname -sock.setsockopt(socket.SOL_SOCKET, - 25, - struct.pack("%ds" % len(args.ifname), args.ifname)) +sock.setsockopt( + socket.SOL_SOCKET, 25, struct.pack("%ds" % len(args.ifname), args.ifname) +) # We need to make sure our sendto() finishes before we close the socket sock.setblocking(1) # Set the time-to-live 
-sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack('b', args.ttl)) +sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", args.ttl)) ms = args.interval / 1000.0 # Send data to the multicast group for x in xrange(args.count): - log.info('TX multicast UDP packet to %s:%d on %s' % (args.group, args.port, args.ifname)) - sent = sock.sendto('foobar %d' % x, (args.group, args.port)) + log.info( + "TX multicast UDP packet to %s:%d on %s" % (args.group, args.port, args.ifname) + ) + sent = sock.sendto("foobar %d" % x, (args.group, args.port)) if args.count > 1 and ms: time.sleep(ms) diff --git a/tests/topotests/pim-basic/r1/bgpd.conf b/tests/topotests/pim-basic/r1/bgpd.conf index 8acaac96a0..1ca643f758 100644 --- a/tests/topotests/pim-basic/r1/bgpd.conf +++ b/tests/topotests/pim-basic/r1/bgpd.conf @@ -1,3 +1,4 @@ router bgp 65001 + no bgp ebgp-requires-policy neighbor 10.0.30.3 remote-as external redistribute connected diff --git a/tests/topotests/pim-basic/rp/bgpd.conf b/tests/topotests/pim-basic/rp/bgpd.conf index 6b16c067a5..451799288a 100644 --- a/tests/topotests/pim-basic/rp/bgpd.conf +++ b/tests/topotests/pim-basic/rp/bgpd.conf @@ -1,3 +1,4 @@ router bgp 65003 + no bgp ebgp-requires-policy neighbor 10.0.30.1 remote-as external redistribute connected diff --git a/tests/topotests/pim-basic/test_pim.py b/tests/topotests/pim-basic/test_pim.py index 9101d7e035..2abee39176 100644 --- a/tests/topotests/pim-basic/test_pim.py +++ b/tests/topotests/pim-basic/test_pim.py @@ -32,7 +32,7 @@ import json from functools import partial CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -41,15 +41,16 @@ from lib.topolog import logger from mininet.topo import Topo + class PIMTopo(Topo): def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) for routern in range(1, 4): - 
tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - tgen.add_router('rp') + tgen.add_router("rp") # rp ------ r1 -------- r2 # \ @@ -63,21 +64,22 @@ class PIMTopo(Topo): # r1 <- sw1 -> r2 # r1-eth0 <-> r2-eth0 # 10.0.20.0/24 - sw = tgen.add_switch('sw1') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['r2']) + sw = tgen.add_switch("sw1") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r2"]) # r1 <- sw2 -> rp # r1-eth1 <-> rp-eth0 # 10.0.30.0/24 - sw = tgen.add_switch('sw2') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['rp']) + sw = tgen.add_switch("sw2") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["rp"]) # 10.0.40.0/24 - sw = tgen.add_switch('sw3') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['r3']) + sw = tgen.add_switch("sw3") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r3"]) + def setup_module(mod): "Sets up the pytest environment" @@ -87,21 +89,18 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in tgen.routers().iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_PIM, - os.path.join(CWD, '{}/pimd.conf'.format(rname)) + TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) - ) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) # After loading the configurations, this function loads configured daemons. tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() def teardown_module(mod): @@ -111,22 +110,25 @@ def teardown_module(mod): # This function tears down the whole topology. 
tgen.stop_topology() + def test_pim_rp_setup(): "Ensure basic routing has come up and the rp has an outgoing interface" - #Ensure rp and r1 establish pim neighbor ship and bgp has come up - #Finally ensure that the rp has an outgoing interface on r1 + # Ensure rp and r1 establish pim neighbor ship and bgp has come up + # Finally ensure that the rp has an outgoing interface on r1 tgen = get_topogen() - r1 = tgen.gears['r1'] - json_file = '{}/{}/rp-info.json'.format(CWD, r1.name) + r1 = tgen.gears["r1"] + json_file = "{}/{}/rp-info.json".format(CWD, r1.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip pim rp-info json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip pim rp-info json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=15, wait=5) assertmsg = '"{}" JSON output mismatches'.format(r1.name) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_pim_send_mcast_stream(): "Establish a Multicast stream from r2 -> r1 and then ensure S,G is created as appropriate" @@ -137,47 +139,58 @@ def test_pim_send_mcast_stream(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - rp = tgen.gears['rp'] - r3 = tgen.gears['r3'] - r2 = tgen.gears['r2'] - r1 = tgen.gears['r1'] + rp = tgen.gears["rp"] + r3 = tgen.gears["r3"] + r2 = tgen.gears["r2"] + r1 = tgen.gears["r1"] # Let's establish a S,G stream from r2 -> r1 CWD = os.path.dirname(os.path.realpath(__file__)) - r2.run("{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r2-eth0 > /tmp/bar".format(CWD)) + r2.run( + "{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r2-eth0 > /tmp/bar".format( + CWD + ) + ) # And from r3 -> r1 - r3.run("{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r3-eth0 > /tmp/bar".format(CWD)) + r3.run( + "{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r3-eth0 > /tmp/bar".format( + CWD + ) + ) # Let's see that it 
shows up and we have established some basic state out = r1.vtysh_cmd("show ip pim upstream json", isjson=True) expected = { - '229.1.1.1': { - '10.0.20.2': { - 'firstHopRouter': 1, - 'joinState': 'NotJoined', - 'regState': 'RegPrune', - 'inboundInterface': 'r1-eth0', + "229.1.1.1": { + "10.0.20.2": { + "firstHopRouter": 1, + "joinState": "NotJoined", + "regState": "RegPrune", + "inboundInterface": "r1-eth0", } } } - assert topotest.json_cmp(out, expected) is None, 'failed to converge pim' - #tgen.mininet_cli() + assert topotest.json_cmp(out, expected) is None, "failed to converge pim" + # tgen.mininet_cli() + def test_pim_rp_sees_stream(): "Ensure that the RP sees the stream and has acted accordingly" tgen = get_topogen() - rp = tgen.gears['rp'] - json_file = '{}/{}/upstream.json'.format(CWD, rp.name) + rp = tgen.gears["rp"] + json_file = "{}/{}/upstream.json".format(CWD, rp.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - rp, 'show ip pim upstream json', expected) - _, result = topotest.run_and_expect(test_func, None, count=20, wait=.5) + test_func = partial( + topotest.router_json_cmp, rp, "show ip pim upstream json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(rp.name) assert result is None, assertmsg + def test_pim_igmp_report(): "Send a igmp report from r2->r1 and ensure that the *,G state is created on r1" logger.info("Send a igmp report from r2-r1 and ensure *,G created") @@ -187,8 +200,8 @@ def test_pim_igmp_report(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - r2 = tgen.gears['r2'] - r1 = tgen.gears['r1'] + r2 = tgen.gears["r2"] + r1 = tgen.gears["r1"] # Let's send a igmp report from r2->r1 CWD = os.path.dirname(os.path.realpath(__file__)) @@ -196,28 +209,28 @@ def test_pim_igmp_report(): out = r1.vtysh_cmd("show ip pim upstream json", isjson=True) expected = { - '229.1.1.2': { - '*': { - 
'sourceIgmp': 1, - 'joinState': 'Joined', - 'regState': 'RegNoInfo', - 'sptBit': 0, + "229.1.1.2": { + "*": { + "sourceIgmp": 1, + "joinState": "Joined", + "regState": "RegNoInfo", + "sptBit": 0, } } } - assert topotest.json_cmp(out, expected) is None, 'failed to converge pim' + assert topotest.json_cmp(out, expected) is None, "failed to converge pim" def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/rip-topo1/r1/rip_status.ref b/tests/topotests/rip-topo1/r1/rip_status.ref index d75fbe85bb..31ad46ab2e 100644 --- a/tests/topotests/rip-topo1/r1/rip_status.ref +++ b/tests/topotests/rip-topo1/r1/rip_status.ref @@ -8,8 +8,14 @@ Routing Protocol is "rip" Default version control: send version 2, receive version 2 Interface Send Recv Key-chain r1-eth1 2 2 + r1-eth2 2 2 + r1-eth3 2 2 Routing for Networks: 193.1.1.0/26 + r1-eth2 + r1-eth3 + Passive Interface(s): + r1-eth3 Routing Information Sources: Gateway BadPackets BadRoutes Distance Last Update 193.1.1.2 0 0 120 XX:XX:XX diff --git a/tests/topotests/rip-topo1/r1/ripd.conf b/tests/topotests/rip-topo1/r1/ripd.conf index 935ec312e5..54f1774214 100644 --- a/tests/topotests/rip-topo1/r1/ripd.conf +++ b/tests/topotests/rip-topo1/r1/ripd.conf @@ -4,6 +4,9 @@ router rip timers basic 5 180 5 version 2 network 193.1.1.0/26 + network r1-eth2 + network r1-eth3 + passive-interface r1-eth3 ! line vty ! 
diff --git a/tests/topotests/rip-topo1/r1/show_ip_rip.ref b/tests/topotests/rip-topo1/r1/show_ip_rip.ref index 561560f230..a0b77c886e 100644 --- a/tests/topotests/rip-topo1/r1/show_ip_rip.ref +++ b/tests/topotests/rip-topo1/r1/show_ip_rip.ref @@ -6,5 +6,7 @@ Sub-codes: Network Next Hop Metric From Tag Time R(n) 192.168.2.0/24 193.1.1.2 3 193.1.1.2 0 XX:XX R(n) 192.168.3.0/24 193.1.1.2 3 193.1.1.2 0 XX:XX +C(i) 192.168.98.0/24 0.0.0.0 1 self 0 +C(i) 192.168.99.0/24 0.0.0.0 1 self 0 C(i) 193.1.1.0/26 0.0.0.0 1 self 0 R(n) 193.1.2.0/24 193.1.1.2 2 193.1.1.2 0 XX:XX diff --git a/tests/topotests/rip-topo1/r1/show_ip_route.ref b/tests/topotests/rip-topo1/r1/show_ip_route.ref index 62d71f0ab6..2ff26045aa 100644 --- a/tests/topotests/rip-topo1/r1/show_ip_route.ref +++ b/tests/topotests/rip-topo1/r1/show_ip_route.ref @@ -1,3 +1,3 @@ -R>* 192.168.2.0/24 [120/3] via 193.1.1.2, r1-eth1 -R>* 192.168.3.0/24 [120/3] via 193.1.1.2, r1-eth1 -R>* 193.1.2.0/24 [120/2] via 193.1.1.2, r1-eth1 +R>* 192.168.2.0/24 [120/3] via 193.1.1.2, r1-eth1, weight 1 +R>* 192.168.3.0/24 [120/3] via 193.1.1.2, r1-eth1, weight 1 +R>* 193.1.2.0/24 [120/2] via 193.1.1.2, r1-eth1, weight 1 diff --git a/tests/topotests/rip-topo1/r1/zebra.conf b/tests/topotests/rip-topo1/r1/zebra.conf index 8537f6dd80..7c8f2c502b 100644 --- a/tests/topotests/rip-topo1/r1/zebra.conf +++ b/tests/topotests/rip-topo1/r1/zebra.conf @@ -5,6 +5,13 @@ hostname r1 interface r1-eth0 ip address 192.168.1.1/24 ! +interface r1-eth2 + ip address 192.168.99.1/24 +! +interface r1-eth3 + ip address 192.168.98.1/24 +! 
+ interface r1-eth1 description to sw2 - RIPv2 interface ip address 193.1.1.1/26 diff --git a/tests/topotests/rip-topo1/r2/rip_status.ref b/tests/topotests/rip-topo1/r2/rip_status.ref index da1abd041a..99841a62b0 100644 --- a/tests/topotests/rip-topo1/r2/rip_status.ref +++ b/tests/topotests/rip-topo1/r2/rip_status.ref @@ -14,5 +14,6 @@ Routing Protocol is "rip" 193.1.2.0/24 Routing Information Sources: Gateway BadPackets BadRoutes Distance Last Update + 193.1.1.1 0 0 120 XX:XX:XX 193.1.2.2 0 0 120 XX:XX:XX Distance: (default is 120) diff --git a/tests/topotests/rip-topo1/r2/show_ip_rip.ref b/tests/topotests/rip-topo1/r2/show_ip_rip.ref index 58ab052160..b61fb45eac 100644 --- a/tests/topotests/rip-topo1/r2/show_ip_rip.ref +++ b/tests/topotests/rip-topo1/r2/show_ip_rip.ref @@ -6,5 +6,7 @@ Sub-codes: Network Next Hop Metric From Tag Time R(n) 192.168.2.0/24 193.1.2.2 2 193.1.2.2 0 XX:XX R(n) 192.168.3.0/24 193.1.2.2 2 193.1.2.2 0 XX:XX +R(n) 192.168.98.0/24 193.1.1.1 2 193.1.1.1 0 XX:XX +R(n) 192.168.99.0/24 193.1.1.1 2 193.1.1.1 0 XX:XX C(i) 193.1.1.0/26 0.0.0.0 1 self 0 C(i) 193.1.2.0/24 0.0.0.0 1 self 0 diff --git a/tests/topotests/rip-topo1/r2/show_ip_route.ref b/tests/topotests/rip-topo1/r2/show_ip_route.ref index 4b34939aa5..80f51a92c7 100644 --- a/tests/topotests/rip-topo1/r2/show_ip_route.ref +++ b/tests/topotests/rip-topo1/r2/show_ip_route.ref @@ -1,2 +1,4 @@ -R>* 192.168.2.0/24 [120/2] via 193.1.2.2, r2-eth1 -R>* 192.168.3.0/24 [120/2] via 193.1.2.2, r2-eth1 +R>* 192.168.2.0/24 [120/2] via 193.1.2.2, r2-eth1, weight 1 +R>* 192.168.3.0/24 [120/2] via 193.1.2.2, r2-eth1, weight 1 +R>* 192.168.98.0/24 [120/2] via 193.1.1.1, r2-eth0, weight 1 +R>* 192.168.99.0/24 [120/2] via 193.1.1.1, r2-eth0, weight 1 diff --git a/tests/topotests/rip-topo1/r3/show_ip_rip.ref b/tests/topotests/rip-topo1/r3/show_ip_rip.ref index cf672712a8..1df299b5e6 100644 --- a/tests/topotests/rip-topo1/r3/show_ip_rip.ref +++ b/tests/topotests/rip-topo1/r3/show_ip_rip.ref @@ -6,5 +6,7 @@ 
Sub-codes: Network Next Hop Metric From Tag Time S(r) 192.168.2.0/24 192.168.3.10 1 self 0 C(r) 192.168.3.0/24 0.0.0.0 1 self 0 +R(n) 192.168.98.0/24 193.1.2.1 3 193.1.2.1 0 XX:XX +R(n) 192.168.99.0/24 193.1.2.1 3 193.1.2.1 0 XX:XX R(n) 193.1.1.0/26 193.1.2.1 2 193.1.2.1 0 XX:XX C(i) 193.1.2.0/24 0.0.0.0 1 self 0 diff --git a/tests/topotests/rip-topo1/r3/show_ip_route.ref b/tests/topotests/rip-topo1/r3/show_ip_route.ref index 835e1229c8..2b739f0489 100644 --- a/tests/topotests/rip-topo1/r3/show_ip_route.ref +++ b/tests/topotests/rip-topo1/r3/show_ip_route.ref @@ -1 +1,3 @@ -R>* 193.1.1.0/26 [120/2] via 193.1.2.1, r3-eth1 +R>* 192.168.98.0/24 [120/3] via 193.1.2.1, r3-eth1, weight 1 +R>* 192.168.99.0/24 [120/3] via 193.1.2.1, r3-eth1, weight 1 +R>* 193.1.1.0/26 [120/2] via 193.1.2.1, r3-eth1, weight 1 diff --git a/tests/topotests/rip-topo1/test_rip_topo1.py b/tests/topotests/rip-topo1/test_rip_topo1.py index 8f3c25e910..3098812a24 100755 --- a/tests/topotests/rip-topo1/test_rip_topo1.py +++ b/tests/topotests/rip-topo1/test_rip_topo1.py @@ -54,6 +54,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "RIP Topology 1" @@ -63,33 +64,38 @@ class NetworkTopo(Topo): router = {} # # Setup Main Router - router[1] = topotest.addRouter(self, 'r1') + router[1] = topotest.addRouter(self, "r1") # # Setup RIP Routers for i in range(2, 4): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # # Setup Switches switch = {} # # On main router # First switch is for a dummy interface (for local network) - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[1], intfName2='r1-eth0') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink(switch[1], router[1], intfName2="r1-eth0") # # Switches for RIP # switch 2 switch is for connection to RIP router - switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch) - 
self.addLink(switch[2], router[1], intfName2='r1-eth1') - self.addLink(switch[2], router[2], intfName2='r2-eth0') + switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch) + self.addLink(switch[2], router[1], intfName2="r1-eth1") + self.addLink(switch[2], router[2], intfName2="r2-eth0") # switch 3 is between RIP routers - switch[3] = self.addSwitch('sw3', cls=topotest.LegacySwitch) - self.addLink(switch[3], router[2], intfName2='r2-eth1') - self.addLink(switch[3], router[3], intfName2='r3-eth1') + switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch) + self.addLink(switch[3], router[2], intfName2="r2-eth1") + self.addLink(switch[3], router[3], intfName2="r3-eth1") # switch 4 is stub on remote RIP router - switch[4] = self.addSwitch('sw4', cls=topotest.LegacySwitch) - self.addLink(switch[4], router[3], intfName2='r3-eth0') + switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch) + self.addLink(switch[4], router[3], intfName2="r3-eth0") + + switch[5] = self.addSwitch("sw5", cls=topotest.LegacySwitch) + self.addLink(switch[5], router[1], intfName2="r1-eth2") + switch[6] = self.addSwitch("sw6", cls=topotest.LegacySwitch) + self.addLink(switch[6], router[1], intfName2="r1-eth3") ##################################################### @@ -98,6 +104,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net @@ -105,7 +112,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -116,9 +123,9 @@ def setup_module(module): # Starting Routers # for i in range(1, 4): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ripd', '%s/r%s/ripd.conf' % (thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % 
i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) @@ -139,7 +146,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -147,7 +154,7 @@ def test_router_running(): # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -159,7 +166,7 @@ def test_converge_protocols(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -172,7 +179,7 @@ def test_converge_protocols(): # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -184,7 +191,7 @@ def test_rip_status(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -194,30 +201,37 @@ def test_rip_status(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/rip_status.ref' % (thisDir, i) + refTableFile = "%s/r%s/rip_status.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix 
newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip rip status" 2> /dev/null').rstrip() - # Drop time in next due + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip rip status" 2> /dev/null') + .rstrip() + ) + # Drop time in next due actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IP RIP status", - title2="expected IP RIP status") + title2="expected IP RIP status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IP RIP status check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed IP RIP status check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) @@ -226,7 +240,7 @@ def test_rip_status(): # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -238,7 +252,7 @@ def test_rip_routes(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -248,28 +262,31 @@ def test_rip_routes(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ip_rip.ref' % 
(thisDir, i) + refTableFile = "%s/r%s/show_ip_rip.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip rip" 2> /dev/null').rstrip() + actual = net["r%s" % i].cmd('vtysh -c "show ip rip" 2> /dev/null').rstrip() # Drop Time actual = re.sub(r"[0-9][0-9]:[0-5][0-9]", "XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW IP RIP", - title2="expected SHOW IP RIP") + title2="expected SHOW IP RIP", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW IP RIP check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed SHOW IP RIP check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) @@ -278,7 +295,7 @@ def test_rip_routes(): # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -290,7 +307,7 @@ def test_zebra_ipv4_routingTable(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -300,37 +317,49 @@ def test_zebra_ipv4_routingTable(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = 
'%s/r%s/show_ip_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ip_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"') + .rstrip() + ) # Drop timers on end of line (older Quagga Versions) actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual Zebra IPv4 routing table", - title2="expected Zebra IPv4 routing table") + title2="expected Zebra IPv4 routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Zebra IPv4 Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Zebra IPv4 Routing Table Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -342,30 +371,30 @@ def 
test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") thisDir = os.path.dirname(os.path.realpath(__file__)) print("\n\n** Verifing unexpected STDERR output from daemons") print("******************************************\n") - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('ripd') + log = net["r1"].getStdErr("ripd") if log: print("\nRIPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/ripng-topo1/r1/ripng_status.ref b/tests/topotests/ripng-topo1/r1/ripng_status.ref index e6197f179b..b02cc69d0e 100644 --- a/tests/topotests/ripng-topo1/r1/ripng_status.ref +++ b/tests/topotests/ripng-topo1/r1/ripng_status.ref @@ -8,8 +8,12 @@ Routing Protocol is "RIPng" Default version control: send version 1, receive version 1 Interface Send Recv r1-eth1 1 1 + r1-eth2 1 1 + r1-eth3 1 1 Routing for Networks: fc00:5::/64 + r1-eth2 + r1-eth3 Routing Information Sources: Gateway BadPackets BadRoutes Distance Last Update fe80::XXXX:XXXX:XXXX:XXXX diff --git a/tests/topotests/ripng-topo1/r1/ripngd.conf b/tests/topotests/ripng-topo1/r1/ripngd.conf index dd54c43557..07ed7296d9 100644 --- a/tests/topotests/ripng-topo1/r1/ripngd.conf +++ b/tests/topotests/ripng-topo1/r1/ripngd.conf @@ -7,6 +7,9 @@ debug ripng zebra router ripng timers basic 5 
180 5 network fc00:5::/64 + network r1-eth2 + network r1-eth3 + passive-interface r1-eth3 ! line vty ! diff --git a/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref b/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref index 18d026a8fd..30d0f31e18 100644 --- a/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref +++ b/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref @@ -12,3 +12,7 @@ R(n) fc00:7::/64 fe80::XXXX:XXXX:XXXX:XXXX r1-eth1 3 0 XX:XX R(n) fc00:7:1111::/64 fe80::XXXX:XXXX:XXXX:XXXX r1-eth1 3 0 XX:XX +C(i) fc00:98:0:1::/64 + :: self 1 0 +C(i) fc00:99:0:1::/64 + :: self 1 0 diff --git a/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref b/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref index 7e5fc3f0f5..55fbbc34f3 100644 --- a/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref +++ b/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref @@ -1,3 +1,3 @@ -R>* fc00:6::/62 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1 -R>* fc00:7::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1 -R>* fc00:7:1111::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1 +R>* fc00:6::/62 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1 +R>* fc00:7::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1 +R>* fc00:7:1111::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1 diff --git a/tests/topotests/ripng-topo1/r1/zebra.conf b/tests/topotests/ripng-topo1/r1/zebra.conf index 1a10343044..11c1cdc5b9 100644 --- a/tests/topotests/ripng-topo1/r1/zebra.conf +++ b/tests/topotests/ripng-topo1/r1/zebra.conf @@ -10,6 +10,12 @@ interface r1-eth1 ipv6 address fc00:5::1/64 no link-detect ! +interface r1-eth2 + ipv6 address fc00:99:0:1::1/64 +! +interface r1-eth3 + ipv6 address fc00:98:0:1::1/64 +! ip forwarding ipv6 forwarding ! 
diff --git a/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref b/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref index 765efd07a2..fe5bcc8b31 100644 --- a/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref +++ b/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref @@ -12,3 +12,7 @@ R(n) fc00:7::/64 fe80::XXXX:XXXX:XXXX:XXXX r2-eth1 2 0 XX:XX R(n) fc00:7:1111::/64 fe80::XXXX:XXXX:XXXX:XXXX r2-eth1 2 0 XX:XX +R(n) fc00:98:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r2-eth0 2 0 XX:XX +R(n) fc00:99:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r2-eth0 2 0 XX:XX diff --git a/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref b/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref index 688e77e7ed..72e1f926a2 100644 --- a/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref +++ b/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref @@ -1,2 +1,4 @@ -R>* fc00:7::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1 -R>* fc00:7:1111::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1 +R>* fc00:7::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1, weight 1 +R>* fc00:7:1111::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1, weight 1 +R>* fc00:98:0:1::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth0, weight 1 +R>* fc00:99:0:1::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth0, weight 1 diff --git a/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref b/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref index 81e76b97a6..909ad663ba 100644 --- a/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref +++ b/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref @@ -12,3 +12,7 @@ C(r) fc00:7::/64 :: self 1 0 S(r) fc00:7:1111::/64 :: self 1 0 +R(n) fc00:98:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r3-eth1 3 0 XX:XX +R(n) fc00:99:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r3-eth1 3 0 XX:XX diff --git a/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref b/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref index 8e46e39921..25a7440111 100644 --- a/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref +++ 
b/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref @@ -1 +1,3 @@ -R>* fc00:5::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1 +R>* fc00:5::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1 +R>* fc00:98:0:1::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1 +R>* fc00:99:0:1::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1 diff --git a/tests/topotests/ripng-topo1/test_ripng_topo1.py b/tests/topotests/ripng-topo1/test_ripng_topo1.py index 32b137240c..23e689235c 100755 --- a/tests/topotests/ripng-topo1/test_ripng_topo1.py +++ b/tests/topotests/ripng-topo1/test_ripng_topo1.py @@ -55,6 +55,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "RIPng Topology 1" @@ -64,33 +65,37 @@ class NetworkTopo(Topo): router = {} # # Setup Main Router - router[1] = topotest.addRouter(self, 'r1') + router[1] = topotest.addRouter(self, "r1") # # Setup RIPng Routers for i in range(2, 4): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # Setup Switches switch = {} # # On main router # First switch is for a dummy interface (for local network) - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[1], intfName2='r1-eth0') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink(switch[1], router[1], intfName2="r1-eth0") # # Switches for RIPng # switch 2 switch is for connection to RIP router - switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch) - self.addLink(switch[2], router[1], intfName2='r1-eth1') - self.addLink(switch[2], router[2], intfName2='r2-eth0') + switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch) + self.addLink(switch[2], router[1], intfName2="r1-eth1") + self.addLink(switch[2], router[2], intfName2="r2-eth0") # switch 3 is between RIP routers - switch[3] = self.addSwitch('sw3', cls=topotest.LegacySwitch) - self.addLink(switch[3], router[2], 
intfName2='r2-eth1') - self.addLink(switch[3], router[3], intfName2='r3-eth1') + switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch) + self.addLink(switch[3], router[2], intfName2="r2-eth1") + self.addLink(switch[3], router[3], intfName2="r3-eth1") # switch 4 is stub on remote RIP router - switch[4] = self.addSwitch('sw4', cls=topotest.LegacySwitch) - self.addLink(switch[4], router[3], intfName2='r3-eth0') + switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch) + self.addLink(switch[4], router[3], intfName2="r3-eth0") + switch[5] = self.addSwitch("sw5", cls=topotest.LegacySwitch) + self.addLink(switch[5], router[1], intfName2="r1-eth2") + switch[6] = self.addSwitch("sw6", cls=topotest.LegacySwitch) + self.addLink(switch[6], router[1], intfName2="r1-eth3") ##################################################### @@ -99,6 +104,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net @@ -106,7 +112,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -117,9 +123,9 @@ def setup_module(module): # Starting Routers # for i in range(1, 4): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ripngd', '%s/r%s/ripngd.conf' % (thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) @@ -140,7 +146,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** 
Check if FRR/Quagga is running on each Router node") @@ -148,7 +154,7 @@ def test_router_running(): # Starting Routers for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -160,7 +166,7 @@ def test_converge_protocols(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -173,11 +179,11 @@ def test_converge_protocols(): # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line - #CLI(net) + # CLI(net) def test_ripng_status(): @@ -185,7 +191,7 @@ def test_ripng_status(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -195,41 +201,53 @@ def test_ripng_status(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/ripng_status.ref' % (thisDir, i) + refTableFile = "%s/r%s/ripng_status.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null') + .rstrip() + ) # Mask out 
Link-Local mac address portion. They are random... actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual) - # Drop time in next due + # Drop time in next due actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IPv6 RIPng status", - title2="expected IPv6 RIPng status") + title2="expected IPv6 RIPng status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IPv6 RIPng status check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed IPv6 RIPng status check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -241,7 +259,7 @@ def test_ripng_routes(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -251,42 +269,52 @@ def test_ripng_routes(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ipv6_ripng.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ipv6_ripng.ref" % (thisDir, i) if 
os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i].cmd('vtysh -c "show ipv6 ripng" 2> /dev/null').rstrip() + ) # Drop Time actual = re.sub(r" [0-9][0-9]:[0-5][0-9]", " XX:XX", actual) # Mask out Link-Local mac address portion. They are random... - actual = re.sub(r" fe80::[0-9a-f: ]+", " fe80::XXXX:XXXX:XXXX:XXXX ", actual) + actual = re.sub( + r" fe80::[0-9a-f: ]+", " fe80::XXXX:XXXX:XXXX:XXXX ", actual + ) # Remove trailing spaces on all lines - actual = '\n'.join([line.rstrip() for line in actual.splitlines()]) + actual = "\n".join([line.rstrip() for line in actual.splitlines()]) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW IPv6 RIPng", - title2="expected SHOW IPv6 RIPng") + title2="expected SHOW IPv6 RIPng", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW IPv6 RIPng check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed SHOW IPv6 RIPng check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "SHOW IPv6 RIPng failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "SHOW IPv6 RIPng failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", 
fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -298,7 +326,7 @@ def test_zebra_ipv6_routingTable(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -308,39 +336,51 @@ def test_zebra_ipv6_routingTable(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ipv6_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ipv6_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"') + .rstrip() + ) # Mask out Link-Local mac address portion. They are random... 
actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual) # Drop timers on end of line (older Quagga Versions) actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual Zebra IPv6 routing table", - title2="expected Zebra IPv6 routing table") + title2="expected Zebra IPv6 routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Zebra IPv6 Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Zebra IPv6 Routing Table Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -352,24 +392,26 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr 
output") thisDir = os.path.dirname(os.path.realpath(__file__)) print("\n\n** Verifying unexpected STDERR output from daemons") print("******************************************\n") - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('ripngd') + log = net["r1"].getStdErr("ripngd") if log: print("\nRIPngd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) @@ -379,22 +421,26 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) - net['r1'].stopRouter() - net['r1'].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r1"].stopRouter() + net["r1"].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py index d73f613f95..17eb736cab 100755 --- a/tests/topotests/zebra_rib/test_zebra_rib.py +++ b/tests/topotests/zebra_rib/test_zebra_rib.py @@ -34,7 +34,7 @@ import json # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,24 +45,27 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class ZebraTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) - tgen.add_router('r1') + tgen.add_router("r1") # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + def setup_module(mod): "Sets up the pytest environment" @@ -72,78 +75,86 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) # Initialize all routers. 
tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() tgen.stop_topology() + def test_zebra_kernel_admin_distance(): "Test some basic kernel routes added that should be accepted" logger.info("Test some basic kernel routes that should be accepted") tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] # Route with 255/8192 metric - r1.run('ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272') + r1.run("ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272") # Route with 1/1 metric - r1.run('ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217') + r1.run("ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217") # Route with 10/1 metric - r1.run('ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161') + r1.run("ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161") # Same route with a 160/1 metric - r1.run('ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561') + r1.run("ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561") - #Currently I believe we have a bug here with the same route and different - #metric. That needs to be properly resolved. Making a note for - #coming back around later and fixing this. - #tgen.mininet_cli() + # Currently I believe we have a bug here with the same route and different + # metric. That needs to be properly resolved. Making a note for + # coming back around later and fixing this. 
+ # tgen.mininet_cli() for i in range(1, 2): - json_file = '{}/r1/v4_route_{}.json'.format(CWD, i) + json_file = "{}/r1/v4_route_{}.json".format(CWD, i) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, - 'show ip route 4.5.{}.0 json'.format(i), - expected) - _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5) + test_func = partial( + topotest.router_json_cmp, + r1, + "show ip route 4.5.{}.0 json".format(i), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) assertmsg = '"r1" JSON output mismatches' assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_zebra_kernel_override(): "Test that a FRR route with a lower admin distance takes over" logger.info("Test kernel override with a better admin distance") tgen = get_topogen() - if (tgen.routers_have_failure()): + if tgen.routers_have_failure(): ptyest.skip("skipped because of preview test failure") - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] r1.vtysh_cmd("conf\nip route 4.5.1.0/24 192.168.216.3") - json_file = '{}/r1/v4_route_1_static_override.json'.format(CWD) + json_file = "{}/r1/v4_route_1_static_override.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 4.5.1.0 json', expected) - _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) assert result is None, '"r1" JSON output mismatches' - logger.info("Test that the removal of the static route allows the kernel to take back over") + logger.info( + "Test that the removal of the static route allows the kernel to take back over" + ) r1.vtysh_cmd("conf\nno ip route 4.5.1.0/24 192.168.216.3") - json_file = '{}/r1/v4_route_1.json'.format(CWD) + json_file = 
"{}/r1/v4_route_1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 4.5.1.0 json', expected) - _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) assert result is None, '"r1" JSON output mismatches' @@ -151,10 +162,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tools/coccinelle/cast_to_larger_sizes.cocci b/tools/coccinelle/cast_to_larger_sizes.cocci new file mode 100644 index 0000000000..d97e1f9c33 --- /dev/null +++ b/tools/coccinelle/cast_to_larger_sizes.cocci @@ -0,0 +1,20 @@ +// spatch -sp_file tools/coccinelle/cast_to_larger_sizes.cocci --recursive-includes ./ + +@r@ +typedef uint8_t; +typedef uint16_t; +typedef uint32_t; +typedef uint64_t; +uint8_t *i8; +position p; +@@ + + \( + (uint64_t *) i8@p\|(uint32_t *) i8@p\|(uint16_t *) i8@p + \) + +@script:python@ +p << r.p; +@@ + +coccilib.report.print_report(p[0],"Bad typecast to larger size") diff --git a/tools/coccinelle/same_type_casting.cocci b/tools/coccinelle/same_type_casting.cocci new file mode 100644 index 0000000000..58fd7569af --- /dev/null +++ b/tools/coccinelle/same_type_casting.cocci @@ -0,0 +1,7 @@ +@@ +type T; +T *ptr; +@@ + +- (T *)ptr ++ ptr diff --git a/tools/gen_northbound_callbacks.c b/tools/gen_northbound_callbacks.c index cbdf01e7b8..7118986854 100644 --- a/tools/gen_northbound_callbacks.c +++ b/tools/gen_northbound_callbacks.c @@ -358,7 +358,7 @@ int main(int argc, char *argv[]) if 
(argc != 1) usage(EXIT_FAILURE); - yang_init(); + yang_init(false); if (search_path) ly_ctx_set_searchdir(ly_native_ctx, search_path); diff --git a/tools/gen_yang_deviations.c b/tools/gen_yang_deviations.c index f611f1c57e..f908e1fc69 100644 --- a/tools/gen_yang_deviations.c +++ b/tools/gen_yang_deviations.c @@ -65,7 +65,7 @@ int main(int argc, char *argv[]) if (argc != 1) usage(EXIT_FAILURE); - yang_init(); + yang_init(false); /* Load YANG module. */ module = yang_module_load(argv[0]); diff --git a/tools/start-stop-daemon.c b/tools/start-stop-daemon.c index c75306a959..13118a2769 100644 --- a/tools/start-stop-daemon.c +++ b/tools/start-stop-daemon.c @@ -235,7 +235,7 @@ static const char *next_dirname(const char *s) { const char *cur; - cur = (const char *)s; + cur = s; if (*cur != '\0') { for (; *cur != '/'; ++cur) @@ -255,7 +255,7 @@ static void add_namespace(const char *path) const char *nsdirname, *nsname, *cur; struct namespace *namespace; - cur = (const char *)path; + cur = path; nsdirname = nsname = ""; while ((cur = next_dirname(cur))[0] != '\0') { @@ -273,7 +273,7 @@ static void add_namespace(const char *path) badusage("invalid namepspace path"); namespace = xmalloc(sizeof(*namespace)); - namespace->path = (const char *)path; + namespace->path = path; namespace->nstype = nstype; LIST_INSERT_HEAD(&namespace_head, namespace, list); } diff --git a/vrrpd/vrrp_ndisc.c b/vrrpd/vrrp_ndisc.c index dc546b09a2..b989e66f60 100644 --- a/vrrpd/vrrp_ndisc.c +++ b/vrrpd/vrrp_ndisc.c @@ -83,8 +83,7 @@ static int vrrp_ndisc_una_build(struct interface *ifp, struct ipaddr *ip, struct nd_opt_hdr *nd_opt_h = (struct nd_opt_hdr *)((char *)ndh + sizeof(struct nd_neighbor_advert)); - char *nd_opt_lladdr = - (char *)((char *)nd_opt_h + sizeof(struct nd_opt_hdr)); + char *nd_opt_lladdr = ((char *)nd_opt_h + sizeof(struct nd_opt_hdr)); char *lladdr = (char *)ifp->hw_addr; /* diff --git a/vrrpd/vrrp_vty.c b/vrrpd/vrrp_vty.c index 892c8dadd4..b6388cc5ba 100644 --- a/vrrpd/vrrp_vty.c 
+++ b/vrrpd/vrrp_vty.c @@ -744,15 +744,33 @@ static int vrrp_config_write_interface(struct vty *vty) return write; } -static struct cmd_node interface_node = {INTERFACE_NODE, "%s(config-if)# ", 1}; -static struct cmd_node debug_node = {DEBUG_NODE, "", 1}; -static struct cmd_node vrrp_node = {VRRP_NODE, "", 1}; +static struct cmd_node interface_node = { + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = vrrp_config_write_interface, +}; + +static struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = vrrp_config_write_debug, +}; + +static struct cmd_node vrrp_node = { + .name = "vrrp", + .node = VRRP_NODE, + .prompt = "", + .config_write = vrrp_config_write_global, +}; void vrrp_vty_init(void) { - install_node(&debug_node, vrrp_config_write_debug); - install_node(&interface_node, vrrp_config_write_interface); - install_node(&vrrp_node, vrrp_config_write_global); + install_node(&debug_node); + install_node(&interface_node); + install_node(&vrrp_node); if_cmd_init(); install_element(VIEW_NODE, &vrrp_vrid_show_cmd); diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index 310acdf37f..2ef0347651 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -1173,141 +1173,326 @@ static char **new_completion(const char *text, int start, int end) /* Vty node structures. 
*/ static struct cmd_node bgp_node = { - BGP_NODE, "%s(config-router)# ", + .name = "bgp", + .node = BGP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node rip_node = { - RIP_NODE, "%s(config-router)# ", + .name = "rip", + .node = RIP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node isis_node = { - ISIS_NODE, "%s(config-router)# ", + .name = "isis", + .node = ISIS_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node openfabric_node = { - OPENFABRIC_NODE, "%s(config-router)# ", + .name = "openfabric", + .node = OPENFABRIC_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", }; static struct cmd_node interface_node = { - INTERFACE_NODE, "%s(config-if)# ", + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", }; static struct cmd_node pw_node = { - PW_NODE, "%s(config-pw)# ", + .name = "pw", + .node = PW_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-pw)# ", }; static struct cmd_node vrf_node = { - VRF_NODE, "%s(config-vrf)# ", + .name = "vrf", + .node = VRF_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-vrf)# ", }; static struct cmd_node nh_group_node = { - NH_GROUP_NODE, - "%s(config-nh-group)# ", + .name = "nexthop-group", + .node = NH_GROUP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-nh-group)# ", }; -static struct cmd_node rmap_node = {RMAP_NODE, "%s(config-route-map)# "}; +static struct cmd_node rmap_node = { + .name = "routemap", + .node = RMAP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-route-map)# ", +}; -static struct cmd_node pbr_map_node = {PBRMAP_NODE, "%s(config-pbr-map)# "}; +static struct cmd_node pbr_map_node = { + .name = "pbr-map", + .node = PBRMAP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-pbr-map)# ", +}; -static struct cmd_node zebra_node = {ZEBRA_NODE, 
"%s(config-router)# "}; +static struct cmd_node zebra_node = { + .name = "zebra", + .node = ZEBRA_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node bgp_vpnv4_node = {BGP_VPNV4_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_vpnv4_node = { + .name = "bgp vpnv4", + .node = BGP_VPNV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_vpnv6_node = {BGP_VPNV6_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_vpnv6_node = { + .name = "bgp vpnv6", + .node = BGP_VPNV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_flowspecv4_node = {BGP_FLOWSPECV4_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_flowspecv4_node = { + .name = "bgp ipv4 flowspec", + .node = BGP_FLOWSPECV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_flowspecv6_node = {BGP_FLOWSPECV6_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_flowspecv6_node = { + .name = "bgp ipv6 flowspec", + .node = BGP_FLOWSPECV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv4_node = {BGP_IPV4_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv4_node = { + .name = "bgp ipv4 unicast", + .node = BGP_IPV4_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv4m_node = {BGP_IPV4M_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv4m_node = { + .name = "bgp ipv4 multicast", + .node = BGP_IPV4M_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv4l_node = {BGP_IPV4L_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv4l_node = { + .name = "bgp ipv4 labeled unicast", + .node = BGP_IPV4L_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static 
struct cmd_node bgp_ipv6_node = {BGP_IPV6_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv6_node = { + .name = "bgp ipv6", + .node = BGP_IPV6_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_ipv6m_node = {BGP_IPV6M_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv6m_node = { + .name = "bgp ipv6 multicast", + .node = BGP_IPV6M_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_evpn_node = {BGP_EVPN_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_evpn_node = { + .name = "bgp evpn", + .node = BGP_EVPN_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; -static struct cmd_node bgp_evpn_vni_node = {BGP_EVPN_VNI_NODE, - "%s(config-router-af-vni)# "}; +static struct cmd_node bgp_evpn_vni_node = { + .name = "bgp evpn vni", + .node = BGP_EVPN_VNI_NODE, + .parent_node = BGP_EVPN_NODE, + .prompt = "%s(config-router-af-vni)# ", +}; -static struct cmd_node bgp_ipv6l_node = {BGP_IPV6L_NODE, - "%s(config-router-af)# "}; +static struct cmd_node bgp_ipv6l_node = { + .name = "bgp ipv6 labeled unicast", + .node = BGP_IPV6L_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-af)# ", +}; static struct cmd_node bgp_vnc_defaults_node = { - BGP_VNC_DEFAULTS_NODE, "%s(config-router-vnc-defaults)# "}; + .name = "bgp vnc defaults", + .node = BGP_VNC_DEFAULTS_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vnc-defaults)# ", +}; static struct cmd_node bgp_vnc_nve_group_node = { - BGP_VNC_NVE_GROUP_NODE, "%s(config-router-vnc-nve-group)# "}; + .name = "bgp vnc nve", + .node = BGP_VNC_NVE_GROUP_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vnc-nve-group)# ", +}; -static struct cmd_node bgp_vrf_policy_node = {BGP_VRF_POLICY_NODE, - "%s(config-router-vrf-policy)# "}; +static struct cmd_node bgp_vrf_policy_node = { + .name = "bgp vrf policy", + .node = BGP_VRF_POLICY_NODE, + 
.parent_node = BGP_NODE, + .prompt = "%s(config-router-vrf-policy)# ", +}; static struct cmd_node bgp_vnc_l2_group_node = { - BGP_VNC_L2_GROUP_NODE, "%s(config-router-vnc-l2-group)# "}; + .name = "bgp vnc l2", + .node = BGP_VNC_L2_GROUP_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-router-vnc-l2-group)# ", +}; -static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "}; +static struct cmd_node bmp_node = { + .name = "bmp", + .node = BMP_NODE, + .parent_node = BGP_NODE, + .prompt = "%s(config-bgp-bmp)# " +}; -static struct cmd_node ospf_node = {OSPF_NODE, "%s(config-router)# "}; +static struct cmd_node ospf_node = { + .name = "ospf", + .node = OSPF_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node eigrp_node = {EIGRP_NODE, "%s(config-router)# "}; +static struct cmd_node eigrp_node = { + .name = "eigrp", + .node = EIGRP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node babel_node = {BABEL_NODE, "%s(config-router)# "}; +static struct cmd_node babel_node = { + .name = "babel", + .node = BABEL_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node ripng_node = {RIPNG_NODE, "%s(config-router)# "}; +static struct cmd_node ripng_node = { + .name = "ripng", + .node = RIPNG_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-router)# ", +}; -static struct cmd_node ospf6_node = {OSPF6_NODE, "%s(config-ospf6)# "}; +static struct cmd_node ospf6_node = { + .name = "ospf6", + .node = OSPF6_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-ospf6)# ", +}; -static struct cmd_node ldp_node = {LDP_NODE, "%s(config-ldp)# "}; +static struct cmd_node ldp_node = { + .name = "ldp", + .node = LDP_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-ldp)# ", +}; -static struct cmd_node ldp_ipv4_node = {LDP_IPV4_NODE, "%s(config-ldp-af)# "}; +static struct cmd_node ldp_ipv4_node = { + .name = "ldp ipv4", + .node 
= LDP_IPV4_NODE, + .parent_node = LDP_NODE, + .prompt = "%s(config-ldp-af)# ", +}; -static struct cmd_node ldp_ipv6_node = {LDP_IPV6_NODE, "%s(config-ldp-af)# "}; +static struct cmd_node ldp_ipv6_node = { + .name = "ldp ipv6", + .node = LDP_IPV6_NODE, + .parent_node = LDP_NODE, + .prompt = "%s(config-ldp-af)# ", +}; -static struct cmd_node ldp_ipv4_iface_node = {LDP_IPV4_IFACE_NODE, - "%s(config-ldp-af-if)# "}; +static struct cmd_node ldp_ipv4_iface_node = { + .name = "ldp ipv4 interface", + .node = LDP_IPV4_IFACE_NODE, + .parent_node = LDP_IPV4_NODE, + .prompt = "%s(config-ldp-af-if)# ", +}; -static struct cmd_node ldp_ipv6_iface_node = {LDP_IPV6_IFACE_NODE, - "%s(config-ldp-af-if)# "}; +static struct cmd_node ldp_ipv6_iface_node = { + .name = "ldp ipv6 interface", + .node = LDP_IPV6_IFACE_NODE, + .parent_node = LDP_IPV6_NODE, + .prompt = "%s(config-ldp-af-if)# ", +}; -static struct cmd_node ldp_l2vpn_node = {LDP_L2VPN_NODE, "%s(config-l2vpn)# "}; +static struct cmd_node ldp_l2vpn_node = { + .name = "ldp l2vpn", + .node = LDP_L2VPN_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-l2vpn)# ", +}; -static struct cmd_node ldp_pseudowire_node = {LDP_PSEUDOWIRE_NODE, - "%s(config-l2vpn-pw)# "}; +static struct cmd_node ldp_pseudowire_node = { + .name = "ldp", + .node = LDP_PSEUDOWIRE_NODE, + .parent_node = LDP_L2VPN_NODE, + .prompt = "%s(config-l2vpn-pw)# ", +}; -static struct cmd_node keychain_node = {KEYCHAIN_NODE, "%s(config-keychain)# "}; +static struct cmd_node keychain_node = { + .name = "keychain", + .node = KEYCHAIN_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-keychain)# ", +}; -static struct cmd_node keychain_key_node = {KEYCHAIN_KEY_NODE, - "%s(config-keychain-key)# "}; +static struct cmd_node keychain_key_node = { + .name = "keychain key", + .node = KEYCHAIN_KEY_NODE, + .parent_node = KEYCHAIN_NODE, + .prompt = "%s(config-keychain-key)# ", +}; struct cmd_node link_params_node = { - LINK_PARAMS_NODE, "%s(config-link-params)# ", + .name = 
"link-params", + .node = LINK_PARAMS_NODE, + .parent_node = INTERFACE_NODE, + .prompt = "%s(config-link-params)# ", }; -static struct cmd_node rpki_node = {RPKI_NODE, "%s(config-rpki)# ", 1}; +static struct cmd_node rpki_node = { + .name = "rpki", + .node = RPKI_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-rpki)# ", +}; #if HAVE_BFDD > 0 static struct cmd_node bfd_node = { - BFD_NODE, - "%s(config-bfd)# ", + .name = "bfd", + .node = BFD_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-bfd)# ", }; static struct cmd_node bfd_peer_node = { - BFD_PEER_NODE, - "%s(config-bfd-peer)# ", + .name = "bfd peer", + .node = BFD_PEER_NODE, + .parent_node = BFD_NODE, + .prompt = "%s(config-bfd-peer)# ", }; #endif /* HAVE_BFDD */ @@ -1807,85 +1992,19 @@ DEFUNSH(VTYSH_REALLYALL, vtysh_config_terminal, vtysh_config_terminal_cmd, static int vtysh_exit(struct vty *vty) { - switch (vty->node) { - case VIEW_NODE: - case ENABLE_NODE: + struct cmd_node *cnode = vector_lookup(cmdvec, vty->node); + + if (vty->node == VIEW_NODE || vty->node == ENABLE_NODE) exit(0); - break; - case CONFIG_NODE: - vty->node = ENABLE_NODE; - break; - case INTERFACE_NODE: - case PW_NODE: - case VRF_NODE: - case NH_GROUP_NODE: - case ZEBRA_NODE: - case BGP_NODE: - case RIP_NODE: - case RIPNG_NODE: - case OSPF_NODE: - case OSPF6_NODE: - case EIGRP_NODE: - case BABEL_NODE: - case LDP_NODE: - case LDP_L2VPN_NODE: - case ISIS_NODE: - case OPENFABRIC_NODE: - case RMAP_NODE: - case PBRMAP_NODE: - case VTY_NODE: - case KEYCHAIN_NODE: - case BFD_NODE: - case RPKI_NODE: + if (cnode->node_exit) + cnode->node_exit(vty); + if (cnode->parent_node) + vty->node = cnode->parent_node; + + if (vty->node == CONFIG_NODE) { + /* resync in case one of the daemons is somewhere else */ vtysh_execute("end"); vtysh_execute("configure"); - vty->node = CONFIG_NODE; - break; - case BGP_VPNV4_NODE: - case BGP_VPNV6_NODE: - case BGP_IPV4_NODE: - case BGP_IPV4M_NODE: - case BGP_IPV4L_NODE: - case BGP_IPV6_NODE: - case 
BGP_IPV6M_NODE: - case BGP_IPV6L_NODE: - case BGP_FLOWSPECV4_NODE: - case BGP_FLOWSPECV6_NODE: - case BGP_VRF_POLICY_NODE: - case BGP_EVPN_NODE: - case BGP_VNC_DEFAULTS_NODE: - case BGP_VNC_NVE_GROUP_NODE: - case BGP_VNC_L2_GROUP_NODE: - case BMP_NODE: - vty->node = BGP_NODE; - break; - case BGP_EVPN_VNI_NODE: - vty->node = BGP_EVPN_NODE; - break; - case LDP_IPV4_NODE: - case LDP_IPV6_NODE: - vty->node = LDP_NODE; - break; - case LDP_IPV4_IFACE_NODE: - vty->node = LDP_IPV4_NODE; - break; - case LDP_IPV6_IFACE_NODE: - vty->node = LDP_IPV6_NODE; - break; - case LDP_PSEUDOWIRE_NODE: - vty->node = LDP_L2VPN_NODE; - break; - case KEYCHAIN_KEY_NODE: - vty->node = KEYCHAIN_NODE; - break; - case LINK_PARAMS_NODE: - vty->node = INTERFACE_NODE; - break; - case BFD_PEER_NODE: - vty->node = BFD_NODE; - break; - default: - break; } return CMD_SUCCESS; } @@ -2710,103 +2829,6 @@ DEFUNSH(VTYSH_ALL, no_vtysh_config_enable_password, return CMD_SUCCESS; } -/* Log filter */ -DEFUN (vtysh_log_filter, - vtysh_log_filter_cmd, - "[no] log-filter WORD ["DAEMONS_LIST"]", - NO_STR - FILTER_LOG_STR - "String to filter by\n" - DAEMONS_STR) -{ - char *filter = NULL; - char *daemon = NULL; - int found = 0; - int idx = 0; - int daemon_idx = 2; - int total_len = 0; - int len = 0; - - char line[ZLOG_FILTER_LENGTH_MAX + 20]; - - found = argv_find(argv, argc, "no", &idx); - if (found == 1) { - len = snprintf(line, sizeof(line), "no log-filter"); - daemon_idx += 1; - } else - len = snprintf(line, sizeof(line), "log-filter"); - - total_len += len; - - idx = 1; - found = argv_find(argv, argc, "WORD", &idx); - if (found != 1) { - vty_out(vty, "%% No filter string given\n"); - return CMD_WARNING; - } - filter = argv[idx]->arg; - - if (strnlen(filter, ZLOG_FILTER_LENGTH_MAX + 1) - > ZLOG_FILTER_LENGTH_MAX) { - vty_out(vty, "%% Filter is too long\n"); - return CMD_WARNING; - } - - len = snprintf(line + total_len, sizeof(line) - total_len, " %s\n", - filter); - - if ((len < 0) || (size_t)(total_len + len) > 
sizeof(line)) { - vty_out(vty, "%% Error buffering filter to daemons\n"); - return CMD_ERR_INCOMPLETE; - } - - if (argc >= (daemon_idx + 1)) - daemon = argv[daemon_idx]->text; - - if (daemon != NULL) { - vty_out(vty, "Applying log filter change to %s:\n", daemon); - return vtysh_client_execute_name(daemon, line); - } else - return show_per_daemon(line, - "Applying log filter change to %s:\n"); -} - -/* Clear log filters */ -DEFUN (vtysh_log_filter_clear, - vtysh_log_filter_clear_cmd, - "log-filter clear ["DAEMONS_LIST"]", - FILTER_LOG_STR - CLEAR_STR - DAEMONS_STR) -{ - char *daemon = NULL; - int daemon_idx = 2; - - char line[] = "clear log-filter\n"; - - if (argc >= (daemon_idx + 1)) - daemon = argv[daemon_idx]->text; - - if (daemon != NULL) { - vty_out(vty, "Clearing all filters applied to %s:\n", daemon); - return vtysh_client_execute_name(daemon, line); - } else - return show_per_daemon(line, - "Clearing all filters applied to %s:\n"); -} - -/* Show log filter */ -DEFUN (vtysh_show_log_filter, - vtysh_show_log_filter_cmd, - "show log-filter", - SHOW_STR - FILTER_LOG_STR) -{ - char line[] = "do show log-filter\n"; - - return show_per_daemon(line, "Log filters applied to %s:\n"); -} - DEFUN (vtysh_write_terminal, vtysh_write_terminal_cmd, "write terminal ["DAEMONS_LIST"]", @@ -2877,13 +2899,12 @@ static void backup_config_file(const char *fbackup) strlcat(integrate_sav, CONF_BACKUP_EXT, integrate_sav_sz); /* Move current configuration file to backup config file. 
*/ - if (unlink(integrate_sav) != 0) { - vty_out(vty, "Warning: %s unlink failed\n", integrate_sav); - } - if (rename(fbackup, integrate_sav) != 0) { - vty_out(vty, "Error renaming %s to %s\n", fbackup, - integrate_sav); - } + if (unlink(integrate_sav) != 0 && errno != ENOENT) + vty_out(vty, "Unlink failed for %s: %s\n", integrate_sav, + strerror(errno)); + if (rename(fbackup, integrate_sav) != 0 && errno != ENOENT) + vty_out(vty, "Error renaming %s to %s: %s\n", fbackup, + integrate_sav, strerror(errno)); free(integrate_sav); } @@ -3430,7 +3451,7 @@ DEFUN(find, if (regexec(&exp, cli->string, 0, NULL, 0) == 0) vty_out(vty, " (%s) %s\n", - node_names[node->node], cli->string); + node->name, cli->string); } } @@ -3729,54 +3750,54 @@ void vtysh_init_vty(void) cmd_variable_handler_register(vtysh_var_handler); /* Install nodes. */ - install_node(&bgp_node, NULL); - install_node(&rip_node, NULL); - install_node(&interface_node, NULL); - install_node(&pw_node, NULL); - install_node(&link_params_node, NULL); - install_node(&vrf_node, NULL); - install_node(&nh_group_node, NULL); - install_node(&rmap_node, NULL); - install_node(&pbr_map_node, NULL); - install_node(&zebra_node, NULL); - install_node(&bgp_vpnv4_node, NULL); - install_node(&bgp_vpnv6_node, NULL); - install_node(&bgp_flowspecv4_node, NULL); - install_node(&bgp_flowspecv6_node, NULL); - install_node(&bgp_ipv4_node, NULL); - install_node(&bgp_ipv4m_node, NULL); - install_node(&bgp_ipv4l_node, NULL); - install_node(&bgp_ipv6_node, NULL); - install_node(&bgp_ipv6m_node, NULL); - install_node(&bgp_ipv6l_node, NULL); - install_node(&bgp_vrf_policy_node, NULL); - install_node(&bgp_evpn_node, NULL); - install_node(&bgp_evpn_vni_node, NULL); - install_node(&bgp_vnc_defaults_node, NULL); - install_node(&bgp_vnc_nve_group_node, NULL); - install_node(&bgp_vnc_l2_group_node, NULL); - install_node(&ospf_node, NULL); - install_node(&eigrp_node, NULL); - install_node(&babel_node, NULL); - install_node(&ripng_node, NULL); - 
install_node(&ospf6_node, NULL); - install_node(&ldp_node, NULL); - install_node(&ldp_ipv4_node, NULL); - install_node(&ldp_ipv6_node, NULL); - install_node(&ldp_ipv4_iface_node, NULL); - install_node(&ldp_ipv6_iface_node, NULL); - install_node(&ldp_l2vpn_node, NULL); - install_node(&ldp_pseudowire_node, NULL); - install_node(&keychain_node, NULL); - install_node(&keychain_key_node, NULL); - install_node(&isis_node, NULL); - install_node(&openfabric_node, NULL); - install_node(&vty_node, NULL); - install_node(&rpki_node, NULL); - install_node(&bmp_node, NULL); + install_node(&bgp_node); + install_node(&rip_node); + install_node(&interface_node); + install_node(&pw_node); + install_node(&link_params_node); + install_node(&vrf_node); + install_node(&nh_group_node); + install_node(&rmap_node); + install_node(&pbr_map_node); + install_node(&zebra_node); + install_node(&bgp_vpnv4_node); + install_node(&bgp_vpnv6_node); + install_node(&bgp_flowspecv4_node); + install_node(&bgp_flowspecv6_node); + install_node(&bgp_ipv4_node); + install_node(&bgp_ipv4m_node); + install_node(&bgp_ipv4l_node); + install_node(&bgp_ipv6_node); + install_node(&bgp_ipv6m_node); + install_node(&bgp_ipv6l_node); + install_node(&bgp_vrf_policy_node); + install_node(&bgp_evpn_node); + install_node(&bgp_evpn_vni_node); + install_node(&bgp_vnc_defaults_node); + install_node(&bgp_vnc_nve_group_node); + install_node(&bgp_vnc_l2_group_node); + install_node(&ospf_node); + install_node(&eigrp_node); + install_node(&babel_node); + install_node(&ripng_node); + install_node(&ospf6_node); + install_node(&ldp_node); + install_node(&ldp_ipv4_node); + install_node(&ldp_ipv6_node); + install_node(&ldp_ipv4_iface_node); + install_node(&ldp_ipv6_iface_node); + install_node(&ldp_l2vpn_node); + install_node(&ldp_pseudowire_node); + install_node(&keychain_node); + install_node(&keychain_key_node); + install_node(&isis_node); + install_node(&openfabric_node); + install_node(&vty_node); + install_node(&rpki_node); + 
install_node(&bmp_node); #if HAVE_BFDD > 0 - install_node(&bfd_node, NULL); - install_node(&bfd_peer_node, NULL); + install_node(&bfd_node); + install_node(&bfd_peer_node); #endif /* HAVE_BFDD */ struct cmd_node *node; @@ -4107,9 +4128,6 @@ void vtysh_init_vty(void) /* Logging */ install_element(VIEW_NODE, &vtysh_show_logging_cmd); - install_element(VIEW_NODE, &vtysh_show_log_filter_cmd); - install_element(CONFIG_NODE, &vtysh_log_filter_cmd); - install_element(CONFIG_NODE, &vtysh_log_filter_clear_cmd); install_element(CONFIG_NODE, &vtysh_log_stdout_cmd); install_element(CONFIG_NODE, &vtysh_log_stdout_level_cmd); install_element(CONFIG_NODE, &no_vtysh_log_stdout_cmd); diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c index ed9616963d..2db612adca 100644 --- a/watchfrr/watchfrr.c +++ b/watchfrr/watchfrr.c @@ -27,6 +27,8 @@ #include "command.h" #include "libfrr.h" #include "lib_errors.h" +#include "zlog_targets.h" +#include "network.h" #include <getopt.h> #include <sys/un.h> @@ -42,7 +44,7 @@ #endif /* Macros to help randomize timers. */ -#define JITTER(X) ((random() % ((X)+1))-((X)/2)) +#define JITTER(X) ((frr_weak_random() % ((X)+1))-((X)/2)) #define FUZZY(X) ((X)+JITTER((X)/20)) #define DEFAULT_PERIOD 5 @@ -318,9 +320,8 @@ static pid_t run_background(char *shell_cmd) } default: /* Parent process: we will reap the child later. 
*/ - flog_err_sys(EC_LIB_SYSTEM_CALL, - "Forked background command [pid %d]: %s", - (int)child, shell_cmd); + zlog_info("Forked background command [pid %d]: %s", (int)child, + shell_cmd); return child; } } @@ -559,9 +560,9 @@ static int wakeup_init(struct thread *t_wakeup) dmn->t_wakeup = NULL; if (try_connect(dmn) < 0) { - flog_err(EC_WATCHFRR_CONNECTION, - "%s state -> down : initial connection attempt failed", - dmn->name); + zlog_info( + "%s state -> down : initial connection attempt failed", + dmn->name); dmn->state = DAEMON_DOWN; } phase_check(); @@ -1370,11 +1371,10 @@ int main(int argc, char **argv) frr_config_fork(); - zlog_set_level(ZLOG_DEST_MONITOR, ZLOG_DISABLED); if (watchfrr_di.daemon_mode) - zlog_set_level(ZLOG_DEST_SYSLOG, MIN(gs.loglevel, LOG_DEBUG)); + zlog_syslog_set_prio_min(MIN(gs.loglevel, LOG_DEBUG)); else - zlog_set_level(ZLOG_DEST_STDOUT, MIN(gs.loglevel, LOG_DEBUG)); + zlog_aux_init(NULL, MIN(gs.loglevel, LOG_DEBUG)); frr_run(master); diff --git a/watchfrr/watchfrr_vty.c b/watchfrr/watchfrr_vty.c index c06cb89382..eda4f5d516 100644 --- a/watchfrr/watchfrr_vty.c +++ b/watchfrr/watchfrr_vty.c @@ -23,6 +23,7 @@ #include "memory.h" #include "log.h" +#include "log_vty.h" #include "vty.h" #include "command.h" @@ -134,6 +135,19 @@ DEFUN (show_watchfrr, return CMD_SUCCESS; } +/* we don't have the other logging commands since watchfrr only accepts + * log config through command line options + */ +DEFUN_NOSH (show_logging, + show_logging_cmd, + "show logging", + SHOW_STR + "Show current logging configuration\n") +{ + log_show_syslog(vty); + return CMD_SUCCESS; +} + #ifndef VTYSH_EXTRACT_PL #include "watchfrr/watchfrr_vty_clippy.c" #endif @@ -190,4 +204,5 @@ void watchfrr_vty_init(void) install_element(CONFIG_NODE, &show_debugging_watchfrr_cmd); install_element(VIEW_NODE, &show_watchfrr_cmd); + install_element(VIEW_NODE, &show_logging_cmd); } diff --git a/yang/embedmodel.py b/yang/embedmodel.py index 52671f99a8..624a11da9d 100644 --- 
a/yang/embedmodel.py +++ b/yang/embedmodel.py @@ -3,11 +3,18 @@ # YANG module to C wrapper # written 2018 by David Lamparter, placed in Public Domain. -import sys, string, re +import sys +import os +import string +import re inname = sys.argv[1] outname = sys.argv[2] +outdir = os.path.dirname(os.path.abspath(outname)) +if not os.path.isdir(outdir): + os.makedirs(outdir) + # these are regexes to avoid a compile-time/host dependency on yang-tools # or python-yang. Cross-compiling FRR is already somewhat involved, no need # to make it even harder. diff --git a/yang/frr-bfdd.yang b/yang/frr-bfdd.yang index f9ac2e43b0..b870bfd0c8 100644 --- a/yang/frr-bfdd.yang +++ b/yang/frr-bfdd.yang @@ -16,7 +16,7 @@ module frr-bfdd { prefix frr-route-types; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-eigrpd.yang b/yang/frr-eigrpd.yang index 0c62954570..092b714045 100644 --- a/yang/frr-eigrpd.yang +++ b/yang/frr-eigrpd.yang @@ -16,7 +16,7 @@ module frr-eigrpd { prefix frr-route-types; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -191,7 +191,7 @@ module frr-eigrpd { leaf-list neighbor { description "Specific EIGRP neighbor"; - type inet:ipv4-prefix; + type inet:ipv4-address; } list redistribute { diff --git a/yang/frr-filter.yang b/yang/frr-filter.yang index e79ede87b7..61ffa51552 100644 --- a/yang/frr-filter.yang +++ b/yang/frr-filter.yang @@ -10,7 +10,7 @@ module frr-filter { prefix yang; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-igmp.yang b/yang/frr-igmp.yang new file mode 100644 index 0000000000..7f750dcf3e --- /dev/null +++ 
b/yang/frr-igmp.yang @@ -0,0 +1,147 @@ +module frr-igmp { + yang-version "1.1"; + namespace "http://frrouting.org/yang/igmp"; + + prefix frr-igmp; + + import frr-routing { + prefix "frr-rt"; + } + + import ietf-routing-types { + prefix "rt-types"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import frr-interface { + prefix frr-interface; + } + + organization + "Free Range Routing"; + + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + + description + "This module defines a model for managing FRR pimd daemon."; + + revision 2019-11-06 { + description + "Initial revision."; + reference + "RFC 2236: IGMP v2. + RFC 3376: IGMP v3."; + } + + grouping interface-config-attributes { + description + "Configuration attributes applied to the interface level."; + + leaf igmp-enable { + type boolean; + default "false"; + description + "Enable IGMP protocol on the interface."; + } + + leaf version { + type uint8 { + range "2..3"; + } + default "3"; + description + "IGMP version."; + } + + leaf query-interval { + type uint16 { + range "1..1800"; + } + units seconds; + default "125"; + description + "The Query Interval is the interval between General Queries + sent by the Querier."; + } + + leaf query-max-response-time { + type uint8 { + range "10..250"; + } + units deciseconds; + default "100"; + description + "Query maximum response time specifies the maximum time + allowed before sending a responding report."; + } + + leaf last-member-query-interval { + type uint8 { + range "1..255"; + } + units deciseconds; + default "10"; + description + "Last Member Query Interval, which may be tuned to modify + the leave latency of the network."; + } + + leaf robustness-variable { + type uint8 { + range "1..7"; + } + default "2"; + description + "Querier's Robustness Variable allows tuning for the + expected packet loss on a network."; + } + } + + grouping per-af-interface-config-attributes { + description + 
"Configuration attributes applied to the interface level per address family."; + + list static-group { + key "group-addr source-addr"; + description + "A static multicast route, (*,G) or (S,G). + The version of IGMP must be 3 to support (S,G)."; + + leaf group-addr { + type rt-types:ip-multicast-group-address; + description + "Multicast group address."; + } + leaf source-addr { + type inet:ip-address; + description + "Multicast source address."; + } + } + + } // per-af-interface-config-attributes + + /* + * Per-interface configuration data + */ + augment "/frr-interface:lib/frr-interface:interface" { + container igmp { + description + "IGMP interface parameters."; + uses interface-config-attributes; + list address-family { + key "address-family"; + description + "Each list entry for one address family."; + uses frr-rt:address-family; + uses per-af-interface-config-attributes; + + } //address-family + } + } +} diff --git a/yang/frr-interface.yang b/yang/frr-interface.yang index 4f7f3beebd..7ebba935a4 100644 --- a/yang/frr-interface.yang +++ b/yang/frr-interface.yang @@ -3,14 +3,30 @@ module frr-interface { namespace "http://frrouting.org/yang/interface"; prefix frr-interface; + import frr-vrf { + prefix frr-vrf; + } + + import ietf-interfaces { + prefix ietf-if; + } + + import ietf-yang-types { + prefix yang; + } + organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; description "This module defines a model for managing FRR interfaces."; + revision 2020-02-05 { + description + "Added operational data"; + } revision 2019-09-09 { description "Added interface-ref typedef"; @@ -20,12 +36,236 @@ module frr-interface { "Initial revision."; } + identity other { + base ietf-if:interface-type; + description + "Other type"; + } + + identity unknown { + base ietf-if:interface-type; + description + "Unknown type"; + } + + identity ethernet { + base 
ietf-if:interface-type; + description + "Ethernet type"; + } + + identity exper-ethernet { + base ietf-if:interface-type; + description + "Experimental Ethernet type"; + } + + identity loopback { + base ietf-if:interface-type; + description + "Loopback type"; + } + + identity pimreg { + base ietf-if:interface-type; + description + "PIMSM Registration."; + } + + identity ipip { + base ietf-if:interface-type; + description + "IPIP Tunnel."; + } + + identity ipip6 { + base ietf-if:interface-type; + description + "IPIP6 Tunnel."; + } + + identity ipgre { + base ietf-if:interface-type; + description + "GRE over IP."; + } + + typedef interface-ref { + type leafref { + path "/frr-interface:lib/frr-interface:interface/frr-interface:name"; + require-instance false; + } + description + "Reference to an interface"; + } + + typedef if-flags-type { + type enumeration { + enum "up" { + value 1; + description + "Active and ready to transfer packets."; + } + enum "broadcast" { + value 2; + description + "Broadcast enabled."; + } + enum "debug" { + value 3; + description + "Debug mode."; + } + enum "loopback" { + value 4; + description + "Loopback interface."; + } + enum "point-to-point" { + value 5; + description + "Point-to-Point link."; + } + enum "notrailers" { + value 6; + description + "This flag is unused in Linux, but it exists for BSD compatibility. + Avoid use of trailers"; + } + enum "running" { + value 7; + description + "Up and Running."; + } + enum "noarp" { + value 8; + description + "Can't perform address resolution protocol."; + } + enum "promisc" { + value 9; + description + "Promiscuous mode. 
Receive all packets."; + } + enum "allmulti" { + value 10; + description + "Receive all multicast packets."; + } + enum "simplex" { + value 11; + description + "Does not Rx or Tx at the sametime."; + } + enum "link0" { + value 12; + description + "Link0."; + } + enum "link1" { + value 13; + description + "Link1."; + } + enum "link2" { + value 14; + description + "Link2."; + } + enum "multicast" { + value 15; + description + "Supports multicast transmission."; + } + enum "notransmit" { + value 16; + description + "Interface is no transmit mode."; + } + enum "nortexch" { + value 17; + description + "No routing info exchange."; + } + enum "virtual" { + value 18; + description + "Virtual interface."; + } + enum "ipv4" { + value 19; + description + "IPv4 enabled."; + } + enum "ipv6" { + value 20; + description + "IPv6 enabled."; + } + } + } + + grouping if-common-operational { + leaf if-index { + type int32 { + range "0..2147483647"; + } + description + "Interface index."; + } + + leaf mtu { + type uint16; + description + "The size of the largest IPV4 packet that the interface + will send and receive."; + } + + leaf mtu6 { + type uint32; + description + "The size of the largest IPV6 packet that the interface + will send and receive."; + } + + leaf speed { + type uint32; + description + "Interface speed."; + } + + leaf metric { + type uint32; + description + "Interface metric."; + } + + leaf flags { + type if-flags-type; + description + "Interface flags."; + } + + leaf type { + type identityref { + base ietf-if:interface-type; + } + description + "The link type of the interface."; + } + + leaf phy-address { + type yang:mac-address; + description + "The interface's MAC address."; + } + } + container lib { list interface { key "name vrf"; description "Interface."; - leaf name { type string { length "1..16"; @@ -33,27 +273,27 @@ module frr-interface { description "Interface name."; } + leaf vrf { type string { - length "1..36"; + length "1..16"; } + /* yang version 0.16 
having issue accessing leafref. */ + /* type frr-vrf:vrf-ref;*/ description "VRF this interface is associated with."; } + leaf description { type string; description "Interface description."; } - } - } - typedef interface-ref { - type leafref { - require-instance false; - path "/frr-interface:lib/frr-interface:interface/frr-interface:name"; + container state { + config false; + uses if-common-operational; + } } - description - "Reference to an interface"; } } diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang index faab1e55b2..57f81892e0 100644 --- a/yang/frr-isisd.yang +++ b/yang/frr-isisd.yang @@ -20,13 +20,18 @@ module frr-isisd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; description "This module defines a model for managing FRR isisd daemon."; + revision 2020-04-06 { + description + "Group LSP timers in a container so that they can be displayed and + configured together"; + } revision 2019-12-17 { description "Changed default area is-type to level-1-2"; @@ -34,7 +39,7 @@ module frr-isisd { revision 2019-09-09 { description "Changed interface references to use - frr-interface:interface-ref typedef"; + frr-interface:interface-ref typedef"; } revision 2018-07-26 { description @@ -301,8 +306,8 @@ module frr-isisd { } grouping interface-config { - description "Interface configuration grouping"; - + description + "Interface configuration grouping"; leaf area-tag { type string; mandatory true; @@ -333,8 +338,9 @@ module frr-isisd { leaf bfd-monitoring { type boolean; - default false; - description "Monitor IS-IS peers on this circuit."; + default "false"; + description + "Monitor IS-IS peers on this circuit."; } container csnp-interval { @@ -490,8 +496,8 @@ module frr-isisd { leaf network-type { type network-type; - default "broadcast"; must "(. = \"point-to-point\") or (. 
= \"broadcast\")"; + default "broadcast"; description "Explicitly configured type of IS-IS circuit (broadcast or point-to-point)."; } @@ -570,38 +576,50 @@ module frr-isisd { } grouping adjacency-state { + description + "Adjacency state"; container adjacencies { config false; + description + "This container lists the adjacencies of + the local node."; list adjacency { + description + "List of operational adjacencies."; leaf neighbor-sys-type { type level; description "Level capability of neighboring system"; } + leaf neighbor-sysid { type system-id; description "The system-id of the neighbor"; } + leaf neighbor-extended-circuit-id { type extended-circuit-id; description "Circuit ID of the neighbor"; } + leaf neighbor-snpa { type snpa; description "SNPA of the neighbor"; } + leaf hold-timer { type uint16; - units seconds; + units "seconds"; description "The holding time in seconds for this adjacency. This value is based on received hello PDUs and the elapsed time since receipt."; } + leaf neighbor-priority { type uint8 { range "0 .. 127"; @@ -610,37 +628,36 @@ module frr-isisd { "Priority of the neighboring IS for becoming the DIS."; } + leaf state { type adj-state-type; description "This leaf describes the state of the interface."; } - - description - "List of operational adjacencies."; } - description - "This container lists the adjacencies of - the local node."; } - description - "Adjacency state"; } grouping event-counters { + description + "Grouping for IS-IS interface event counters"; container event-counters { config false; + description + "IS-IS interface event counters."; leaf adjacency-changes { type uint32; description "The number of times an adjacency state change has occurred on this interface."; } + leaf adjacency-number { type uint32; description "The number of adjacencies on this interface."; } + leaf init-fails { type uint32; description @@ -649,12 +666,14 @@ module frr-isisd { as PPP NCP failures. 
Failures to form an adjacency are counted by adjacency-rejects."; } + leaf adjacency-rejects { type uint32; description "The number of times an adjacency has been rejected on this interface."; } + leaf id-len-mismatch { type uint32; description @@ -662,6 +681,7 @@ module frr-isisd { field length different from that for this system has been received on this interface."; } + leaf max-area-addresses-mismatch { type uint32; description @@ -670,26 +690,26 @@ module frr-isisd { max area address field differing from that of this system."; } + leaf authentication-type-fails { type uint32; description "Number of authentication type mismatches."; } + leaf authentication-fails { type uint32; description "Number of authentication key failures."; } - description "IS-IS interface event counters."; } - description - "Grouping for IS-IS interface event counters"; } grouping interface-state { description "IS-IS interface operational state."; uses adjacency-state; + uses event-counters; } @@ -814,75 +834,75 @@ module frr-isisd { "MTU of an LSP."; } - container refresh-interval { + container timers { description - ""; - leaf level-1 { - type uint16; - units "seconds"; - default "900"; + "LSP-related timers"; + container level-1 { description - "LSP refresh interval for level-1."; - } + "Level-1 LSP-related timers"; + leaf refresh-interval { + type uint16; + units "seconds"; + default "900"; + description + "LSP refresh interval for level-1."; + } - leaf level-2 { - type uint16; - units "seconds"; - default "900"; - description - "LSP refresh interval for level-2."; - } - } + leaf maximum-lifetime { + type uint16 { + range "350..65535"; + } + units "seconds"; + must ". 
>= ../refresh-interval + 300"; + default "1200"; + description + "Maximum LSP lifetime for level-1."; + } - container maximum-lifetime { - description - "Maximum LSP lifetime."; - leaf level-1 { - type uint16 { - range "350..65535"; + leaf generation-interval { + type uint16 { + range "1..120"; + } + units "seconds"; + must ". < ../refresh-interval"; + default "30"; + description + "Minimum time allowed before level-1 LSP retransmissions."; } - units "seconds"; - must ". >= ../../refresh-interval/level-1 + 300"; - default "1200"; - description - "Maximum LSP lifetime for level-1."; } - leaf level-2 { - type uint16 { - range "350..65535"; - } - units "seconds"; - must ". >= ../../refresh-interval/level-2 + 300"; - default "1200"; + container level-2 { description - "Maximum LSP lifetime for level-2."; - } - } + "Level-2 LSP-related timers"; + leaf refresh-interval { + type uint16; + units "seconds"; + default "900"; + description + "LSP refresh interval for level-2."; + } - container generation-interval { - description - "Minimum LSP regeneration interval."; - leaf level-1 { - type uint16 { - range "1..120"; + leaf maximum-lifetime { + type uint16 { + range "350..65535"; + } + units "seconds"; + must ". >= ../refresh-interval + 300"; + default "1200"; + description + "Maximum LSP lifetime for level-2."; } - units "seconds"; - must ". < ../../refresh-interval/level-1"; - default "30"; - description - "Minimum time allowed before level-1 LSP retransmissions."; - } - leaf level-2 { - type uint16 { - range "1..120"; + leaf generation-interval { + type uint16 { + range "1..120"; + } + units "seconds"; + must ". < ../refresh-interval"; + default "30"; + description + "Minimum time allowed before level-2 LSP retransmissions."; } - units "seconds"; - must ". 
< ../../refresh-interval/level-2"; - default "30"; - description - "Minimum time allowed before level-2 LSP retransmissions."; } } } @@ -1152,6 +1172,7 @@ module frr-isisd { description "IS-IS interface parameters."; uses interface-config; + uses interface-state; } } diff --git a/yang/frr-module-translator.yang b/yang/frr-module-translator.yang index 3d64ec5399..6713eae76e 100644 --- a/yang/frr-module-translator.yang +++ b/yang/frr-module-translator.yang @@ -4,7 +4,7 @@ module frr-module-translator { prefix frr-module-translator; organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-nexthop.yang b/yang/frr-nexthop.yang index 7d8ce1b8ed..07e15eb774 100644 --- a/yang/frr-nexthop.yang +++ b/yang/frr-nexthop.yang @@ -10,12 +10,17 @@ module frr-nexthop { import ietf-routing-types { prefix rt-types; } + import frr-interface { prefix frr-interface; } + import frr-vrf { + prefix frr-vrf; + } + organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -27,10 +32,18 @@ module frr-nexthop { "Initial revision."; } - typedef gateway-address { - type inet:ip-address; + typedef optional-ip-address { + type union { + type inet:ip-address; + type string { + pattern ''; + } + } } + /* + * Nexthop types. + */ typedef nexthop-type { type enumeration { enum "ifindex" { @@ -68,6 +81,9 @@ module frr-nexthop { "Nexthop types."; } + /* + * Blockhole nexthop type. + */ typedef blackhole-type { type enumeration { enum "unspec" { @@ -97,113 +113,148 @@ module frr-nexthop { } /* - * Nexthop object + * Common nexthop attributes grouping. 
*/ - - grouping frr-nexthop { + grouping frr-nexthop-attributes { leaf nh-type { type nexthop-type; mandatory true; description "The nexthop type."; } - - leaf gateway { - type gateway-address; - description - "The nexthop gateway address."; - } - leaf vrf { - type string { - length "1..36"; - } + type frr-vrf:vrf-ref; description "The nexthop vrf name, if different from the route."; } - + leaf gateway { + type frr-nexthop:optional-ip-address; + description + "The nexthop gateway address."; + } leaf interface { type frr-interface:interface-ref; description "The nexthop egress interface."; } - leaf bh-type { + when "../nh-type = 'blackhole'"; type blackhole-type; description "A blackhole sub-type, if the nexthop is a blackhole type."; } - - leaf flags { - type uint32; + leaf onlink { + when "../nh-type = 'ip4-ifindex' or + ../nh-type = 'ip6-ifindex'"; + type boolean; + default "false"; description - "The nexthop's raw flags value."; + "Nexthop is directly connected."; } - leaf is-duplicate { - type empty; + uses rt-types:mpls-label-stack { description - "Duplicate nexthop; will be ignored."; + "Nexthop's MPLS label stack."; } + } - leaf is-recursive { + /* + * operational common attributes for nexthop + */ + grouping frr-nexthop-operational { + leaf duplicate { type empty; + config false; description - "Nexthop must be resolved through another gateway."; + "Duplicate nexthop"; } - leaf is-onlink { + leaf recursive { type empty; + config false; description - "Nexthop is directly connected."; + "Nexthop resolved through another gateway."; } - leaf is-active { + leaf active { type empty; + config false; description "Nexthop is active."; } - uses rt-types:mpls-label-stack { + leaf fib { + type empty; + config false; description - "Nexthop's MPLS label stack."; + "Nexthop is installed in fib."; } - leaf mtu { - type uint32; + leaf weight { + type uint8; + config false; description - "The nexthop's specific MTU."; + "Weight to be used by the nexthop for purposes of ECMP"; 
} } - // End of nexthop /* - * Nexthop-group container + * Single nexthop grouping. */ - - grouping frr-nexthop-group { - description - "A nexthop-group, represented as a list of nexthop objects."; - leaf name { - type string; + grouping frr-nexthop { + container frr-nexthops { description - "The nexthop-group name."; + "FRR nexthop object."; + list nexthop { + key "nh-type gateway interface"; + description + "A list of nexthop objects."; + uses frr-nexthop-attributes; + } } + } + - list entry { - key "id"; + /* + * Container for FRR nexthop group. + */ + grouping frr-nexthop-grouping { + list nexthop-group { + key "name"; description - "A list of nexthop objects."; - leaf id { - type uint32; + "A group of nexthops."; + + leaf name { + type string; description - "Identifies a nexthop within a nexthop group; the entries - are ordered by id value, and the value has no other meaning."; + "The nexthop-group name."; } uses frr-nexthop; } } - // End of frr-nexthop-group + container frr-nexthop-group { + description + "A nexthop-group, represented as a list of nexthop objects."; + uses frr-nexthop-grouping; + } + + typedef nexthop-group-ref { + type leafref { + require-instance false; + path "/frr-nexthop:frr-nexthop-group/frr-nexthop:nexthop-group/frr-nexthop:name"; + } + } + + /* + * Augment weight attributes to nexthop group. 
+ */ + augment "/frr-nexthop-group/nexthop-group/frr-nexthops/nexthop" { + leaf weight { + type uint8; + description + "Weight to be used by the nexthop for purposes of ECMP"; + } + } } diff --git a/yang/frr-ripd.yang b/yang/frr-ripd.yang index 94a9ebf3e1..12c72b39b5 100644 --- a/yang/frr-ripd.yang +++ b/yang/frr-ripd.yang @@ -17,7 +17,7 @@ module frr-ripd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -118,7 +118,7 @@ module frr-ripd { "Enable RIP on the specified IP network."; } leaf-list interface { - type frr-interface:interface-ref; + type string; description "Enable RIP on the specified interface."; } @@ -179,14 +179,14 @@ module frr-ripd { } leaf-list passive-interface { when "../passive-default = 'false'"; - type frr-interface:interface-ref; + type string; description "A list of interfaces where the sending of RIP packets is disabled."; } leaf-list non-passive-interface { when "../passive-default = 'true'"; - type frr-interface:interface-ref; + type string; description "A list of interfaces where the sending of RIP packets is enabled."; diff --git a/yang/frr-ripngd.yang b/yang/frr-ripngd.yang index 831758af86..c58962f5cd 100644 --- a/yang/frr-ripngd.yang +++ b/yang/frr-ripngd.yang @@ -17,7 +17,7 @@ module frr-ripngd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -76,7 +76,7 @@ module frr-ripngd { "Enable RIPng on the specified IPv6 network."; } leaf-list interface { - type frr-interface:interface-ref; + type string; description "Enable RIPng on the specified interface."; } diff --git a/yang/frr-route-map.yang b/yang/frr-route-map.yang index 34a7e28a77..106593d9d3 100644 --- a/yang/frr-route-map.yang +++ b/yang/frr-route-map.yang @@ -13,7 +13,7 @@ module frr-route-map { prefix frr-interface; } - 
organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -179,17 +179,27 @@ module frr-route-map { description "Match a route tag"; value 10; } - - /* - * Protocol YANG models should augment the parent node to - * contain the routing protocol specific value. The protocol - * must also augment `condition-value` to include its specific - * values or expand the `when` statement on the existing cases. - */ - enum routing-protocol-specific { - description "Match a routing protocol specific type"; + /* zebra specific conditions. */ + enum ipv4-prefix-length { + description "Match IPv4 prefix length"; value 100; } + enum ipv6-prefix-length { + description "Match IPv6 prefix length"; + value 101; + } + enum ipv4-next-hop-prefix-length { + description "Match next-hop prefix length"; + value 102; + } + enum source-protocol { + description "Match source protocol"; + value 103; + } + enum source-instance { + description "Match source protocol instance"; + value 104; + } } } @@ -200,7 +210,7 @@ module frr-route-map { case interface { when "./condition = 'interface'"; leaf interface { - type frr-interface:interface-ref; + type string; } } case access-list-num { @@ -291,15 +301,9 @@ module frr-route-map { description "Set tag"; value 3; } - - /* - * Protocol YANG models should augment the parent node to - * contain the routing protocol specific value. The protocol - * must also augment `action-value` to include its specific - * values or expand the `when` statement on the existing cases. - */ - enum routing-protocol-specific { - description "Set a routing protocol specific action"; + /* zebra specific conditions. 
*/ + enum source { + description "Set source address for route"; value 100; } } diff --git a/yang/frr-route-types.yang b/yang/frr-route-types.yang index f22c5ef890..8fdd10121e 100644 --- a/yang/frr-route-types.yang +++ b/yang/frr-route-types.yang @@ -4,7 +4,7 @@ module frr-route-types { prefix frr-route-types; organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -106,4 +106,12 @@ module frr-route-types { } } } + + typedef frr-route-types { + description "Route types as enumerated in `lib/route_types.txt`"; + type union { + type frr-route-types-v4; + type frr-route-types-v6; + } + } } diff --git a/yang/frr-routing.yang b/yang/frr-routing.yang new file mode 100644 index 0000000000..615a81c722 --- /dev/null +++ b/yang/frr-routing.yang @@ -0,0 +1,135 @@ +module frr-routing { + yang-version "1.1"; + namespace "http://frrouting.org/yang/routing"; + prefix "rt"; + + import ietf-yang-types { + prefix "yang"; + } + + import frr-vrf { + prefix frr-vrf; + } + + organization + "Free Range Routing"; + + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + + description + "This YANG module defines essential components for the management + of a routing subsystem."; + + revision 2019-08-15 { + description + "Initial revision."; + } + + /* Identities */ + + identity address-family { + description + "Base identity from which identities describing address + families are derived."; + } + + identity ipv4 { + base address-family; + description + "This identity represents an IPv4 address family."; + } + + identity ipv6 { + base address-family; + description + "This identity represents an IPv6 address family."; + } + + identity control-plane-protocol { + description + "Base identity from which control-plane protocol identities are + derived."; + } + + identity routing-protocol { + base 
control-plane-protocol; + description + "Identity from which Layer 3 routing protocol identities are + derived."; + } + + /* Type Definitions */ + + typedef administrative-distance { + type uint8 { + range "1..255"; + } + description + "Admin distance associated with the route."; + } + + /* Groupings */ + + grouping address-family { + description + "This grouping provides a leaf identifying an address + family."; + leaf address-family { + type identityref { + base address-family; + } + mandatory true; + description + "Address family."; + } + } + + grouping router-id { + description + "This grouping provides a router ID."; + leaf router-id { + type yang:dotted-quad; + description + "A 32-bit number in the form of a dotted quad that is used by + some routing protocols identifying a router."; + reference + "RFC 2328: OSPF Version 2"; + } + } + + /* Data nodes */ + + container routing { + description + "Configuration parameters for the routing subsystem."; + container control-plane-protocols { + description + "Support for control-plane protocol instances."; + list control-plane-protocol { + key "type name vrf"; + description + "Each entry contains a control-plane protocol instance."; + leaf type { + type identityref { + base control-plane-protocol; + } + description + "Type of the control-plane protocol"; + } + leaf name { + type string; + description + "An arbitrary name of the control-plane protocol + instance."; + } + leaf vrf { + type frr-vrf:vrf-ref; + description + "vrf for control-plane protocol"; + } + } + } + } +} diff --git a/yang/frr-staticd.yang b/yang/frr-staticd.yang new file mode 100644 index 0000000000..58933c9040 --- /dev/null +++ b/yang/frr-staticd.yang @@ -0,0 +1,86 @@ +module frr-staticd { + yang-version "1.1"; + namespace "http://frrouting.org/yang/staticd"; + + prefix frr-staticd; + + import frr-routing { + prefix "frr-rt"; + } + + import frr-nexthop { + prefix frr-nexthop; + } + + import ietf-inet-types { + prefix inet; + } + + organization + 
"Free Range Routing"; + + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + + description + "This module defines a model for managing FRR staticd information. + This YANG module augments the ietf-routing with additional + nexthop information"; + + revision 2019-12-03 { + description + "Initial revision."; + } + + identity static { + base frr-rt:routing-protocol; + description + "'Static' routing pseudo-protocol."; + } + + augment "/frr-rt:routing/frr-rt:control-plane-protocols/" + + "frr-rt:control-plane-protocol" { + container staticd { + when "../frr-rt:type = 'frr-staticd:static'" { + description + "This container is only valid for the 'static' routing + protocol."; + } + description + "Support for a 'static' pseudo-protocol instance + consists of a list of routes."; + + list prefix-list { + key "destination-prefix"; + description + "List of static IP routes."; + + leaf destination-prefix { + type inet:ip-address; + description + "IP destination prefix."; + } + + leaf distance { + type frr-rt:administrative-distance; + description + "Admin distance associated with this route."; + } + + leaf tag { + type uint32 { + range "1..4294967295"; + } + description + "Route tag"; + } + container frr-staticd-next-hop { + description + "Augment static route configuration 'nexthop-list'."; + uses frr-nexthop:frr-nexthop; + } + } + } + } +} diff --git a/yang/frr-vrf.yang b/yang/frr-vrf.yang new file mode 100644 index 0000000000..4924a86e89 --- /dev/null +++ b/yang/frr-vrf.yang @@ -0,0 +1,60 @@ +module frr-vrf { + yang-version 1.1; + namespace "http://frrouting.org/yang/vrf"; + prefix frr-vrf; + + organization + "Free Range Routing"; + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + description + "This module defines a model for managing FRR VRF."; + + revision 2019-12-06 { + description + "Initial revision."; + } + + typedef vrf-ref 
{ + type leafref { + path "/frr-vrf:lib/frr-vrf:vrf/frr-vrf:name"; + require-instance false; + } + description + "Reference to a VRF"; + } + + container lib { + list vrf { + key "name"; + description + "VRF."; + leaf name { + type string { + length "1..36"; + } + description + "VRF name."; + } + + container state { + config false; + leaf id { + type uint32 { + range "0..4294967295"; + } + description + "VRF Id."; + } + + leaf active { + type boolean; + default "false"; + description + "VRF active in kernel."; + } + } + } + } +} diff --git a/yang/frr-vrrpd.yang b/yang/frr-vrrpd.yang index 3d3a4138fa..145387c4b4 100644 --- a/yang/frr-vrrpd.yang +++ b/yang/frr-vrrpd.yang @@ -16,7 +16,7 @@ module frr-vrrpd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang index 74922a22f7..26e30b5fa9 100644 --- a/yang/frr-zebra.yang +++ b/yang/frr-zebra.yang @@ -11,6 +11,10 @@ module frr-zebra { prefix inet; } + import frr-route-map { + prefix frr-route-map; + } + import frr-route-types { prefix frr-route-types; } @@ -23,12 +27,20 @@ module frr-zebra { prefix frr-nh; } + import frr-routing { + prefix frr-rt; + } + import frr-interface { prefix frr-interface; } + import frr-vrf { + prefix frr-vrf; + } + organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -40,6 +52,35 @@ module frr-zebra { "Initial revision."; } + identity afi-safi-type { + description + "Base identity type (AFI,SAFI) tuples for RIB"; + } + + identity ipv4-unicast { + base afi-safi-type; + description + "This identity represents the IPv4 unicast address family."; + } + + identity ipv6-unicast { + base afi-safi-type; + description + "This identity represents the IPv6 unicast address family."; + } + + identity ipv4-multicast { + 
base afi-safi-type; + description + "This identity represents the IPv4 multicast address family."; + } + + identity ipv6-multicast { + base afi-safi-type; + description + "This identity represents the IPv6 multicast address family."; + } + typedef unix-timestamp { type uint32; units "seconds"; @@ -47,6 +88,65 @@ module frr-zebra { "An absolute time in seconds since the unix epoch."; } + identity zebra-interface-type { + description + "zebra interface type."; + } + + identity zif-other { + base zebra-interface-type; + description + "Zebra interface type other."; + } + + identity zif-bridge { + base zebra-interface-type; + description + "Zebra interface type bridge."; + } + + identity zif-vlan { + base zebra-interface-type; + description + "Zebra interface type vlan."; + } + + identity zif-vxlan { + base zebra-interface-type; + description + "Zebra interface type vxlan."; + } + + identity zif-vrf { + base zebra-interface-type; + description + "Zebra interface type vrf."; + } + + identity zif-veth { + base zebra-interface-type; + description + "Zebra interface type veth."; + } + + identity zif-bond { + base zebra-interface-type; + description + "Zebra interface type bond."; + } + + identity zif-bond-slave { + base zebra-interface-type; + description + "Zebra interface type bond slave."; + } + + identity zif-macvlan { + base zebra-interface-type; + description + "Zebra interface type macvlan."; + } + /* * Multicast RPF mode configurable type */ @@ -128,13 +228,6 @@ module frr-zebra { grouping route-common { description "Common information about a route."; - leaf vrf { - type string { - length "1..36"; - } - description - "The route's vrf name."; - } leaf distance { type uint8; @@ -156,25 +249,25 @@ module frr-zebra { "Route tag value."; } - leaf is-selected { + leaf selected { type empty; description "Route is the selected or preferred route for the prefix."; } - leaf is-installed { + leaf installed { type empty; description "Route is installed in the FIB."; } - leaf 
is-failed { + leaf failed { type empty; description "Route installation in FIB has failed."; } - leaf is-queued { + leaf queued { type empty; description "Route has a pending FIB operation that has not completed."; @@ -193,17 +286,12 @@ module frr-zebra { } leaf uptime { - type uint32; - units "seconds"; + type yang:date-and-time; description "Uptime for the route."; } - container nexthop-group { - description - "Nexthop information for the route."; - uses frr-nh:frr-nexthop-group; - } + uses frr-nh:frr-nexthop-grouping; } // End of route-common @@ -253,6 +341,7 @@ module frr-zebra { uses route-common; } + // End of ip6-route /* * Information about EVPN VNIs @@ -339,7 +428,7 @@ module frr-zebra { "The gateway MAC-IP is being advertised."; } - leaf mcase-group { + leaf mcast-group { type rt-types:ipv4-multicast-group-address; description "The VNI multicast group for BUM traffic."; @@ -438,6 +527,7 @@ module frr-zebra { description "Debug kernel messages sent."; } + leaf debug-kernel-msg-recv { type boolean; description @@ -511,6 +601,58 @@ module frr-zebra { } } + grouping ribs { + container ribs { + description + "RIBs supported by FRR."; + list rib { + key "afi-safi-name table-id"; + leaf table-id { + type uint32; + description + "Routing Table id (default id - 254)."; + } + + leaf afi-safi-name { + type identityref { + base afi-safi-type; + } + mandatory true; + description + "AFI, SAFI name."; + } + + list route { + key "prefix"; + config false; + leaf prefix { + type inet:ip-prefix; + description + "The route's prefix."; + } + list route-entry { + key "protocol"; + leaf protocol { + type frr-route-types:frr-route-types-v4; + //TODO: Use unified route types done in PR 5183 when it is merged. 
+ //type frr-route-types:frr-route-types; + description + "The protocol owning the route."; + } + + leaf instance { + type uint16; + must "../protocol = \"ospf\""; + description + "Retrieve routes from a specific OSPF instance."; + } + uses route-common; + } + } + } + } + } + // End of zebra container /* * RPCs @@ -528,6 +670,7 @@ module frr-zebra { description "Retrieve IPv4 routes."; } + leaf prefix-v4 { type inet:ipv4-prefix; description @@ -601,7 +744,7 @@ module frr-zebra { type uint32 { range "1..65535"; } - must '../protocol = "ospf"'; + must "../protocol = \"ospf\""; description "Retrieve routes from a specific OSPF instance."; } @@ -1786,39 +1929,32 @@ module frr-zebra { description "Extends interface model with Zebra-related parameters."; container zebra { - list ip4-addr-list { - key "ip4-prefix"; + list ip-addrs { + key "address-family ip-prefix"; description - "IPv4 prefixes for an interface."; - leaf ip4-prefix { - type inet:ipv4-prefix; + "IP prefixes for an interface."; + uses frr-rt:address-family { description - "IPv4 address prefix."; + "Address family of the RIB."; } - leaf ip4-peer { - type inet:ipv4-prefix; + + leaf ip-prefix { + type inet:ip-prefix; description - "Peer prefix, for peer-to-peer interfaces."; + "IP address prefix."; } + leaf label { type string; description "Optional string label for the address."; } - } - list ip6-addr-list { - key "ip6-prefix"; - description - "IPv6 prefixes for an interface."; - leaf ip6-prefix { - type inet:ipv6-prefix; - description - "IPv6 address prefix."; - } - leaf label { - type string; + + leaf ip4-peer { + when "derived-from-or-self(../address-family, 'ipv4')"; + type inet:ipv4-prefix; description - "Optional string label for the address."; + "Peer prefix, for peer-to-peer interfaces."; } } @@ -1827,16 +1963,19 @@ module frr-zebra { description "Multicast flag for the interface."; } + leaf link-detect { type boolean; description "Link-detection for the interface."; } + leaf shutdown { type boolean; 
description "Interface admin status."; } + leaf bandwidth { type uint32 { range "1..100000"; @@ -1845,9 +1984,74 @@ module frr-zebra { "Link bandwidth informational parameter, in megabits."; } // TODO -- link-params for (experimental/partial TE use in IGP extensions) + container state { + config false; + description + "Operational data."; + leaf up-count { + type uint16; + description + "Interface Up count."; + } + + leaf down-count { + type uint16; + description + "Interface Down count."; + } + + leaf zif-type { + type identityref { + base zebra-interface-type; + } + description + "zebra interface type."; + } + + leaf ptm-status { + type string; + default "disabled"; + description + "Interface PTM status."; + } + + leaf vlan-id { + type uint16 { + range "1..4094"; + } + description + "A VLAN id."; + } + + leaf vni-id { + type vni-id-type; + } + + leaf remote-vtep { + type inet:ipv4-address; + description + "The remote VTEP IP address."; + } + + leaf mcast-group { + type rt-types:ipv4-multicast-group-address; + description + "The VNI multicast group for BUM traffic."; + } + } } } + augment "/frr-vrf:lib/frr-vrf:vrf" { + description + "Extends VRF model with Zebra-related parameters."; + uses ribs; + } + + augment "/frr-vrf:lib/frr-vrf:vrf/ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop" { + uses frr-nh:frr-nexthop-operational; + } + /* * Main zebra container */ @@ -1985,4 +2189,63 @@ module frr-zebra { } // End interface model augmentation + + augment "/frr-route-map:lib" + + "/frr-route-map:route-map" + + "/frr-route-map:entry" + + "/frr-route-map:match-condition" + + "/frr-route-map:condition-value" { + case ipv4-prefix-length { + when "./condition = 'ipv4-prefix-length' or + ./condition = 'ipv4-next-hop-prefix-length'"; + leaf ipv4-prefix-length { + type uint8 { + range "0..32"; + } + } + } + case ipv6-prefix-length { + when "./condition = 'ipv6-prefix-length'"; + leaf ipv6-prefix-length { + type uint8 { + range "0..128"; + } + } + } + case 
source-protocol { + when "./condition = 'source-protocol'"; + leaf source-protocol { + type frr-route-types:frr-route-types; + } + } + case source-instance { + when "./condition = 'source-instance'"; + leaf source-instance { + type uint8 { + range "0..255"; + } + } + } + } + + augment "/frr-route-map:lib" + + "/frr-route-map:route-map" + + "/frr-route-map:entry" + + "/frr-route-map:set-action" + + "/frr-route-map:action-value" { + case source-v4 { + when "./action = 'source'"; + leaf source-v4 { + description "IPv4 address"; + type inet:ipv4-address; + } + } + case source-v6 { + when "./action = 'source'"; + leaf source-v6 { + description "IPv6 address"; + type inet:ipv6-address; + } + } + } } diff --git a/yang/ietf/frr-deviations-ietf-interfaces.yang b/yang/ietf/frr-deviations-ietf-interfaces.yang index 6528d66d22..704839fb60 100644 --- a/yang/ietf/frr-deviations-ietf-interfaces.yang +++ b/yang/ietf/frr-deviations-ietf-interfaces.yang @@ -8,7 +8,7 @@ module frr-deviations-ietf-interfaces { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> diff --git a/yang/ietf/frr-deviations-ietf-rip.yang b/yang/ietf/frr-deviations-ietf-rip.yang index 42ed8e3c09..39a1d7e71d 100644 --- a/yang/ietf/frr-deviations-ietf-rip.yang +++ b/yang/ietf/frr-deviations-ietf-rip.yang @@ -12,7 +12,7 @@ module frr-deviations-ietf-rip { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> diff --git a/yang/ietf/frr-deviations-ietf-routing.yang b/yang/ietf/frr-deviations-ietf-routing.yang index 62787e782c..15ceb6b929 100644 --- a/yang/ietf/frr-deviations-ietf-routing.yang +++ b/yang/ietf/frr-deviations-ietf-routing.yang @@ -8,7 +8,7 @@ module frr-deviations-ietf-routing { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> diff --git a/yang/ietf/ietf-interfaces.yang b/yang/ietf/ietf-interfaces.yang new file mode 
100644 index 0000000000..f66c205ce0 --- /dev/null +++ b/yang/ietf/ietf-interfaces.yang @@ -0,0 +1,1123 @@ +module ietf-interfaces { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces"; + prefix if; + + import ietf-yang-types { + prefix yang; + } + + organization + "IETF NETMOD (Network Modeling) Working Group"; + + contact + "WG Web: <https://datatracker.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + Editor: Martin Bjorklund + <mailto:mbj@tail-f.com>"; + + description + "This module contains a collection of YANG definitions for + managing network interfaces. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC 8343; see + the RFC itself for full legal notices."; + + revision 2018-02-20 { + description + "Updated to support NMDA."; + reference + "RFC 8343: A YANG Data Model for Interface Management"; + } + + revision 2014-05-08 { + description + "Initial revision."; + reference + "RFC 7223: A YANG Data Model for Interface Management"; + } + + /* + * Typedefs + */ + + typedef interface-ref { + type leafref { + path "/if:interfaces/if:interface/if:name"; + } + description + "This type is used by data models that need to reference + interfaces."; + } + + /* + * Identities + */ + + identity interface-type { + description + "Base identity from which specific interface types are + derived."; + } + + /* + * Features + */ + + feature arbitrary-names { + description + "This feature indicates that the device allows user-controlled + interfaces to be named arbitrarily."; + } + feature pre-provisioning { + description + "This feature indicates that the device supports + pre-provisioning of interface configuration, i.e., it is + possible to configure an interface whose physical interface + hardware is not present on the device."; + } + feature if-mib { + description + "This feature indicates that the device implements + the IF-MIB."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + + /* + * Data nodes + */ + + container interfaces { + description + "Interface parameters."; + + list interface { + key "name"; + + description + "The list of interfaces on the device. + + The status of an interface is available in this list in the + operational state. If the configuration of a + system-controlled interface cannot be used by the system + (e.g., the interface hardware present does not match the + interface type), then the configuration is not applied to + the system-controlled interface shown in the operational + state. 
If the configuration of a user-controlled interface + cannot be used by the system, the configured interface is + not instantiated in the operational state. + + System-controlled interfaces created by the system are + always present in this list in the operational state, + whether or not they are configured."; + + leaf name { + type string; + description + "The name of the interface. + + A device MAY restrict the allowed values for this leaf, + possibly depending on the type of the interface. + For system-controlled interfaces, this leaf is the + device-specific name of the interface. + + If a client tries to create configuration for a + system-controlled interface that is not present in the + operational state, the server MAY reject the request if + the implementation does not support pre-provisioning of + interfaces or if the name refers to an interface that can + never exist in the system. A Network Configuration + Protocol (NETCONF) server MUST reply with an rpc-error + with the error-tag 'invalid-value' in this case. + + If the device supports pre-provisioning of interface + configuration, the 'pre-provisioning' feature is + advertised. + + If the device allows arbitrarily named user-controlled + interfaces, the 'arbitrary-names' feature is advertised. + + When a configured user-controlled interface is created by + the system, it is instantiated with the same name in the + operational state. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf description { + type string; + description + "A textual description of the interface. + + A server implementation MAY map this leaf to the ifAlias + MIB object. 
Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifAlias. The definition of + such a mechanism is outside the scope of this document. + + Since ifAlias is defined to be stored in non-volatile + storage, the MIB implementation MUST map ifAlias to the + value of 'description' in the persistently stored + configuration."; + reference + "RFC 2863: The Interfaces Group MIB - ifAlias"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + description + "The type of the interface. + + When an interface entry is created, a server MAY + initialize the type leaf with a valid value, e.g., if it + is possible to derive the type from the name of the + interface. + + If a client tries to set the type of an interface to a + value that can never be used by the system, e.g., if the + type is not supported or if the type does not match the + name of the interface, the server MUST reject the request. + A NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf enabled { + type boolean; + default "true"; + description + "This leaf contains the configured, desired state of the + interface. + + Systems that implement the IF-MIB use the value of this + leaf in the intended configuration to set + IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry + has been initialized, as described in RFC 2863. 
+ + Changes in this leaf in the intended configuration are + reflected in ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf link-up-down-trap-enable { + if-feature if-mib; + type enumeration { + enum enabled { + value 1; + description + "The device will generate linkUp/linkDown SNMP + notifications for this interface."; + } + enum disabled { + value 2; + description + "The device will not generate linkUp/linkDown SNMP + notifications for this interface."; + } + } + description + "Controls whether linkUp/linkDown SNMP notifications + should be generated for this interface. + + If this node is not configured, the value 'enabled' is + operationally used by the server for interfaces that do + not operate on top of any other interface (i.e., there are + no 'lower-layer-if' entries), and 'disabled' otherwise."; + reference + "RFC 2863: The Interfaces Group MIB - + ifLinkUpDownTrapEnable"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + config false; + mandatory true; + description + "The desired state of the interface. + + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. 
No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + config false; + mandatory true; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + config false; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + config false; + mandatory true; + description + "The ifIndex value for the ifEntry represented by this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + config false; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. 
For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-ref; + config false; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-ref; + config false; + + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + config false; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + config false; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + description + "The total number of octets received on the interface, + including framing characters. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. 
One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + description + "The total number of octets transmitted out of the + interface, including framing characters. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + + } + } + + /* + * Legacy typedefs + */ + + typedef interface-state-ref { + type leafref { + path "/if:interfaces-state/if:interface/if:name"; + } + status deprecated; + description + "This type is used by data models that need to reference + the operationally present interfaces."; + } + + /* + * Legacy operational state data nodes + */ + + container interfaces-state { + config false; + status deprecated; + description + "Data nodes for the operational state of interfaces."; + + list interface { + key "name"; + status deprecated; + + description + "The list of interfaces on the device. 
+ + System-controlled interfaces created by the system are + always present in this list, whether or not they are + configured."; + + leaf name { + type string; + status deprecated; + description + "The name of the interface. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + status deprecated; + description + "The type of the interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + mandatory true; + status deprecated; + description + "The desired state of the interface. + + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. 
No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + mandatory true; + status deprecated; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + status deprecated; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + mandatory true; + status deprecated; + description + "The ifIndex value for the ifEntry represented by this + interface."; + + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + status deprecated; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. 
For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + status deprecated; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + status deprecated; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + status deprecated; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets received on the interface, + including framing characters. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + status deprecated; + + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. 
For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + status deprecated; + + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + status deprecated; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + } + } +} diff --git a/yang/subdir.am b/yang/subdir.am index c1297dafd5..5393781d22 100644 --- a/yang/subdir.am +++ b/yang/subdir.am @@ -21,11 +21,16 @@ EXTRA_DIST += yang/embedmodel.py dist_yangmodels_DATA += yang/frr-filter.yang dist_yangmodels_DATA += yang/frr-module-translator.yang +dist_yangmodels_DATA += yang/frr-nexthop.yang dist_yangmodels_DATA += yang/frr-test-module.yang dist_yangmodels_DATA += yang/frr-interface.yang dist_yangmodels_DATA += yang/frr-route-map.yang +dist_yangmodels_DATA += yang/frr-vrf.yang dist_yangmodels_DATA += yang/frr-route-types.yang +dist_yangmodels_DATA += yang/frr-routing.yang dist_yangmodels_DATA += yang/ietf/ietf-routing-types.yang +dist_yangmodels_DATA += yang/frr-igmp.yang +dist_yangmodels_DATA += yang/ietf/ietf-interfaces.yang if BFDD dist_yangmodels_DATA += yang/frr-bfdd.yang @@ -50,3 +55,11 @@ endif if VRRPD dist_yangmodels_DATA += yang/frr-vrrpd.yang endif + +if STATICD +dist_yangmodels_DATA += yang/frr-staticd.yang +endif + +if ZEBRA +dist_yangmodels_DATA += yang/frr-zebra.yang +endif diff --git a/zebra/debug.c b/zebra/debug.c index 68f6b69305..c920fca5ff 100644 --- a/zebra/debug.c +++ b/zebra/debug.c @@ -471,8 +471,13 @@ DEFPY (debug_zebra_nexthop, } /* Debug node. */ -struct cmd_node debug_node = {DEBUG_NODE, "", /* Debug node has no interface. 
*/ - 1}; +static int config_write_debug(struct vty *vty); +struct cmd_node debug_node = { + .name = "debug", + .node = DEBUG_NODE, + .prompt = "", + .config_write = config_write_debug, +}; static int config_write_debug(struct vty *vty) { @@ -587,7 +592,7 @@ void zebra_debug_init(void) zebra_debug_nht = 0; zebra_debug_nexthop = 0; - install_node(&debug_node, config_write_debug); + install_node(&debug_node); install_element(VIEW_NODE, &show_debugging_zebra_cmd); diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c new file mode 100644 index 0000000000..9f480926ae --- /dev/null +++ b/zebra/dplane_fpm_nl.c @@ -0,0 +1,1162 @@ +/* + * Zebra dataplane plugin for Forwarding Plane Manager (FPM) using netlink. + * + * Copyright (C) 2019 Network Device Education Foundation, Inc. ("NetDEF") + * Rafael Zalamena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <arpa/inet.h> + +#include <sys/types.h> +#include <sys/socket.h> + +#include <errno.h> +#include <string.h> + +#include "config.h" /* Include this explicitly */ +#include "lib/zebra.h" +#include "lib/json.h" +#include "lib/libfrr.h" +#include "lib/frratomic.h" +#include "lib/command.h" +#include "lib/memory.h" +#include "lib/network.h" +#include "lib/ns.h" +#include "lib/frr_pthread.h" +#include "zebra/debug.h" +#include "zebra/interface.h" +#include "zebra/zebra_dplane.h" +#include "zebra/zebra_router.h" +#include "zebra/zebra_vxlan_private.h" +#include "zebra/kernel_netlink.h" +#include "zebra/rt_netlink.h" +#include "zebra/debug.h" + +#define SOUTHBOUND_DEFAULT_ADDR INADDR_LOOPBACK +#define SOUTHBOUND_DEFAULT_PORT 2620 + +/** + * FPM header: + * { + * version: 1 byte (always 1), + * type: 1 byte (1 for netlink, 2 protobuf), + * len: 2 bytes (network order), + * } + * + * This header is used with any format to tell the users how many bytes to + * expect. + */ +#define FPM_HEADER_SIZE 4 + +static const char *prov_name = "dplane_fpm_nl"; + +struct fpm_nl_ctx { + /* data plane connection. */ + int socket; + bool disabled; + bool connecting; + bool rib_complete; + bool rmac_complete; + struct sockaddr_storage addr; + + /* data plane buffers. */ + struct stream *ibuf; + struct stream *obuf; + pthread_mutex_t obuf_mutex; + + /* + * data plane context queue: + * When a FPM server connection becomes a bottleneck, we must keep the + * data plane contexts until we get a chance to process them. + */ + struct dplane_ctx_q ctxqueue; + pthread_mutex_t ctxqueue_mutex; + + /* data plane events. 
*/ + struct zebra_dplane_provider *prov; + struct frr_pthread *fthread; + struct thread *t_connect; + struct thread *t_read; + struct thread *t_write; + struct thread *t_event; + struct thread *t_dequeue; + + /* zebra events. */ + struct thread *t_ribreset; + struct thread *t_ribwalk; + struct thread *t_rmacreset; + struct thread *t_rmacwalk; + + /* Statistic counters. */ + struct { + /* Amount of bytes read into ibuf. */ + _Atomic uint32_t bytes_read; + /* Amount of bytes written from obuf. */ + _Atomic uint32_t bytes_sent; + /* Output buffer current usage. */ + _Atomic uint32_t obuf_bytes; + /* Output buffer peak usage. */ + _Atomic uint32_t obuf_peak; + + /* Amount of connection closes. */ + _Atomic uint32_t connection_closes; + /* Amount of connection errors. */ + _Atomic uint32_t connection_errors; + + /* Amount of user configurations: FNE_RECONNECT. */ + _Atomic uint32_t user_configures; + /* Amount of user disable requests: FNE_DISABLE. */ + _Atomic uint32_t user_disables; + + /* Amount of data plane context processed. */ + _Atomic uint32_t dplane_contexts; + /* Amount of data plane contexts enqueued. */ + _Atomic uint32_t ctxqueue_len; + /* Peak amount of data plane contexts enqueued. */ + _Atomic uint32_t ctxqueue_len_peak; + + /* Amount of buffer full events. */ + _Atomic uint32_t buffer_full; + } counters; +} *gfnc; + +enum fpm_nl_events { + /* Ask for FPM to reconnect the external server. */ + FNE_RECONNECT, + /* Disable FPM. */ + FNE_DISABLE, + /* Reset counters. */ + FNE_RESET_COUNTERS, +}; + +/* + * Prototypes. + */ +static int fpm_process_event(struct thread *t); +static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx); +static int fpm_rib_send(struct thread *t); +static int fpm_rib_reset(struct thread *t); +static int fpm_rmac_send(struct thread *t); +static int fpm_rmac_reset(struct thread *t); + +/* + * Helper functions. + */ + +/** + * Reorganizes the data on the buffer so it can fit more data. 
+ * + * @param s stream pointer. + */ +static void stream_pulldown(struct stream *s) +{ + size_t rlen = STREAM_READABLE(s); + + /* No more data, so just move the pointers. */ + if (rlen == 0) { + stream_reset(s); + return; + } + + /* Move the available data to the beginning. */ + memmove(s->data, &s->data[s->getp], rlen); + s->getp = 0; + s->endp = rlen; +} + +/* + * CLI. + */ +#define FPM_STR "Forwarding Plane Manager configuration\n" + +DEFUN(fpm_set_address, fpm_set_address_cmd, + "fpm address <A.B.C.D|X:X::X:X> [port (1-65535)]", + FPM_STR + "FPM remote listening server address\n" + "Remote IPv4 FPM server\n" + "Remote IPv6 FPM server\n" + "FPM remote listening server port\n" + "Remote FPM server port\n") +{ + struct sockaddr_in *sin; + struct sockaddr_in6 *sin6; + uint16_t port = 0; + uint8_t naddr[INET6_BUFSIZ]; + + if (argc == 5) + port = strtol(argv[4]->arg, NULL, 10); + + /* Handle IPv4 addresses. */ + if (inet_pton(AF_INET, argv[2]->arg, naddr) == 1) { + sin = (struct sockaddr_in *)&gfnc->addr; + + memset(sin, 0, sizeof(*sin)); + sin->sin_family = AF_INET; + sin->sin_port = + port ? htons(port) : htons(SOUTHBOUND_DEFAULT_PORT); +#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN + sin->sin_len = sizeof(*sin); +#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ + memcpy(&sin->sin_addr, naddr, sizeof(sin->sin_addr)); + + goto ask_reconnect; + } + + /* Handle IPv6 addresses. */ + if (inet_pton(AF_INET6, argv[2]->arg, naddr) != 1) { + vty_out(vty, "%% Invalid address: %s\n", argv[2]->arg); + return CMD_WARNING; + } + + sin6 = (struct sockaddr_in6 *)&gfnc->addr; + memset(sin6, 0, sizeof(*sin6)); + sin6->sin6_family = AF_INET6; + sin6->sin6_port = port ? 
htons(port) : htons(SOUTHBOUND_DEFAULT_PORT); +#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN + sin6->sin6_len = sizeof(*sin6); +#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ + memcpy(&sin6->sin6_addr, naddr, sizeof(sin6->sin6_addr)); + +ask_reconnect: + thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc, + FNE_RECONNECT, &gfnc->t_event); + return CMD_SUCCESS; +} + +DEFUN(no_fpm_set_address, no_fpm_set_address_cmd, + "no fpm address [<A.B.C.D|X:X::X:X> [port <1-65535>]]", + NO_STR + FPM_STR + "FPM remote listening server address\n" + "Remote IPv4 FPM server\n" + "Remote IPv6 FPM server\n" + "FPM remote listening server port\n" + "Remote FPM server port\n") +{ + thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc, + FNE_DISABLE, &gfnc->t_event); + return CMD_SUCCESS; +} + +DEFUN(fpm_reset_counters, fpm_reset_counters_cmd, + "clear fpm counters", + CLEAR_STR + FPM_STR + "FPM statistic counters\n") +{ + thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc, + FNE_RESET_COUNTERS, &gfnc->t_event); + return CMD_SUCCESS; +} + +DEFUN(fpm_show_counters, fpm_show_counters_cmd, + "show fpm counters", + SHOW_STR + FPM_STR + "FPM statistic counters\n") +{ + vty_out(vty, "%30s\n%30s\n", "FPM counters", "============"); + +#define SHOW_COUNTER(label, counter) \ + vty_out(vty, "%28s: %u\n", (label), (counter)) + + SHOW_COUNTER("Input bytes", gfnc->counters.bytes_read); + SHOW_COUNTER("Output bytes", gfnc->counters.bytes_sent); + SHOW_COUNTER("Output buffer current size", gfnc->counters.obuf_bytes); + SHOW_COUNTER("Output buffer peak size", gfnc->counters.obuf_peak); + SHOW_COUNTER("Connection closes", gfnc->counters.connection_closes); + SHOW_COUNTER("Connection errors", gfnc->counters.connection_errors); + SHOW_COUNTER("Data plane items processed", + gfnc->counters.dplane_contexts); + SHOW_COUNTER("Data plane items enqueued", + gfnc->counters.ctxqueue_len); + SHOW_COUNTER("Data plane items queue peak", + gfnc->counters.ctxqueue_len_peak); + SHOW_COUNTER("Buffer 
full hits", gfnc->counters.buffer_full); + SHOW_COUNTER("User FPM configurations", gfnc->counters.user_configures); + SHOW_COUNTER("User FPM disable requests", gfnc->counters.user_disables); + +#undef SHOW_COUNTER + + return CMD_SUCCESS; +} + +DEFUN(fpm_show_counters_json, fpm_show_counters_json_cmd, + "show fpm counters json", + SHOW_STR + FPM_STR + "FPM statistic counters\n" + JSON_STR) +{ + struct json_object *jo; + + jo = json_object_new_object(); + json_object_int_add(jo, "bytes-read", gfnc->counters.bytes_read); + json_object_int_add(jo, "bytes-sent", gfnc->counters.bytes_sent); + json_object_int_add(jo, "obuf-bytes", gfnc->counters.obuf_bytes); + json_object_int_add(jo, "obuf-bytes-peak", gfnc->counters.obuf_peak); + json_object_int_add(jo, "connection-closes", + gfnc->counters.connection_closes); + json_object_int_add(jo, "connection-errors", + gfnc->counters.connection_errors); + json_object_int_add(jo, "data-plane-contexts", + gfnc->counters.dplane_contexts); + json_object_int_add(jo, "data-plane-contexts-queue", + gfnc->counters.ctxqueue_len); + json_object_int_add(jo, "data-plane-contexts-queue-peak", + gfnc->counters.ctxqueue_len_peak); + json_object_int_add(jo, "buffer-full-hits", gfnc->counters.buffer_full); + json_object_int_add(jo, "user-configures", + gfnc->counters.user_configures); + json_object_int_add(jo, "user-disables", gfnc->counters.user_disables); + vty_out(vty, "%s\n", json_object_to_json_string_ext(jo, 0)); + json_object_free(jo); + + return CMD_SUCCESS; +} + +static int fpm_write_config(struct vty *vty) +{ + struct sockaddr_in *sin; + struct sockaddr_in6 *sin6; + int written = 0; + char addrstr[INET6_ADDRSTRLEN]; + + if (gfnc->disabled) + return written; + + switch (gfnc->addr.ss_family) { + case AF_INET: + written = 1; + sin = (struct sockaddr_in *)&gfnc->addr; + inet_ntop(AF_INET, &sin->sin_addr, addrstr, sizeof(addrstr)); + vty_out(vty, "fpm address %s", addrstr); + if (sin->sin_port != htons(SOUTHBOUND_DEFAULT_PORT)) + vty_out(vty, 
" port %d", ntohs(sin->sin_port)); + + vty_out(vty, "\n"); + break; + case AF_INET6: + written = 1; + sin6 = (struct sockaddr_in6 *)&gfnc->addr; + inet_ntop(AF_INET, &sin6->sin6_addr, addrstr, sizeof(addrstr)); + vty_out(vty, "fpm address %s", addrstr); + if (sin6->sin6_port != htons(SOUTHBOUND_DEFAULT_PORT)) + vty_out(vty, " port %d", ntohs(sin6->sin6_port)); + + vty_out(vty, "\n"); + break; + + default: + break; + } + + return written; +} + +static struct cmd_node fpm_node = { + .name = "fpm", + .node = FPM_NODE, + .prompt = "", + .config_write = fpm_write_config, +}; + +/* + * FPM functions. + */ +static int fpm_connect(struct thread *t); + +static void fpm_reconnect(struct fpm_nl_ctx *fnc) +{ + /* Grab the lock to empty the stream and stop the zebra thread. */ + frr_mutex_lock_autounlock(&fnc->obuf_mutex); + + /* Avoid calling close on `-1`. */ + if (fnc->socket != -1) { + close(fnc->socket); + fnc->socket = -1; + } + + stream_reset(fnc->ibuf); + stream_reset(fnc->obuf); + THREAD_OFF(fnc->t_read); + THREAD_OFF(fnc->t_write); + + if (fnc->t_ribreset) + thread_cancel_async(zrouter.master, &fnc->t_ribreset, NULL); + if (fnc->t_ribwalk) + thread_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL); + if (fnc->t_rmacreset) + thread_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL); + if (fnc->t_rmacwalk) + thread_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL); + + /* FPM is disabled, don't attempt to connect. */ + if (fnc->disabled) + return; + + thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3, + &fnc->t_connect); +} + +static int fpm_read(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + ssize_t rv; + + /* Let's ignore the input at the moment. 
*/ + rv = stream_read_try(fnc->ibuf, fnc->socket, + STREAM_WRITEABLE(fnc->ibuf)); + if (rv == 0) { + atomic_fetch_add_explicit(&fnc->counters.connection_closes, 1, + memory_order_relaxed); + + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: connection closed", __func__); + + fpm_reconnect(fnc); + return 0; + } + if (rv == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK + || errno == EINTR) + return 0; + + atomic_fetch_add_explicit(&fnc->counters.connection_errors, 1, + memory_order_relaxed); + zlog_warn("%s: connection failure: %s", __func__, + strerror(errno)); + fpm_reconnect(fnc); + return 0; + } + stream_reset(fnc->ibuf); + + /* Account all bytes read. */ + atomic_fetch_add_explicit(&fnc->counters.bytes_read, rv, + memory_order_relaxed); + + thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket, + &fnc->t_read); + + return 0; +} + +static int fpm_write(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + socklen_t statuslen; + ssize_t bwritten; + int rv, status; + size_t btotal; + + if (fnc->connecting == true) { + status = 0; + statuslen = sizeof(status); + + rv = getsockopt(fnc->socket, SOL_SOCKET, SO_ERROR, &status, + &statuslen); + if (rv == -1 || status != 0) { + if (rv != -1) + zlog_warn("%s: connection failed: %s", __func__, + strerror(status)); + else + zlog_warn("%s: SO_ERROR failed: %s", __func__, + strerror(status)); + + atomic_fetch_add_explicit( + &fnc->counters.connection_errors, 1, + memory_order_relaxed); + + fpm_reconnect(fnc); + return 0; + } + + fnc->connecting = false; + + /* Ask zebra main thread to start walking the RIB table. */ + thread_add_timer(zrouter.master, fpm_rib_send, fnc, 0, + &fnc->t_ribwalk); + thread_add_timer(zrouter.master, fpm_rmac_send, fnc, 0, + &fnc->t_rmacwalk); + } + + frr_mutex_lock_autounlock(&fnc->obuf_mutex); + + while (true) { + /* Stream is empty: reset pointers and return. */ + if (STREAM_READABLE(fnc->obuf) == 0) { + stream_reset(fnc->obuf); + break; + } + + /* Try to write all at once. 
*/ + btotal = stream_get_endp(fnc->obuf) - + stream_get_getp(fnc->obuf); + bwritten = write(fnc->socket, stream_pnt(fnc->obuf), btotal); + if (bwritten == 0) { + atomic_fetch_add_explicit( + &fnc->counters.connection_closes, 1, + memory_order_relaxed); + + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: connection closed", __func__); + break; + } + if (bwritten == -1) { + /* Attempt to continue if blocked by a signal. */ + if (errno == EINTR) + continue; + /* Receiver is probably slow, lets give it some time. */ + if (errno == EAGAIN || errno == EWOULDBLOCK) + break; + + atomic_fetch_add_explicit( + &fnc->counters.connection_errors, 1, + memory_order_relaxed); + zlog_warn("%s: connection failure: %s", __func__, + strerror(errno)); + fpm_reconnect(fnc); + break; + } + + /* Account all bytes sent. */ + atomic_fetch_add_explicit(&fnc->counters.bytes_sent, bwritten, + memory_order_relaxed); + + /* Account number of bytes free. */ + atomic_fetch_sub_explicit(&fnc->counters.obuf_bytes, bwritten, + memory_order_relaxed); + + stream_forward_getp(fnc->obuf, (size_t)bwritten); + } + + /* Stream is not empty yet, we must schedule more writes. 
*/ + if (STREAM_READABLE(fnc->obuf)) { + stream_pulldown(fnc->obuf); + thread_add_write(fnc->fthread->master, fpm_write, fnc, + fnc->socket, &fnc->t_write); + return 0; + } + + return 0; +} + +static int fpm_connect(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + struct sockaddr_in *sin = (struct sockaddr_in *)&fnc->addr; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&fnc->addr; + socklen_t slen; + int rv, sock; + char addrstr[INET6_ADDRSTRLEN]; + + sock = socket(fnc->addr.ss_family, SOCK_STREAM, 0); + if (sock == -1) { + zlog_err("%s: fpm socket failed: %s", __func__, + strerror(errno)); + thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3, + &fnc->t_connect); + return 0; + } + + set_nonblocking(sock); + + if (fnc->addr.ss_family == AF_INET) { + inet_ntop(AF_INET, &sin->sin_addr, addrstr, sizeof(addrstr)); + slen = sizeof(*sin); + } else { + inet_ntop(AF_INET6, &sin6->sin6_addr, addrstr, sizeof(addrstr)); + slen = sizeof(*sin6); + } + + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: attempting to connect to %s:%d", __func__, + addrstr, ntohs(sin->sin_port)); + + rv = connect(sock, (struct sockaddr *)&fnc->addr, slen); + if (rv == -1 && errno != EINPROGRESS) { + atomic_fetch_add_explicit(&fnc->counters.connection_errors, 1, + memory_order_relaxed); + close(sock); + zlog_warn("%s: fpm connection failed: %s", __func__, + strerror(errno)); + thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3, + &fnc->t_connect); + return 0; + } + + fnc->connecting = (errno == EINPROGRESS); + fnc->socket = sock; + thread_add_read(fnc->fthread->master, fpm_read, fnc, sock, + &fnc->t_read); + thread_add_write(fnc->fthread->master, fpm_write, fnc, sock, + &fnc->t_write); + + /* Mark all routes as unsent. 
*/ + thread_add_timer(zrouter.master, fpm_rib_reset, fnc, 0, + &fnc->t_ribreset); + thread_add_timer(zrouter.master, fpm_rmac_reset, fnc, 0, + &fnc->t_rmacreset); + + return 0; +} + +/** + * Encode data plane operation context into netlink and enqueue it in the FPM + * output buffer. + * + * @param fnc the netlink FPM context. + * @param ctx the data plane operation context data. + * @return 0 on success or -1 on not enough space. + */ +static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx) +{ + uint8_t nl_buf[NL_PKT_BUF_SIZE]; + size_t nl_buf_len; + ssize_t rv; + uint64_t obytes, obytes_peak; + + nl_buf_len = 0; + + frr_mutex_lock_autounlock(&fnc->obuf_mutex); + + switch (dplane_ctx_get_op(ctx)) { + case DPLANE_OP_ROUTE_UPDATE: + case DPLANE_OP_ROUTE_DELETE: + rv = netlink_route_multipath(RTM_DELROUTE, ctx, nl_buf, + sizeof(nl_buf), true); + if (rv <= 0) { + zlog_err("%s: netlink_route_multipath failed", + __func__); + return 0; + } + + nl_buf_len = (size_t)rv; + + /* UPDATE operations need a INSTALL, otherwise just quit. 
*/ + if (dplane_ctx_get_op(ctx) == DPLANE_OP_ROUTE_DELETE) + break; + + /* FALL THROUGH */ + case DPLANE_OP_ROUTE_INSTALL: + rv = netlink_route_multipath(RTM_NEWROUTE, ctx, + &nl_buf[nl_buf_len], + sizeof(nl_buf) - nl_buf_len, true); + if (rv <= 0) { + zlog_err("%s: netlink_route_multipath failed", + __func__); + return 0; + } + + nl_buf_len += (size_t)rv; + break; + + case DPLANE_OP_MAC_INSTALL: + case DPLANE_OP_MAC_DELETE: + rv = netlink_macfdb_update_ctx(ctx, nl_buf, sizeof(nl_buf)); + if (rv <= 0) { + zlog_err("%s: netlink_macfdb_update_ctx failed", + __func__); + return 0; + } + + nl_buf_len = (size_t)rv; + break; + + case DPLANE_OP_NH_INSTALL: + case DPLANE_OP_NH_UPDATE: + case DPLANE_OP_NH_DELETE: + case DPLANE_OP_LSP_INSTALL: + case DPLANE_OP_LSP_UPDATE: + case DPLANE_OP_LSP_DELETE: + case DPLANE_OP_PW_INSTALL: + case DPLANE_OP_PW_UNINSTALL: + case DPLANE_OP_ADDR_INSTALL: + case DPLANE_OP_ADDR_UNINSTALL: + case DPLANE_OP_NEIGH_INSTALL: + case DPLANE_OP_NEIGH_UPDATE: + case DPLANE_OP_NEIGH_DELETE: + case DPLANE_OP_VTEP_ADD: + case DPLANE_OP_VTEP_DELETE: + case DPLANE_OP_SYS_ROUTE_ADD: + case DPLANE_OP_SYS_ROUTE_DELETE: + case DPLANE_OP_ROUTE_NOTIFY: + case DPLANE_OP_LSP_NOTIFY: + case DPLANE_OP_NONE: + break; + + default: + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: unhandled data plane message (%d) %s", + __func__, dplane_ctx_get_op(ctx), + dplane_op2str(dplane_ctx_get_op(ctx))); + break; + } + + /* Skip empty enqueues. */ + if (nl_buf_len == 0) + return 0; + + /* We must know if someday a message goes beyond 65KiB. */ + assert((nl_buf_len + FPM_HEADER_SIZE) <= UINT16_MAX); + + /* Check if we have enough buffer space. 
*/ + if (STREAM_WRITEABLE(fnc->obuf) < (nl_buf_len + FPM_HEADER_SIZE)) { + atomic_fetch_add_explicit(&fnc->counters.buffer_full, 1, + memory_order_relaxed); + + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug( + "%s: buffer full: wants to write %zu but has %zu", + __func__, nl_buf_len + FPM_HEADER_SIZE, + STREAM_WRITEABLE(fnc->obuf)); + + return -1; + } + + /* + * Fill in the FPM header information. + * + * See FPM_HEADER_SIZE definition for more information. + */ + stream_putc(fnc->obuf, 1); + stream_putc(fnc->obuf, 1); + stream_putw(fnc->obuf, nl_buf_len + FPM_HEADER_SIZE); + + /* Write current data. */ + stream_write(fnc->obuf, nl_buf, (size_t)nl_buf_len); + + /* Account number of bytes waiting to be written. */ + atomic_fetch_add_explicit(&fnc->counters.obuf_bytes, + nl_buf_len + FPM_HEADER_SIZE, + memory_order_relaxed); + obytes = atomic_load_explicit(&fnc->counters.obuf_bytes, + memory_order_relaxed); + obytes_peak = atomic_load_explicit(&fnc->counters.obuf_peak, + memory_order_relaxed); + if (obytes_peak < obytes) + atomic_store_explicit(&fnc->counters.obuf_peak, obytes, + memory_order_relaxed); + + /* Tell the thread to start writing. */ + thread_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket, + &fnc->t_write); + + return 0; +} + +/** + * Send all RIB installed routes to the connected data plane. + */ +static int fpm_rib_send(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + rib_dest_t *dest; + struct route_node *rn; + struct route_table *rt; + struct zebra_dplane_ctx *ctx; + rib_tables_iter_t rt_iter; + + /* Allocate temporary context for all transactions. */ + ctx = dplane_ctx_alloc(); + + rt_iter.state = RIB_TABLES_ITER_S_INIT; + while ((rt = rib_tables_iter_next(&rt_iter))) { + for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) { + dest = rib_dest_from_rnode(rn); + /* Skip bad route entries. */ + if (dest == NULL || dest->selected_fib == NULL) + continue; + + /* Check for already sent routes. 
*/ + if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_FPM)) + continue; + + /* Enqueue route install. */ + dplane_ctx_reset(ctx); + dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_INSTALL, rn, + dest->selected_fib); + if (fpm_nl_enqueue(fnc, ctx) == -1) { + /* Free the temporary allocated context. */ + dplane_ctx_fini(&ctx); + + thread_add_timer(zrouter.master, fpm_rib_send, + fnc, 1, &fnc->t_ribwalk); + return 0; + } + + /* Mark as sent. */ + SET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM); + } + } + + /* Free the temporary allocated context. */ + dplane_ctx_fini(&ctx); + + /* All RIB routes sent! */ + fnc->rib_complete = true; + + return 0; +} + +/* + * The next three functions will handle RMAC enqueue. + */ +struct fpm_rmac_arg { + struct zebra_dplane_ctx *ctx; + struct fpm_nl_ctx *fnc; + zebra_l3vni_t *zl3vni; +}; + +static void fpm_enqueue_rmac_table(struct hash_bucket *backet, void *arg) +{ + struct fpm_rmac_arg *fra = arg; + zebra_mac_t *zrmac = backet->data; + struct zebra_if *zif = fra->zl3vni->vxlan_if->info; + const struct zebra_l2info_vxlan *vxl = &zif->l2info.vxl; + struct zebra_if *br_zif; + vlanid_t vid; + bool sticky; + + /* Entry already sent. */ + if (CHECK_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT)) + return; + + sticky = !!CHECK_FLAG(zrmac->flags, + (ZEBRA_MAC_STICKY | ZEBRA_MAC_REMOTE_DEF_GW)); + br_zif = (struct zebra_if *)(zif->brslave_info.br_if->info); + vid = IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif) ? 
vxl->access_vlan : 0; + + dplane_ctx_reset(fra->ctx); + dplane_ctx_set_op(fra->ctx, DPLANE_OP_MAC_INSTALL); + dplane_mac_init(fra->ctx, fra->zl3vni->vxlan_if, + zif->brslave_info.br_if, vid, + &zrmac->macaddr, zrmac->fwd_info.r_vtep_ip, sticky); + if (fpm_nl_enqueue(fra->fnc, fra->ctx) == -1) { + thread_add_timer(zrouter.master, fpm_rmac_send, + fra->fnc, 1, &fra->fnc->t_rmacwalk); + } +} + +static void fpm_enqueue_l3vni_table(struct hash_bucket *backet, void *arg) +{ + struct fpm_rmac_arg *fra = arg; + zebra_l3vni_t *zl3vni = backet->data; + + fra->zl3vni = zl3vni; + hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, fra); +} + +static int fpm_rmac_send(struct thread *t) +{ + struct fpm_rmac_arg fra; + + fra.fnc = THREAD_ARG(t); + fra.ctx = dplane_ctx_alloc(); + hash_iterate(zrouter.l3vni_table, fpm_enqueue_l3vni_table, &fra); + dplane_ctx_fini(&fra.ctx); + + return 0; +} + +/** + * Resets the RIB FPM flags so we send all routes again. + */ +static int fpm_rib_reset(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + rib_dest_t *dest; + struct route_node *rn; + struct route_table *rt; + rib_tables_iter_t rt_iter; + + fnc->rib_complete = false; + + rt_iter.state = RIB_TABLES_ITER_S_INIT; + while ((rt = rib_tables_iter_next(&rt_iter))) { + for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) { + dest = rib_dest_from_rnode(rn); + /* Skip bad route entries. */ + if (dest == NULL) + continue; + + UNSET_FLAG(dest->flags, RIB_DEST_UPDATE_FPM); + } + } + + return 0; +} + +/* + * The next three functions will handle RMAC table reset. 
+ */ +static void fpm_unset_rmac_table(struct hash_bucket *backet, void *arg) +{ + zebra_mac_t *zrmac = backet->data; + + UNSET_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT); +} + +static void fpm_unset_l3vni_table(struct hash_bucket *backet, void *arg) +{ + zebra_l3vni_t *zl3vni = backet->data; + + hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni); +} + +static int fpm_rmac_reset(struct thread *t) +{ + hash_iterate(zrouter.l3vni_table, fpm_unset_l3vni_table, NULL); + + return 0; +} + +static int fpm_process_queue(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + struct zebra_dplane_ctx *ctx; + + frr_mutex_lock_autounlock(&fnc->ctxqueue_mutex); + + while (true) { + /* No space available yet. */ + if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE) + break; + + /* Dequeue next item or quit processing. */ + ctx = dplane_ctx_dequeue(&fnc->ctxqueue); + if (ctx == NULL) + break; + + fpm_nl_enqueue(fnc, ctx); + + /* Account the processed entries. */ + atomic_fetch_add_explicit(&fnc->counters.dplane_contexts, 1, + memory_order_relaxed); + atomic_fetch_sub_explicit(&fnc->counters.ctxqueue_len, 1, + memory_order_relaxed); + + dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS); + dplane_provider_enqueue_out_ctx(fnc->prov, ctx); + } + + /* Check for more items in the queue. */ + if (atomic_load_explicit(&fnc->counters.ctxqueue_len, + memory_order_relaxed) + > 0) + thread_add_timer(fnc->fthread->master, fpm_process_queue, + fnc, 0, &fnc->t_dequeue); + + return 0; +} + +/** + * Handles external (e.g. CLI, data plane or others) events. + */ +static int fpm_process_event(struct thread *t) +{ + struct fpm_nl_ctx *fnc = THREAD_ARG(t); + int event = THREAD_VAL(t); + + switch (event) { + case FNE_DISABLE: + zlog_info("%s: manual FPM disable event", __func__); + fnc->disabled = true; + atomic_fetch_add_explicit(&fnc->counters.user_disables, 1, + memory_order_relaxed); + + /* Call reconnect to disable timers and clean up context. 
*/ + fpm_reconnect(fnc); + break; + + case FNE_RECONNECT: + zlog_info("%s: manual FPM reconnect event", __func__); + fnc->disabled = false; + atomic_fetch_add_explicit(&fnc->counters.user_configures, 1, + memory_order_relaxed); + fpm_reconnect(fnc); + break; + + case FNE_RESET_COUNTERS: + zlog_info("%s: manual FPM counters reset event", __func__); + memset(&fnc->counters, 0, sizeof(fnc->counters)); + break; + + default: + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: unhandled event %d", __func__, event); + break; + } + + return 0; +} + +/* + * Data plane functions. + */ +static int fpm_nl_start(struct zebra_dplane_provider *prov) +{ + struct fpm_nl_ctx *fnc; + + fnc = dplane_provider_get_data(prov); + fnc->fthread = frr_pthread_new(NULL, prov_name, prov_name); + assert(frr_pthread_run(fnc->fthread, NULL) == 0); + fnc->ibuf = stream_new(NL_PKT_BUF_SIZE); + fnc->obuf = stream_new(NL_PKT_BUF_SIZE * 128); + pthread_mutex_init(&fnc->obuf_mutex, NULL); + fnc->socket = -1; + fnc->disabled = true; + fnc->prov = prov; + TAILQ_INIT(&fnc->ctxqueue); + pthread_mutex_init(&fnc->ctxqueue_mutex, NULL); + + return 0; +} + +static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc) +{ + /* Disable all events and close socket. */ + THREAD_OFF(fnc->t_ribreset); + THREAD_OFF(fnc->t_ribwalk); + THREAD_OFF(fnc->t_rmacreset); + THREAD_OFF(fnc->t_rmacwalk); + thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL); + thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL); + thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL); + + if (fnc->socket != -1) { + close(fnc->socket); + fnc->socket = -1; + } + + return 0; +} + +static int fpm_nl_finish_late(struct fpm_nl_ctx *fnc) +{ + /* Stop the running thread. */ + frr_pthread_stop(fnc->fthread, NULL); + + /* Free all allocated resources. 
*/ + pthread_mutex_destroy(&fnc->obuf_mutex); + pthread_mutex_destroy(&fnc->ctxqueue_mutex); + stream_free(fnc->ibuf); + stream_free(fnc->obuf); + free(gfnc); + gfnc = NULL; + + return 0; +} + +static int fpm_nl_finish(struct zebra_dplane_provider *prov, bool early) +{ + struct fpm_nl_ctx *fnc; + + fnc = dplane_provider_get_data(prov); + if (early) + return fpm_nl_finish_early(fnc); + + return fpm_nl_finish_late(fnc); +} + +static int fpm_nl_process(struct zebra_dplane_provider *prov) +{ + struct zebra_dplane_ctx *ctx; + struct fpm_nl_ctx *fnc; + int counter, limit; + uint64_t cur_queue, peak_queue; + + fnc = dplane_provider_get_data(prov); + limit = dplane_provider_get_work_limit(prov); + for (counter = 0; counter < limit; counter++) { + ctx = dplane_provider_dequeue_in_ctx(prov); + if (ctx == NULL) + break; + + /* + * Skip all notifications if not connected, we'll walk the RIB + * anyway. + */ + if (fnc->socket != -1 && fnc->connecting == false) { + frr_mutex_lock_autounlock(&fnc->ctxqueue_mutex); + dplane_ctx_enqueue_tail(&fnc->ctxqueue, ctx); + + /* Account the number of contexts. 
*/ + atomic_fetch_add_explicit(&fnc->counters.ctxqueue_len, + 1, memory_order_relaxed); + cur_queue = atomic_load_explicit( + &fnc->counters.ctxqueue_len, + memory_order_relaxed); + peak_queue = atomic_load_explicit( + &fnc->counters.ctxqueue_len_peak, + memory_order_relaxed); + if (peak_queue < cur_queue) + atomic_store_explicit( + &fnc->counters.ctxqueue_len_peak, + cur_queue, memory_order_relaxed); + continue; + } + + dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS); + dplane_provider_enqueue_out_ctx(prov, ctx); + } + + if (atomic_load_explicit(&fnc->counters.ctxqueue_len, + memory_order_relaxed) + > 0) + thread_add_timer(fnc->fthread->master, fpm_process_queue, + fnc, 0, &fnc->t_dequeue); + + return 0; +} + +static int fpm_nl_new(struct thread_master *tm) +{ + struct zebra_dplane_provider *prov = NULL; + int rv; + + gfnc = calloc(1, sizeof(*gfnc)); + rv = dplane_provider_register(prov_name, DPLANE_PRIO_POSTPROCESS, + DPLANE_PROV_FLAG_THREADED, fpm_nl_start, + fpm_nl_process, fpm_nl_finish, gfnc, + &prov); + + if (IS_ZEBRA_DEBUG_DPLANE) + zlog_debug("%s register status: %d", prov_name, rv); + + install_node(&fpm_node); + install_element(ENABLE_NODE, &fpm_show_counters_cmd); + install_element(ENABLE_NODE, &fpm_show_counters_json_cmd); + install_element(ENABLE_NODE, &fpm_reset_counters_cmd); + install_element(CONFIG_NODE, &fpm_set_address_cmd); + install_element(CONFIG_NODE, &no_fpm_set_address_cmd); + + return 0; +} + +static int fpm_nl_init(void) +{ + hook_register(frr_late_init, fpm_nl_new); + return 0; +} + +FRR_MODULE_SETUP( + .name = "dplane_fpm_nl", + .version = "0.0.1", + .description = "Data plane plugin for FPM using netlink.", + .init = fpm_nl_init, + ) diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c index 65a0add57e..78155b1455 100644 --- a/zebra/if_netlink.c +++ b/zebra/if_netlink.c @@ -764,7 +764,7 @@ static int netlink_request_intf_addr(struct nlsock *netlink_cmd, int family, if (filter_mask) addattr32(&req.n, sizeof(req), 
IFLA_EXT_MASK, filter_mask); - return netlink_request(netlink_cmd, &req.n); + return netlink_request(netlink_cmd, &req); } /* Interface lookup by netlink socket. */ diff --git a/zebra/interface.c b/zebra/interface.c index 59cbfc6854..9a248ba5d1 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -153,7 +153,7 @@ static int if_zebra_new_hook(struct interface *ifp) rtadv->AdvLinkMTU = 0; rtadv->AdvReachableTime = 0; rtadv->AdvRetransTimer = 0; - rtadv->AdvCurHopLimit = 0; + rtadv->AdvCurHopLimit = RTADV_DEFAULT_HOPLIMIT; rtadv->AdvDefaultLifetime = -1; /* derive from MaxRtrAdvInterval */ rtadv->HomeAgentPreference = 0; @@ -500,7 +500,7 @@ void if_flags_update(struct interface *ifp, uint64_t newflags) /* Wake up configured address if it is not in current kernel address. */ -static void if_addr_wakeup(struct interface *ifp) +void if_addr_wakeup(struct interface *ifp) { struct listnode *node, *nnode; struct connected *ifc; @@ -1664,7 +1664,14 @@ static void interface_update_stats(void) #endif /* HAVE_NET_RT_IFLIST */ } -struct cmd_node interface_node = {INTERFACE_NODE, "%s(config-if)# ", 1}; +static int if_config_write(struct vty *vty); +struct cmd_node interface_node = { + .name = "interface", + .node = INTERFACE_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-if)# ", + .config_write = if_config_write, +}; #ifndef VTYSH_EXTRACT_PL #include "zebra/interface_clippy.c" @@ -1881,6 +1888,24 @@ DEFUN (show_interface_desc_vrf_all, return CMD_SUCCESS; } +int if_multicast_set(struct interface *ifp) +{ + struct zebra_if *if_data; + + if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) { + if (if_set_flags(ifp, IFF_MULTICAST) < 0) { + zlog_debug("Can't set multicast flag on interface %s", + ifp->name); + return -1; + } + if_refresh(ifp); + } + if_data = ifp->info; + if_data->multicast = IF_ZEBRA_MULTICAST_ON; + + return 0; +} + DEFUN (multicast, multicast_cmd, "multicast", @@ -1904,6 +1929,24 @@ DEFUN (multicast, return CMD_SUCCESS; } +int 
if_multicast_unset(struct interface *ifp) +{ + struct zebra_if *if_data; + + if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) { + if (if_unset_flags(ifp, IFF_MULTICAST) < 0) { + zlog_debug("Can't unset multicast flag on interface %s", + ifp->name); + return -1; + } + if_refresh(ifp); + } + if_data = ifp->info; + if_data->multicast = IF_ZEBRA_MULTICAST_OFF; + + return 0; +} + DEFUN (no_multicast, no_multicast_cmd, "no multicast", @@ -1928,23 +1971,35 @@ DEFUN (no_multicast, return CMD_SUCCESS; } -DEFUN (linkdetect, - linkdetect_cmd, - "link-detect", - "Enable link detection on interface\n") +int if_linkdetect(struct interface *ifp, bool detect) { - VTY_DECLVAR_CONTEXT(interface, ifp); int if_was_operative; if_was_operative = if_is_no_ptm_operative(ifp); - SET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); + if (detect) { + SET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); - /* When linkdetection is enabled, if might come down */ - if (!if_is_no_ptm_operative(ifp) && if_was_operative) - if_down(ifp); + /* When linkdetection is enabled, if might come down */ + if (!if_is_no_ptm_operative(ifp) && if_was_operative) + if_down(ifp); + } else { + UNSET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); + /* Interface may come up after disabling link detection */ + if (if_is_operative(ifp) && !if_was_operative) + if_up(ifp); + } /* FIXME: Will defer status change forwarding if interface does not come down! 
*/ + return 0; +} + +DEFUN(linkdetect, linkdetect_cmd, "link-detect", + "Enable link detection on interface\n") +{ + VTY_DECLVAR_CONTEXT(interface, ifp); + + if_linkdetect(ifp, true); return CMD_SUCCESS; } @@ -1957,18 +2012,29 @@ DEFUN (no_linkdetect, "Disable link detection on interface\n") { VTY_DECLVAR_CONTEXT(interface, ifp); - int if_was_operative; - if_was_operative = if_is_no_ptm_operative(ifp); - UNSET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); + if_linkdetect(ifp, false); + + return CMD_SUCCESS; +} - /* Interface may come up after disabling link detection */ - if (if_is_operative(ifp) && !if_was_operative) - if_up(ifp); +int if_shutdown(struct interface *ifp) +{ + struct zebra_if *if_data; - /* FIXME: see linkdetect_cmd */ + if (ifp->ifindex != IFINDEX_INTERNAL) { + /* send RA lifetime of 0 before stopping. rfc4861/6.2.5 */ + rtadv_stop_ra(ifp); + if (if_unset_flags(ifp, IFF_UP) < 0) { + zlog_debug("Can't shutdown interface %s", ifp->name); + return -1; + } + if_refresh(ifp); + } + if_data = ifp->info; + if_data->shutdown = IF_ZEBRA_SHUTDOWN_ON; - return CMD_SUCCESS; + return 0; } DEFUN (shutdown_if, @@ -1996,6 +2062,30 @@ DEFUN (shutdown_if, return CMD_SUCCESS; } +int if_no_shutdown(struct interface *ifp) +{ + struct zebra_if *if_data; + + if (ifp->ifindex != IFINDEX_INTERNAL) { + if (if_set_flags(ifp, IFF_UP | IFF_RUNNING) < 0) { + zlog_debug("Can't up interface %s", ifp->name); + return -1; + } + if_refresh(ifp); + + /* Some addresses (in particular, IPv6 addresses on Linux) get + * removed when the interface goes down. They need to be + * readded. 
+ */ + if_addr_wakeup(ifp); + } + + if_data = ifp->info; + if_data->shutdown = IF_ZEBRA_SHUTDOWN_OFF; + + return 0; +} + DEFUN (no_shutdown_if, no_shutdown_if_cmd, "no shutdown", @@ -2074,7 +2164,10 @@ DEFUN (no_bandwidth_if, struct cmd_node link_params_node = { - LINK_PARAMS_NODE, "%s(config-link-params)# ", 1, + .name = "link-params", + .node = LINK_PARAMS_NODE, + .parent_node = INTERFACE_NODE, + .prompt = "%s(config-link-params)# ", }; static void link_param_cmd_set_uint32(struct interface *ifp, uint32_t *field, @@ -2748,6 +2841,79 @@ DEFUN (no_link_params_use_bw, return CMD_SUCCESS; } +int if_ip_address_install(struct interface *ifp, struct prefix *prefix, + const char *label, struct prefix *pp) +{ + struct zebra_if *if_data; + struct prefix_ipv4 lp; + struct prefix_ipv4 *p; + struct connected *ifc; + enum zebra_dplane_result dplane_res; + + if_data = ifp->info; + + lp.family = prefix->family; + lp.prefix = prefix->u.prefix4; + lp.prefixlen = prefix->prefixlen; + apply_mask_ipv4(&lp); + + ifc = connected_check_ptp(ifp, &lp, pp ? pp : NULL); + if (!ifc) { + ifc = connected_new(); + ifc->ifp = ifp; + + /* Address. */ + p = prefix_ipv4_new(); + *p = lp; + ifc->address = (struct prefix *)p; + + if (pp) { + SET_FLAG(ifc->flags, ZEBRA_IFA_PEER); + p = prefix_ipv4_new(); + *p = *(struct prefix_ipv4 *)pp; + ifc->destination = (struct prefix *)p; + } + + /* Label. */ + if (label) + ifc->label = XSTRDUP(MTYPE_CONNECTED_LABEL, label); + + /* Add to linked list. */ + listnode_add(ifp->connected, ifc); + } + + /* This address is configured from zebra. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED)) + SET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED); + + /* In case of this route need to install kernel. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) + && CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) + && !(if_data && if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)) { + /* Some system need to up the interface to set IP address. 
*/ + if (!if_is_up(ifp)) { + if_set_flags(ifp, IFF_UP | IFF_RUNNING); + if_refresh(ifp); + } + + dplane_res = dplane_intf_addr_set(ifp, ifc); + if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) { + zlog_debug( + "dplane can't set interface IP address: %s.\n", + dplane_res2str(dplane_res)); + return NB_ERR; + } + + SET_FLAG(ifc->conf, ZEBRA_IFC_QUEUED); + /* The address will be advertised to zebra clients when the + * notification + * from the kernel has been received. + * It will also be added to the subnet chain list, then. */ + } + + return 0; +} + static int ip_address_install(struct vty *vty, struct interface *ifp, const char *addr_str, const char *peer_str, const char *label) @@ -2842,6 +3008,51 @@ static int ip_address_install(struct vty *vty, struct interface *ifp, return CMD_SUCCESS; } +int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix) +{ + struct connected *ifc = NULL; + enum zebra_dplane_result dplane_res; + + if (prefix->family == AF_INET) { + /* Check current interface address. */ + ifc = connected_check_ptp(ifp, prefix, NULL); + if (!ifc) { + zlog_debug("interface %s Can't find address\n", + ifp->name); + return -1; + } + + } else if (prefix->family == AF_INET6) { + /* Check current interface address. */ + ifc = connected_check(ifp, prefix); + } + + if (!ifc) { + zlog_debug("interface %s Can't find address\n", ifp->name); + return -1; + } + UNSET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED); + + /* This is not real address or interface is not active. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) + || !CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) { + listnode_delete(ifp->connected, ifc); + connected_free(&ifc); + return CMD_WARNING_CONFIG_FAILED; + } + + /* This is real route. 
*/ + dplane_res = dplane_intf_addr_unset(ifp, ifc); + if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) { + zlog_debug("Can't unset interface IP address: %s.\n", + dplane_res2str(dplane_res)); + return -1; + } + UNSET_FLAG(ifc->conf, ZEBRA_IFC_QUEUED); + + return 0; +} + static int ip_address_uninstall(struct vty *vty, struct interface *ifp, const char *addr_str, const char *peer_str, const char *label) @@ -2995,6 +3206,71 @@ DEFUN (no_ip_address_label, } #endif /* HAVE_NETLINK */ +int if_ipv6_address_install(struct interface *ifp, struct prefix *prefix, + const char *label) +{ + struct zebra_if *if_data; + struct prefix_ipv6 cp; + struct connected *ifc; + struct prefix_ipv6 *p; + enum zebra_dplane_result dplane_res; + + if_data = ifp->info; + + cp.family = prefix->family; + cp.prefixlen = prefix->prefixlen; + cp.prefix = prefix->u.prefix6; + apply_mask_ipv6(&cp); + + ifc = connected_check(ifp, (struct prefix *)&cp); + if (!ifc) { + ifc = connected_new(); + ifc->ifp = ifp; + + /* Address. */ + p = prefix_ipv6_new(); + *p = cp; + ifc->address = (struct prefix *)p; + + /* Label. */ + if (label) + ifc->label = XSTRDUP(MTYPE_CONNECTED_LABEL, label); + + /* Add to linked list. */ + listnode_add(ifp->connected, ifc); + } + + /* This address is configured from zebra. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED)) + SET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED); + + /* In case of this route need to install kernel. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) + && CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) + && !(if_data && if_data->shutdown == IF_ZEBRA_SHUTDOWN_ON)) { + /* Some system need to up the interface to set IP address. 
*/ + if (!if_is_up(ifp)) { + if_set_flags(ifp, IFF_UP | IFF_RUNNING); + if_refresh(ifp); + } + + dplane_res = dplane_intf_addr_set(ifp, ifc); + if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) { + zlog_debug( + "dplane can't set interface IP address: %s.\n", + dplane_res2str(dplane_res)); + return NB_ERR; + } + + SET_FLAG(ifc->conf, ZEBRA_IFC_QUEUED); + /* The address will be advertised to zebra clients when the + * notification + * from the kernel has been received. */ + } + + return 0; +} + static int ipv6_address_install(struct vty *vty, struct interface *ifp, const char *addr_str, const char *peer_str, const char *label) @@ -3311,8 +3587,8 @@ void zebra_if_init(void) hook_register_prio(if_del, 0, if_zebra_delete_hook); /* Install configuration write function. */ - install_node(&interface_node, if_config_write); - install_node(&link_params_node, NULL); + install_node(&interface_node); + install_node(&link_params_node); if_cmd_init(); /* * This is *intentionally* setting this to NULL, signaling diff --git a/zebra/interface.h b/zebra/interface.h index b7e90a0c31..2dad0c3bb2 100644 --- a/zebra/interface.h +++ b/zebra/interface.h @@ -117,6 +117,7 @@ struct rtadvconf { Default: The value specified in the "Assigned Numbers" RFC [ASSIGNED] that was in effect at the time of implementation. */ int AdvCurHopLimit; +#define RTADV_DEFAULT_HOPLIMIT 64 /* 64 hops */ /* The value to be placed in the Router Lifetime field of Router Advertisements sent from the interface, in seconds. 
MUST be @@ -440,6 +441,17 @@ extern void zebra_if_update_link(struct interface *ifp, ifindex_t link_ifindex, ns_id_t ns_id); extern void zebra_if_update_all_links(void); extern void zebra_if_set_protodown(struct interface *ifp, bool down); +extern int if_ip_address_install(struct interface *ifp, struct prefix *prefix, + const char *label, struct prefix *pp); +extern int if_ipv6_address_install(struct interface *ifp, struct prefix *prefix, + const char *label); +extern int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix); +extern int if_shutdown(struct interface *ifp); +extern int if_no_shutdown(struct interface *ifp); +extern int if_multicast_set(struct interface *ifp); +extern int if_multicast_unset(struct interface *ifp); +extern int if_linkdetect(struct interface *ifp, bool detect); +extern void if_addr_wakeup(struct interface *ifp); /* Nexthop group connected functions */ extern void if_nhg_dependents_add(struct interface *ifp, diff --git a/zebra/irdp_interface.c b/zebra/irdp_interface.c index 8e1ca122d3..87a1f5fdc7 100644 --- a/zebra/irdp_interface.c +++ b/zebra/irdp_interface.c @@ -53,6 +53,7 @@ #include "if.h" #include "sockunion.h" #include "log.h" +#include "network.h" extern int irdp_sock; @@ -267,7 +268,7 @@ static void irdp_if_start(struct interface *ifp, int multicast, } srandom(seed); - timer = (random() % IRDP_DEFAULT_INTERVAL) + 1; + timer = (frr_weak_random() % IRDP_DEFAULT_INTERVAL) + 1; irdp->AdvPrefList = list_new(); irdp->AdvPrefList->del = (void (*)(void *))Adv_free; /* Destructor */ diff --git a/zebra/irdp_main.c b/zebra/irdp_main.c index a1e6e8248e..b868d23a94 100644 --- a/zebra/irdp_main.c +++ b/zebra/irdp_main.c @@ -66,6 +66,7 @@ #include "if.h" #include "sockunion.h" #include "log.h" +#include "network.h" /* GLOBAL VARS */ @@ -233,7 +234,7 @@ int irdp_send_thread(struct thread *t_advert) } tmp = irdp->MaxAdvertInterval - irdp->MinAdvertInterval; - timer = random() % (tmp + 1); + timer = frr_weak_random() % (tmp + 1); 
timer = irdp->MinAdvertInterval + timer; if (irdp->irdp_sent < MAX_INITIAL_ADVERTISEMENTS @@ -303,7 +304,7 @@ void process_solicit(struct interface *ifp) thread_cancel(irdp->t_advertise); irdp->t_advertise = NULL; - timer = (random() % MAX_RESPONSE_DELAY) + 1; + timer = (frr_weak_random() % MAX_RESPONSE_DELAY) + 1; irdp->t_advertise = NULL; thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer, diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c index 97b0238362..a1f7014ce9 100644 --- a/zebra/kernel_netlink.c +++ b/zebra/kernel_netlink.c @@ -1061,10 +1061,11 @@ int netlink_talk(int (*filter)(struct nlmsghdr *, ns_id_t, int startup), /* Issue request message to kernel via netlink socket. GET messages * are issued through this interface. */ -int netlink_request(struct nlsock *nl, struct nlmsghdr *n) +int netlink_request(struct nlsock *nl, void *req) { int ret; struct sockaddr_nl snl; + struct nlmsghdr *n = (struct nlmsghdr *)req; /* Check netlink socket. */ if (nl->sock < 0) { @@ -1082,7 +1083,7 @@ int netlink_request(struct nlsock *nl, struct nlmsghdr *n) /* Raise capabilities and send message, then lower capabilities. 
*/ frr_with_privs(&zserv_privs) { - ret = sendto(nl->sock, (void *)n, n->nlmsg_len, 0, + ret = sendto(nl->sock, req, n->nlmsg_len, 0, (struct sockaddr *)&snl, sizeof(snl)); } diff --git a/zebra/kernel_netlink.h b/zebra/kernel_netlink.h index 076ca5c5c7..6a4077abf6 100644 --- a/zebra/kernel_netlink.h +++ b/zebra/kernel_netlink.h @@ -68,7 +68,7 @@ int netlink_talk_info(int (*filter)(struct nlmsghdr *, ns_id_t, int startup), struct nlmsghdr *n, const struct zebra_dplane_info *dp_info, int startup); -extern int netlink_request(struct nlsock *nl, struct nlmsghdr *n); +extern int netlink_request(struct nlsock *nl, void *req); #endif /* HAVE_NETLINK */ diff --git a/zebra/label_manager.c b/zebra/label_manager.c index caebdc0f08..93736e672a 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -4,7 +4,7 @@ * Copyright (C) 2017 by Bingen Eguzkitza, * Volta Networks Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -52,48 +52,45 @@ DEFINE_MTYPE_STATIC(LBL_MGR, LM_CHUNK, "Label Manager Chunk"); * externally */ -DEFINE_HOOK(lm_client_connect, - (uint8_t proto, uint16_t instance, vrf_id_t vrf_id), - (proto, instance, vrf_id)); -DEFINE_HOOK(lm_client_disconnect, (uint8_t proto, uint16_t instance), - (proto, instance)); +DEFINE_HOOK(lm_client_connect, (struct zserv *client, vrf_id_t vrf_id), + (client, vrf_id)); +DEFINE_HOOK(lm_client_disconnect, (struct zserv *client), (client)); DEFINE_HOOK(lm_get_chunk, - (struct label_manager_chunk * *lmc, uint8_t proto, - uint16_t instance, uint8_t keep, uint32_t size, uint32_t base, - vrf_id_t vrf_id), - (lmc, proto, instance, keep, size, base, vrf_id)); + (struct label_manager_chunk * *lmc, struct zserv *client, + uint8_t keep, uint32_t size, uint32_t base, vrf_id_t vrf_id), + (lmc, client, keep, size, base, vrf_id)); 
DEFINE_HOOK(lm_release_chunk, - (uint8_t proto, uint16_t instance, uint32_t start, uint32_t end), - (proto, instance, start, end)); + (struct zserv *client, uint32_t start, uint32_t end), + (client, start, end)); DEFINE_HOOK(lm_cbs_inited, (), ()); /* define wrappers to be called in zapi_msg.c (as hooks must be called in * source file where they were defined) */ -void lm_client_connect_call(uint8_t proto, uint16_t instance, vrf_id_t vrf_id) +void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id) { - hook_call(lm_client_connect, proto, instance, vrf_id); + hook_call(lm_client_connect, client, vrf_id); } -void lm_get_chunk_call(struct label_manager_chunk **lmc, uint8_t proto, - uint16_t instance, uint8_t keep, uint32_t size, - uint32_t base, vrf_id_t vrf_id) +void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client, + uint8_t keep, uint32_t size, uint32_t base, + vrf_id_t vrf_id) { - hook_call(lm_get_chunk, lmc, proto, instance, keep, size, base, vrf_id); + hook_call(lm_get_chunk, lmc, client, keep, size, base, vrf_id); } -void lm_release_chunk_call(uint8_t proto, uint16_t instance, uint32_t start, - uint32_t end) +void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end) { - hook_call(lm_release_chunk, proto, instance, start, end); + hook_call(lm_release_chunk, client, start, end); } /* forward declarations of the static functions to be used for some hooks */ -static int label_manager_connect(uint8_t proto, uint16_t instance, - vrf_id_t vrf_id); -static int label_manager_disconnect(uint8_t proto, uint16_t instance); +static int label_manager_connect(struct zserv *client, vrf_id_t vrf_id); +static int label_manager_disconnect(struct zserv *client); static int label_manager_get_chunk(struct label_manager_chunk **lmc, - uint8_t proto, uint16_t instance, - uint8_t keep, uint32_t size, uint32_t base, + struct zserv *client, uint8_t keep, + uint32_t size, uint32_t base, vrf_id_t vrf_id); +static int 
label_manager_release_label_chunk(struct zserv *client, + uint32_t start, uint32_t end); void delete_label_chunk(void *val) { @@ -110,7 +107,7 @@ void delete_label_chunk(void *val) * @param instance Instance, to identify the owner * @return Number of chunks released */ -int release_daemon_label_chunks(uint8_t proto, unsigned short instance) +int release_daemon_label_chunks(struct zserv *client) { struct listnode *node; struct label_manager_chunk *lmc; @@ -118,13 +115,16 @@ int release_daemon_label_chunks(uint8_t proto, unsigned short instance) int ret; if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s: Releasing chunks for client proto %s, instance %d", - __func__, zebra_route_string(proto), instance); + zlog_debug("%s: Releasing chunks for client proto %s, instance %d, session %u", + __func__, zebra_route_string(client->proto), + client->instance, client->session_id); for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { - if (lmc->proto == proto && lmc->instance == instance - && lmc->keep == 0) { + if (lmc->proto == client->proto && + lmc->instance == client->instance && + lmc->session_id == client->session_id && lmc->keep == 0) { ret = release_label_chunk(lmc->proto, lmc->instance, + lmc->session_id, lmc->start, lmc->end); if (ret == 0) count++; @@ -139,10 +139,7 @@ int release_daemon_label_chunks(uint8_t proto, unsigned short instance) int lm_client_disconnect_cb(struct zserv *client) { - uint8_t proto = client->proto; - uint16_t instance = client->instance; - - hook_call(lm_client_disconnect, proto, instance); + hook_call(lm_client_disconnect, client); return 0; } @@ -151,14 +148,14 @@ void lm_hooks_register(void) hook_register(lm_client_connect, label_manager_connect); hook_register(lm_client_disconnect, label_manager_disconnect); hook_register(lm_get_chunk, label_manager_get_chunk); - hook_register(lm_release_chunk, release_label_chunk); + hook_register(lm_release_chunk, label_manager_release_label_chunk); } void lm_hooks_unregister(void) { 
hook_unregister(lm_client_connect, label_manager_connect); hook_unregister(lm_client_disconnect, label_manager_disconnect); hook_unregister(lm_get_chunk, label_manager_get_chunk); - hook_unregister(lm_release_chunk, release_label_chunk); + hook_unregister(lm_release_chunk, label_manager_release_label_chunk); } /** @@ -180,6 +177,7 @@ void label_manager_init(void) /* alloc and fill a label chunk */ struct label_manager_chunk *create_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint8_t keep, uint32_t start, uint32_t end) { @@ -191,6 +189,7 @@ struct label_manager_chunk *create_label_chunk(uint8_t proto, lmc->end = end; lmc->proto = proto; lmc->instance = instance; + lmc->session_id = session_id; lmc->keep = keep; return lmc; @@ -199,7 +198,8 @@ struct label_manager_chunk *create_label_chunk(uint8_t proto, /* attempt to get a specific label chunk */ static struct label_manager_chunk * assign_specific_label_chunk(uint8_t proto, unsigned short instance, - uint8_t keep, uint32_t size, uint32_t base) + uint32_t session_id, uint8_t keep, uint32_t size, + uint32_t base) { struct label_manager_chunk *lmc; struct listnode *node, *next = NULL; @@ -248,7 +248,8 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, /* insert chunk between existing chunks */ if (insert_node) { - lmc = create_label_chunk(proto, instance, keep, base, end); + lmc = create_label_chunk(proto, instance, session_id, keep, + base, end); listnode_add_before(lbl_mgr.lc_list, insert_node, lmc); return lmc; } @@ -270,7 +271,8 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, delete_label_chunk(death); } - lmc = create_label_chunk(proto, instance, keep, base, end); + lmc = create_label_chunk(proto, instance, session_id, keep, + base, end); if (last_node) listnode_add_before(lbl_mgr.lc_list, last_node, lmc); else @@ -280,7 +282,8 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, } else { /* create a new chunk past all the 
existing ones and link at * tail */ - lmc = create_label_chunk(proto, instance, keep, base, end); + lmc = create_label_chunk(proto, instance, session_id, keep, + base, end); listnode_add(lbl_mgr.lc_list, lmc); return lmc; } @@ -301,6 +304,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, */ struct label_manager_chunk *assign_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint8_t keep, uint32_t size, uint32_t base) { @@ -310,8 +314,8 @@ struct label_manager_chunk *assign_label_chunk(uint8_t proto, /* handle chunks request with a specific base label */ if (base != MPLS_LABEL_BASE_ANY) - return assign_specific_label_chunk(proto, instance, keep, size, - base); + return assign_specific_label_chunk(proto, instance, session_id, + keep, size, base); /* appease scan-build, who gets confused by the use of macros */ assert(lbl_mgr.lc_list); @@ -322,6 +326,7 @@ struct label_manager_chunk *assign_label_chunk(uint8_t proto, && lmc->end - lmc->start + 1 == size) { lmc->proto = proto; lmc->instance = instance; + lmc->session_id = session_id; lmc->keep = keep; return lmc; } @@ -329,8 +334,9 @@ struct label_manager_chunk *assign_label_chunk(uint8_t proto, */ if ((lmc->start > prev_end) && (lmc->start - prev_end >= size)) { - lmc = create_label_chunk(proto, instance, keep, - prev_end + 1, prev_end + size); + lmc = create_label_chunk(proto, instance, session_id, + keep, prev_end + 1, + prev_end + size); listnode_add_before(lbl_mgr.lc_list, node, lmc); return lmc; } @@ -355,13 +361,31 @@ struct label_manager_chunk *assign_label_chunk(uint8_t proto, } /* create chunk and link at tail */ - lmc = create_label_chunk(proto, instance, keep, start_free, + lmc = create_label_chunk(proto, instance, session_id, keep, start_free, start_free + size - 1); listnode_add(lbl_mgr.lc_list, lmc); return lmc; } /** + * Release label chunks from a client. + * + * Called on client disconnection or reconnection. 
It only releases chunks + * with empty keep value. + * + * @param client Client zapi session + * @param start First label of the chunk + * @param end Last label of the chunk + * @return 0 on success + */ +static int label_manager_release_label_chunk(struct zserv *client, + uint32_t start, uint32_t end) +{ + return release_label_chunk(client->proto, client->instance, + client->session_id, start, end); +} + +/** * Core function, release no longer used label chunks * * @param proto Daemon protocol of client, to identify the owner @@ -370,8 +394,8 @@ struct label_manager_chunk *assign_label_chunk(uint8_t proto, * @param end Last label of the chunk * @return 0 on success, -1 otherwise */ -int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t start, - uint32_t end) +int release_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint32_t start, uint32_t end) { struct listnode *node; struct label_manager_chunk *lmc; @@ -386,13 +410,15 @@ int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t start, continue; if (lmc->end != end) continue; - if (lmc->proto != proto || lmc->instance != instance) { + if (lmc->proto != proto || lmc->instance != instance || + lmc->session_id != session_id) { flog_err(EC_ZEBRA_LM_DAEMON_MISMATCH, "%s: Daemon mismatch!!", __func__); continue; } lmc->proto = NO_PROTO; lmc->instance = 0; + lmc->session_id = 0; lmc->keep = 0; ret = 0; break; @@ -405,55 +431,60 @@ int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t start, } /* default functions to be called on hooks */ -static int label_manager_connect(uint8_t proto, uint16_t instance, - vrf_id_t vrf_id) +static int label_manager_connect(struct zserv *client, vrf_id_t vrf_id) { /* * Release previous labels of same protocol and instance. * This is done in case it restarted from an unexpected shutdown. 
*/ - release_daemon_label_chunks(proto, instance); - return lm_client_connect_response(proto, instance, vrf_id, 0); + release_daemon_label_chunks(client); + return zsend_label_manager_connect_response(client, vrf_id, 0); } -static int label_manager_disconnect(uint8_t proto, uint16_t instance) +static int label_manager_disconnect(struct zserv *client) { - release_daemon_label_chunks(proto, instance); + release_daemon_label_chunks(client); return 0; } static int label_manager_get_chunk(struct label_manager_chunk **lmc, - uint8_t proto, uint16_t instance, - uint8_t keep, uint32_t size, uint32_t base, + struct zserv *client, uint8_t keep, + uint32_t size, uint32_t base, vrf_id_t vrf_id) { - *lmc = assign_label_chunk(proto, instance, keep, size, base); - return lm_get_chunk_response(*lmc, proto, instance, vrf_id); + *lmc = assign_label_chunk(client->proto, client->instance, + client->session_id, keep, size, base); + return lm_get_chunk_response(*lmc, client, vrf_id); } /* Respond to a connect request */ int lm_client_connect_response(uint8_t proto, uint16_t instance, - vrf_id_t vrf_id, uint8_t result) + uint32_t session_id, vrf_id_t vrf_id, + uint8_t result) { - struct zserv *client = zserv_find_client(proto, instance); + struct zserv *client = zserv_find_client_session(proto, instance, + session_id); if (!client) { - zlog_err("%s: could not find client for daemon %s instance %u", - __func__, zebra_route_string(proto), instance); + zlog_err("%s: could not find client for daemon %s instance %u session %u", + __func__, zebra_route_string(proto), instance, + session_id); return 1; } return zsend_label_manager_connect_response(client, vrf_id, result); } /* Respond to a get_chunk request */ -int lm_get_chunk_response(struct label_manager_chunk *lmc, uint8_t proto, - uint16_t instance, vrf_id_t vrf_id) +int lm_get_chunk_response(struct label_manager_chunk *lmc, struct zserv *client, + vrf_id_t vrf_id) { - struct zserv *client = zserv_find_client(proto, instance); - if 
(!client) { - zlog_err("%s: could not find client for daemon %s instance %u", - __func__, zebra_route_string(proto), instance); - return 1; - } - return zsend_assign_label_chunk_response(client, vrf_id, proto, - instance, lmc); + if (!lmc) + flog_err(EC_ZEBRA_LM_CANNOT_ASSIGN_CHUNK, + "Unable to assign Label Chunk to %s instance %u", + zebra_route_string(client->proto), client->instance); + else if (IS_ZEBRA_DEBUG_PACKET) + zlog_debug("Assigned Label Chunk %u - %u to %s instance %u", + lmc->start, lmc->end, + zebra_route_string(client->proto), client->instance); + + return zsend_assign_label_chunk_response(client, vrf_id, lmc); } void label_manager_close(void) diff --git a/zebra/label_manager.h b/zebra/label_manager.h index 74e283e85e..82154982c2 100644 --- a/zebra/label_manager.h +++ b/zebra/label_manager.h @@ -4,7 +4,7 @@ * Copyright (C) 2017 by Bingen Eguzkitza, * Volta Networks Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -40,19 +40,20 @@ extern "C" { /* * Label chunk struct - * Client daemon which the chunk belongs to can be identified by either - * proto (daemon protocol) + instance. + * Client daemon which the chunk belongs to can be identified by a tuple of: + * proto (daemon protocol) + instance + zapi session_id * If the client then passes a non-empty value to keep field when it requests * for chunks, the chunks won't be garbage collected and the client will be - * responsible of its release. + * responsible for releasing them. 
* Otherwise, if the keep field is not set (value 0) for the chunk, it will be * automatically released when the client disconnects or when it reconnects * (in case it died unexpectedly, we can know it's the same because it will have - * the same proto and instance values) + * the same proto+instance+session values) */ struct label_manager_chunk { uint8_t proto; unsigned short instance; + uint32_t session_id; uint8_t keep; uint32_t start; /* First label of the chunk */ uint32_t end; /* Last label of the chunk */ @@ -63,41 +64,40 @@ struct label_manager_chunk { * so that any external module wanting to replace those can react */ -DECLARE_HOOK(lm_client_connect, - (uint8_t proto, uint16_t instance, vrf_id_t vrf_id), - (proto, instance, vrf_id)); -DECLARE_HOOK(lm_client_disconnect, (uint8_t proto, uint16_t instance), - (proto, instance)); +DECLARE_HOOK(lm_client_connect, (struct zserv *client, vrf_id_t vrf_id), + (client, vrf_id)); +DECLARE_HOOK(lm_client_disconnect, (struct zserv *client), (client)); DECLARE_HOOK(lm_get_chunk, - (struct label_manager_chunk * *lmc, uint8_t proto, - uint16_t instance, uint8_t keep, uint32_t size, uint32_t base, - vrf_id_t vrf_id), - (lmc, proto, instance, keep, size, base, vrf_id)); + (struct label_manager_chunk * *lmc, struct zserv *client, + uint8_t keep, uint32_t size, uint32_t base, vrf_id_t vrf_id), + (lmc, client, keep, size, base, vrf_id)); DECLARE_HOOK(lm_release_chunk, - (uint8_t proto, uint16_t instance, uint32_t start, uint32_t end), - (proto, instance, start, end)); + (struct zserv *client, uint32_t start, uint32_t end), + (client, start, end)); DECLARE_HOOK(lm_cbs_inited, (), ()); /* declare wrappers to be called in zapi_msg.c (as hooks must be called in * source file where they were defined) */ -void lm_client_connect_call(uint8_t proto, uint16_t instance, vrf_id_t vrf_id); -void lm_get_chunk_call(struct label_manager_chunk **lmc, uint8_t proto, - uint16_t instance, uint8_t keep, uint32_t size, - uint32_t base, vrf_id_t 
vrf_id); -void lm_release_chunk_call(uint8_t proto, uint16_t instance, uint32_t start, +void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id); +void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client, + uint8_t keep, uint32_t size, uint32_t base, + vrf_id_t vrf_id); +void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end); /* API for an external LM to return responses for requests */ int lm_client_connect_response(uint8_t proto, uint16_t instance, - vrf_id_t vrf_id, uint8_t result); -int lm_get_chunk_response(struct label_manager_chunk *lmc, uint8_t proto, - uint16_t instance, vrf_id_t vrf_id); + uint32_t session_id, vrf_id_t vrf_id, + uint8_t result); +int lm_get_chunk_response(struct label_manager_chunk *lmc, struct zserv *client, + vrf_id_t vrf_id); /* convenience function to allocate an lmc to be consumed by the above API */ struct label_manager_chunk *create_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint8_t keep, uint32_t start, uint32_t end); void delete_label_chunk(void *val); @@ -117,12 +117,13 @@ struct label_manager { void label_manager_init(void); struct label_manager_chunk *assign_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint8_t keep, uint32_t size, uint32_t base); -int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t start, - uint32_t end); +int release_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint32_t start, uint32_t end); int lm_client_disconnect_cb(struct zserv *client); -int release_daemon_label_chunks(uint8_t proto, unsigned short instance); +int release_daemon_label_chunks(struct zserv *client); void label_manager_close(void); #ifdef __cplusplus diff --git a/zebra/main.c b/zebra/main.c index dab1449194..e230a744f6 100644 --- a/zebra/main.c +++ b/zebra/main.c @@ -54,6 +54,7 @@ #include "zebra/zebra_pbr.h" #include "zebra/zebra_vxlan.h" #include "zebra/zebra_routemap.h" 
+#include "zebra/zebra_nb.h" #if defined(HANDLE_NETLINK_FUZZING) #include "zebra/kernel_netlink.h" @@ -174,6 +175,7 @@ static void sigint(void) work_queue_free_and_null(&zrouter.lsp_process_q); vrf_terminate(); + rtadv_terminate(); ns_walk_func(zebra_ns_early_shutdown); zebra_ns_notify_close(); @@ -245,6 +247,8 @@ struct quagga_signal_t zebra_signals[] = { static const struct frr_yang_module_info *const zebra_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_zebra_info, + &frr_vrf_info, }; FRR_DAEMON_INFO( @@ -329,17 +333,21 @@ int main(int argc, char **argv) case 'a': allow_delete = 1; break; - case 'e': - zrouter.multipath_num = atoi(optarg); - if (zrouter.multipath_num > MULTIPATH_NUM - || zrouter.multipath_num <= 0) { + case 'e': { + unsigned long int parsed_multipath = + strtoul(optarg, NULL, 10); + if (parsed_multipath == 0 + || parsed_multipath > MULTIPATH_NUM + || parsed_multipath > UINT32_MAX) { flog_err( EC_ZEBRA_BAD_MULTIPATH_NUM, - "Multipath Number specified must be less than %d and greater than 0", + "Multipath Number specified must be less than %u and greater than 0", MULTIPATH_NUM); return 1; } + zrouter.multipath_num = parsed_multipath; break; + } case 'o': vrf_default_name_configured = optarg; break; diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 115a69f2c8..4d6346151a 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -150,6 +150,43 @@ static void zebra_redistribute(struct zserv *client, int type, } } +/* + * Function to check if prefix is candidate for + * redistribute. 
+ */ +static bool zebra_redistribute_check(const struct route_entry *re, + struct zserv *client, + const struct prefix *p, int afi) +{ + /* Process only if there is valid re */ + if (!re) + return false; + + /* If default route and redistributed */ + if (is_default_prefix(p) + && vrf_bitmap_check(client->redist_default[afi], re->vrf_id)) + return true; + + /* If redistribute in enabled for zebra route all */ + if (vrf_bitmap_check(client->redist[afi][ZEBRA_ROUTE_ALL], re->vrf_id)) + return true; + + /* + * If multi-instance then check for route + * redistribution for given instance. + */ + if (re->instance + && redist_check_instance(&client->mi_redist[afi][re->type], + re->instance)) + return true; + + /* If redistribution is enabled for give route type. */ + if (vrf_bitmap_check(client->redist[afi][re->type], re->vrf_id)) + return true; + + return false; +} + /* Either advertise a route for redistribution to registered clients or */ /* withdraw redistribution if add cannot be done for client */ void redistribute_update(const struct prefix *p, const struct prefix *src_p, @@ -158,7 +195,6 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, { struct listnode *node, *nnode; struct zserv *client; - int send_redistribute; int afi; char buf[PREFIX_STRLEN]; @@ -185,25 +221,7 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { - send_redistribute = 0; - - if (is_default_prefix(p) - && vrf_bitmap_check(client->redist_default[afi], - re->vrf_id)) - send_redistribute = 1; - else if (vrf_bitmap_check(client->redist[afi][ZEBRA_ROUTE_ALL], - re->vrf_id)) - send_redistribute = 1; - else if (re->instance - && redist_check_instance( - &client->mi_redist[afi][re->type], - re->instance)) - send_redistribute = 1; - else if (vrf_bitmap_check(client->redist[afi][re->type], - re->vrf_id)) - send_redistribute = 1; - - if (send_redistribute) { + if 
(zebra_redistribute_check(re, client, p, afi)) { if (IS_ZEBRA_DEBUG_RIB) { zlog_debug( "%s: client %s %s(%u), type=%d, distance=%d, metric=%d", @@ -215,18 +233,9 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, } zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, p, src_p, re); - } else if (prev_re - && ((re->instance - && redist_check_instance( - &client->mi_redist[afi] - [prev_re->type], - re->instance)) - || vrf_bitmap_check( - client->redist[afi][prev_re->type], - re->vrf_id))) { + } else if (zebra_redistribute_check(prev_re, client, p, afi)) zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, p, src_p, prev_re); - } } } @@ -294,46 +303,21 @@ void redistribute_delete(const struct prefix *p, const struct prefix *src_p, /* Do not send unsolicited messages to synchronous clients. */ if (client->synchronous) continue; - - if (new_re) { - /* Skip this client if it will receive an update for the - * 'new' re - */ - if (is_default_prefix(p) - && vrf_bitmap_check(client->redist_default[afi], - new_re->vrf_id)) - continue; - else if (vrf_bitmap_check( - client->redist[afi][ZEBRA_ROUTE_ALL], - new_re->vrf_id)) - continue; - else if (new_re->instance - && redist_check_instance( - &client->mi_redist[afi][new_re->type], - new_re->instance)) - continue; - else if (vrf_bitmap_check( - client->redist[afi][new_re->type], - new_re->vrf_id)) - continue; - } + /* + * Skip this client if it will receive an update for the + * 'new' re + */ + if (zebra_redistribute_check(new_re, client, p, afi)) + continue; /* Send a delete for the 'old' re to any subscribed client. 
*/ - if (old_re - && (vrf_bitmap_check(client->redist[afi][ZEBRA_ROUTE_ALL], - old_re->vrf_id) - || (old_re->instance - && redist_check_instance( - &client->mi_redist[afi][old_re->type], - old_re->instance)) - || vrf_bitmap_check(client->redist[afi][old_re->type], - old_re->vrf_id))) { + if (zebra_redistribute_check(old_re, client, p, afi)) zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, p, src_p, old_re); - } } } + void zebra_redistribute_add(ZAPI_HANDLER_ARGS) { afi_t afi = 0; diff --git a/zebra/rib.h b/zebra/rib.h index 931c97638e..3717a12814 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -107,7 +107,7 @@ struct route_entry { /* Uptime. */ time_t uptime; - /* Type fo this route. */ + /* Type of this route. */ int type; /* VRF identifier. */ @@ -347,10 +347,16 @@ extern int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, struct prefix_ipv6 *src_p, const struct nexthop *nh, uint32_t nhe_id, uint32_t table_id, uint32_t metric, uint32_t mtu, uint8_t distance, route_tag_t tag); - +/* + * Multipath route apis. + */ extern int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, struct prefix_ipv6 *src_p, struct route_entry *re, struct nexthop_group *ng); +extern int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, + struct prefix_ipv6 *src_p, + struct route_entry *re, + struct nhg_hash_entry *nhe); extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, unsigned short instance, int flags, struct prefix *p, diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 5a1ae2c217..5c9d2f69a6 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -718,14 +718,15 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, if (IS_ZEBRA_DEBUG_KERNEL) { char buf[PREFIX_STRLEN]; char buf2[PREFIX_STRLEN]; - zlog_debug("%s %s%s%s vrf %u(%u) metric: %d Admin Distance: %d", - nl_msg_type_to_str(h->nlmsg_type), - prefix2str(&p, buf, sizeof(buf)), - src_p.prefixlen ? 
" from " : "", - src_p.prefixlen - ? prefix2str(&src_p, buf2, sizeof(buf2)) - : "", - vrf_id, table, metric, distance); + zlog_debug( + "%s %s%s%s vrf %s(%u) table_id: %u metric: %d Admin Distance: %d", + nl_msg_type_to_str(h->nlmsg_type), + prefix2str(&p, buf, sizeof(buf)), + src_p.prefixlen ? " from " : "", + src_p.prefixlen ? prefix2str(&src_p, buf2, sizeof(buf2)) + : "", + vrf_id_to_name(vrf_id), vrf_id, table, metric, + distance); } afi_t afi = AFI_IP; @@ -911,9 +912,8 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h, ifp = if_lookup_by_index(iif, vrf); zlog_debug( "MCAST VRF: %s(%d) %s (%s,%s) IIF: %s(%d) OIF: %s jiffies: %lld", - (zvrf ? zvrf->vrf->name : "Unknown"), vrf, - nl_msg_type_to_str(h->nlmsg_type), sbuf, gbuf, - ifp ? ifp->name : "Unknown", iif, oif_list, + zvrf_name(zvrf), vrf, nl_msg_type_to_str(h->nlmsg_type), + sbuf, gbuf, ifp ? ifp->name : "Unknown", iif, oif_list, m->lastused); } return 0; @@ -983,7 +983,7 @@ static int netlink_request_route(struct zebra_ns *zns, int family, int type) req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); req.rtm.rtm_family = family; - return netlink_request(&zns->netlink_cmd, &req.n); + return netlink_request(&zns->netlink_cmd, &req); } /* Routing table read function using netlink interface. Only called @@ -1122,9 +1122,13 @@ static void _netlink_route_build_singlepath(const struct prefix *p, mpls_lse_t out_lse[MPLS_MAX_LABELS]; char label_buf[256]; int num_labels = 0; + struct vrf *vrf; + char addrstr[INET6_ADDRSTRLEN]; assert(nexthop); + vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* * label_buf is *only* currently used within debugging. 
* As such when we assign it we are guarding it inside @@ -1176,10 +1180,10 @@ static void _netlink_route_build_singlepath(const struct prefix *p, &nexthop->src.ipv4, bytelen); if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - " 5549: _netlink_route_build_singlepath() (%s): %pFX nexthop via %s %s if %u(%u)", - routedesc, p, ipv4_ll_buf, label_buf, - nexthop->ifindex, nexthop->vrf_id); + zlog_debug("%s: 5549 (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + __func__, routedesc, p, ipv4_ll_buf, + label_buf, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); return; } @@ -1200,11 +1204,14 @@ static void _netlink_route_build_singlepath(const struct prefix *p, &nexthop->src.ipv4, bytelen); } - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u(%u)", - routedesc, p, inet_ntoa(nexthop->gate.ipv4), - label_buf, nexthop->ifindex, nexthop->vrf_id); + if (IS_ZEBRA_DEBUG_KERNEL) { + inet_ntop(AF_INET, &nexthop->gate.ipv4, addrstr, + sizeof(addrstr)); + zlog_debug("%s: (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + __func__, routedesc, p, addrstr, label_buf, + nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); + } } if (nexthop->type == NEXTHOP_TYPE_IPV6 @@ -1222,11 +1229,14 @@ static void _netlink_route_build_singlepath(const struct prefix *p, &nexthop->src.ipv6, bytelen); } - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u(%u)", - routedesc, p, inet6_ntoa(nexthop->gate.ipv6), - label_buf, nexthop->ifindex, nexthop->vrf_id); + if (IS_ZEBRA_DEBUG_KERNEL) { + inet_ntop(AF_INET6, &nexthop->gate.ipv6, addrstr, + sizeof(addrstr)); + zlog_debug("%s: (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + __func__, routedesc, p, addrstr, label_buf, + nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); + } } /* @@ -1248,10 +1258,9 @@ static void _netlink_route_build_singlepath(const struct prefix *p, } if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath() 
(%s): %pFX nexthop via if %u(%u)", - routedesc, p, nexthop->ifindex, - nexthop->vrf_id); + zlog_debug("%s: (%s): %pFX nexthop via if %u vrf %s(%u)", + __func__, routedesc, p, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); } } @@ -1280,6 +1289,7 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc, mpls_lse_t out_lse[MPLS_MAX_LABELS]; char label_buf[256]; int num_labels = 0; + struct vrf *vrf; rtnh->rtnh_len = sizeof(*rtnh); rtnh->rtnh_flags = 0; @@ -1288,6 +1298,8 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc, assert(nexthop); + vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* * label_buf is *only* currently used within debugging. * As such when we assign it we are guarding it inside @@ -1347,9 +1359,10 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - " 5549: netlink_route_build_multipath() (%s): %pFX nexthop via %s %s if %u", - routedesc, p, ipv4_ll_buf, label_buf, - nexthop->ifindex); + "%s: 5549 (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + __func__, routedesc, p, ipv4_ll_buf, label_buf, + nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); return; } @@ -1364,10 +1377,10 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc, *src = &nexthop->src; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u", - routedesc, p, inet_ntoa(nexthop->gate.ipv4), - label_buf, nexthop->ifindex); + zlog_debug("%s: (%s): %pFX nexthop via %pI4 %s if %u vrf %s(%u)", + __func__, routedesc, p, &nexthop->gate.ipv4, + label_buf, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); } if (nexthop->type == NEXTHOP_TYPE_IPV6 || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) { @@ -1381,10 +1394,10 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc, *src = &nexthop->src; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath() 
(%s): %pFX nexthop via %s %s if %u", - routedesc, p, inet6_ntoa(nexthop->gate.ipv6), - label_buf, nexthop->ifindex); + zlog_debug("%s: (%s): %pFX nexthop via %pI6 %s if %u vrf %s(%u)", + __func__, routedesc, p, &nexthop->gate.ipv6, + label_buf, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); } /* @@ -1403,9 +1416,9 @@ _netlink_route_build_multipath(const struct prefix *p, const char *routedesc, *src = &nexthop->src; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath() (%s): %pFX nexthop via if %u", - routedesc, p, nexthop->ifindex); + zlog_debug("%s: (%s): %pFX nexthop via if %u vrf %s(%u)", + __func__, routedesc, p, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); } if (nexthop->weight) @@ -1444,37 +1457,6 @@ _netlink_mpls_build_multipath(const struct prefix *p, const char *routedesc, rta, rtnh, rtmsg, src); } - -/* Log debug information for netlink_route_multipath - * if debug logging is enabled. - * - * @param cmd: Netlink command which is to be processed - * @param p: Prefix for which the change is due - * @param family: Address family which the change concerns - * @param zvrf: The vrf we are in - * @param tableid: The table we are working on - */ -static void _netlink_route_debug(int cmd, const struct prefix *p, - int family, vrf_id_t vrfid, - uint32_t tableid) -{ - if (IS_ZEBRA_DEBUG_KERNEL) { - char buf[PREFIX_STRLEN]; - zlog_debug( - "netlink_route_multipath(): %s %s vrf %u(%u)", - nl_msg_type_to_str(cmd), - prefix2str(p, buf, sizeof(buf)), - vrfid, tableid); - } -} - -static void _netlink_nexthop_debug(int cmd, uint32_t id) -{ - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("netlink_nexthop(): %s, id=%u", - nl_msg_type_to_str(cmd), id); -} - static void _netlink_mpls_debug(int cmd, uint32_t label, const char *routedesc) { if (IS_ZEBRA_DEBUG_KERNEL) @@ -1539,15 +1521,32 @@ static bool nexthop_set_src(const struct nexthop *nexthop, int family, return false; } +static void netlink_route_nexthop_encap(struct nlmsghdr *n, size_t nlen, 
+ struct nexthop *nh) +{ + struct rtattr *nest; + + switch (nh->nh_encap_type) { + case NET_VXLAN: + addattr_l(n, nlen, RTA_ENCAP_TYPE, &nh->nh_encap_type, + sizeof(uint16_t)); + + nest = addattr_nest(n, nlen, RTA_ENCAP); + addattr32(n, nlen, 0 /* VXLAN_VNI */, nh->nh_encap.vni); + addattr_nest_end(n, nest); + break; + } +} + /* * Routing table change via netlink interface, using a dataplane context object */ -static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) +ssize_t netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx, + uint8_t *data, size_t datalen, bool fpm) { int bytelen; struct nexthop *nexthop = NULL; unsigned int nexthop_num; - int family; const char *routedesc; bool setsrc = false; union g_addr src; @@ -1557,38 +1556,36 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) struct { struct nlmsghdr n; struct rtmsg r; - char buf[NL_PKT_BUF_SIZE]; - } req; + char buf[]; + } *req = (void *)data; p = dplane_ctx_get_dest(ctx); src_p = dplane_ctx_get_src(ctx); - family = PREFIX_FAMILY(p); + memset(req, 0, sizeof(*req)); - memset(&req, 0, sizeof(req) - NL_PKT_BUF_SIZE); - - bytelen = (family == AF_INET ? 4 : 16); + bytelen = (p->family == AF_INET ? 4 : 16); - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); - req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST; + req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); + req->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST; if ((cmd == RTM_NEWROUTE) && ((p->family == AF_INET) || v6_rr_semantics)) - req.n.nlmsg_flags |= NLM_F_REPLACE; + req->n.nlmsg_flags |= NLM_F_REPLACE; - req.n.nlmsg_type = cmd; + req->n.nlmsg_type = cmd; - req.n.nlmsg_pid = dplane_ctx_get_ns(ctx)->nls.snl.nl_pid; + req->n.nlmsg_pid = dplane_ctx_get_ns(ctx)->nls.snl.nl_pid; - req.r.rtm_family = family; - req.r.rtm_dst_len = p->prefixlen; - req.r.rtm_src_len = src_p ? 
src_p->prefixlen : 0; - req.r.rtm_scope = RT_SCOPE_UNIVERSE; + req->r.rtm_family = p->family; + req->r.rtm_dst_len = p->prefixlen; + req->r.rtm_src_len = src_p ? src_p->prefixlen : 0; + req->r.rtm_scope = RT_SCOPE_UNIVERSE; if (cmd == RTM_DELROUTE) - req.r.rtm_protocol = zebra2proto(dplane_ctx_get_old_type(ctx)); + req->r.rtm_protocol = zebra2proto(dplane_ctx_get_old_type(ctx)); else - req.r.rtm_protocol = zebra2proto(dplane_ctx_get_type(ctx)); + req->r.rtm_protocol = zebra2proto(dplane_ctx_get_type(ctx)); /* * blackhole routes are not RTN_UNICAST, they are @@ -1599,12 +1596,11 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) * the RTM_DELROUTE case */ if (cmd != RTM_DELROUTE) - req.r.rtm_type = RTN_UNICAST; + req->r.rtm_type = RTN_UNICAST; - addattr_l(&req.n, sizeof(req), RTA_DST, &p->u.prefix, bytelen); + addattr_l(&req->n, datalen, RTA_DST, &p->u.prefix, bytelen); if (src_p) - addattr_l(&req.n, sizeof(req), RTA_SRC, &src_p->u.prefix, - bytelen); + addattr_l(&req->n, datalen, RTA_SRC, &src_p->u.prefix, bytelen); /* Metric. */ /* Hardcode the metric for all routes coming from zebra. Metric isn't @@ -1613,7 +1609,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) * path(s) * by the routing protocol and for communicating with protocol peers. */ - addattr32(&req.n, sizeof(req), RTA_PRIORITY, NL_DEFAULT_ROUTE_METRIC); + addattr32(&req->n, datalen, RTA_PRIORITY, NL_DEFAULT_ROUTE_METRIC); #if defined(SUPPORT_REALMS) { @@ -1625,19 +1621,23 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) tag = dplane_ctx_get_tag(ctx); if (tag > 0 && tag <= 255) - addattr32(&req.n, sizeof(req), RTA_FLOW, tag); + addattr32(&req->n, datalen, RTA_FLOW, tag); } #endif /* Table corresponding to this route. 
*/ table_id = dplane_ctx_get_table(ctx); if (table_id < 256) - req.r.rtm_table = table_id; + req->r.rtm_table = table_id; else { - req.r.rtm_table = RT_TABLE_UNSPEC; - addattr32(&req.n, sizeof(req), RTA_TABLE, table_id); + req->r.rtm_table = RT_TABLE_UNSPEC; + addattr32(&req->n, datalen, RTA_TABLE, table_id); } - _netlink_route_debug(cmd, p, family, dplane_ctx_get_vrf(ctx), table_id); + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug( + "%s: %s %pFX vrf %u(%u)", __func__, + nl_msg_type_to_str(cmd), p, dplane_ctx_get_vrf(ctx), + table_id); /* * If we are not updating the route and we have received @@ -1646,7 +1646,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) * it. */ if (cmd == RTM_DELROUTE) - goto skip; + return req->n.nlmsg_len; if (dplane_ctx_get_mtu(ctx) || dplane_ctx_get_nh_mtu(ctx)) { char buf[NL_PKT_BUF_SIZE]; @@ -1660,7 +1660,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) rta->rta_len = RTA_LENGTH(0); rta_addattr_l(rta, NL_PKT_BUF_SIZE, RTAX_MTU, &mtu, sizeof(mtu)); - addattr_l(&req.n, NL_PKT_BUF_SIZE, RTA_METRICS, RTA_DATA(rta), + addattr_l(&req->n, datalen, RTA_METRICS, RTA_DATA(rta), RTA_PAYLOAD(rta)); } @@ -1670,7 +1670,8 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) zlog_debug( "netlink_route_multipath(): %pFX nhg_id is %u", p, dplane_ctx_get_nhe_id(ctx)); - addattr32(&req.n, sizeof(req), RTA_NH_ID, + + addattr32(&req->n, datalen, RTA_NH_ID, dplane_ctx_get_nhe_id(ctx)); /* Have to determine src still */ @@ -1678,18 +1679,19 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (setsrc) break; - setsrc = nexthop_set_src(nexthop, family, &src); + setsrc = nexthop_set_src(nexthop, p->family, &src); } if (setsrc) { - if (family == AF_INET) - addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + if (p->family == AF_INET) + addattr_l(&req->n, datalen, RTA_PREFSRC, &src.ipv4, bytelen); - else if (family == AF_INET6) - addattr_l(&req.n, sizeof(req), 
RTA_PREFSRC, + else if (p->family == AF_INET6) + addattr_l(&req->n, datalen, RTA_PREFSRC, &src.ipv6, bytelen); } - goto skip; + + return req->n.nlmsg_len; } /* Count overall nexthops so we can decide whether to use singlepath @@ -1699,7 +1701,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx), nexthop)) { if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) continue; - if (cmd == RTM_NEWROUTE && !NEXTHOP_IS_ACTIVE(nexthop->flags)) + if (!NEXTHOP_IS_ACTIVE(nexthop->flags)) continue; nexthop_num++; @@ -1719,16 +1721,16 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (nexthop->type == NEXTHOP_TYPE_BLACKHOLE) { switch (nexthop->bh_type) { case BLACKHOLE_ADMINPROHIB: - req.r.rtm_type = RTN_PROHIBIT; + req->r.rtm_type = RTN_PROHIBIT; break; case BLACKHOLE_REJECT: - req.r.rtm_type = RTN_UNREACHABLE; + req->r.rtm_type = RTN_UNREACHABLE; break; default: - req.r.rtm_type = RTN_BLACKHOLE; + req->r.rtm_type = RTN_BLACKHOLE; break; } - goto skip; + return req->n.nlmsg_len; } if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) { @@ -1736,30 +1738,38 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (setsrc) continue; - setsrc = nexthop_set_src(nexthop, family, &src); - + setsrc = nexthop_set_src(nexthop, p->family, + &src); continue; } - if ((cmd == RTM_NEWROUTE - && NEXTHOP_IS_ACTIVE(nexthop->flags))) { + if (NEXTHOP_IS_ACTIVE(nexthop->flags)) { routedesc = nexthop->rparent ? "recursive, single-path" : "single-path"; _netlink_route_build_singlepath( - p, routedesc, bytelen, nexthop, &req.n, - &req.r, sizeof(req), cmd); + p, routedesc, bytelen, nexthop, &req->n, + &req->r, datalen, cmd); nexthop_num++; break; } + + /* + * Add encapsulation information when installing via + * FPM. 
+ */ + if (fpm) + netlink_route_nexthop_encap(&req->n, datalen, + nexthop); } + if (setsrc) { - if (family == AF_INET) - addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + if (p->family == AF_INET) + addattr_l(&req->n, datalen, RTA_PREFSRC, &src.ipv4, bytelen); - else if (family == AF_INET6) - addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + else if (p->family == AF_INET6) + addattr_l(&req->n, datalen, RTA_PREFSRC, &src.ipv6, bytelen); } } else { /* Multipath case */ @@ -1780,13 +1790,12 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (setsrc) continue; - setsrc = nexthop_set_src(nexthop, family, &src); - + setsrc = nexthop_set_src(nexthop, p->family, + &src); continue; } - if ((cmd == RTM_NEWROUTE - && NEXTHOP_IS_ACTIVE(nexthop->flags))) { + if (NEXTHOP_IS_ACTIVE(nexthop->flags)) { routedesc = nexthop->rparent ? "recursive, multipath" : "multipath"; @@ -1794,47 +1803,51 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) _netlink_route_build_multipath( p, routedesc, bytelen, nexthop, rta, - rtnh, &req.r, &src1); + rtnh, &req->r, &src1); rtnh = RTNH_NEXT(rtnh); if (!setsrc && src1) { - if (family == AF_INET) + if (p->family == AF_INET) src.ipv4 = src1->ipv4; - else if (family == AF_INET6) + else if (p->family == AF_INET6) src.ipv6 = src1->ipv6; setsrc = 1; } } + + /* + * Add encapsulation information when installing via + * FPM. 
+ */ + if (fpm) + netlink_route_nexthop_encap(&req->n, datalen, + nexthop); } + if (setsrc) { - if (family == AF_INET) - addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + if (p->family == AF_INET) + addattr_l(&req->n, datalen, RTA_PREFSRC, &src.ipv4, bytelen); - else if (family == AF_INET6) - addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + else if (p->family == AF_INET6) + addattr_l(&req->n, datalen, RTA_PREFSRC, &src.ipv6, bytelen); if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("Setting source"); } if (rta->rta_len > RTA_LENGTH(0)) - addattr_l(&req.n, NL_PKT_BUF_SIZE, RTA_MULTIPATH, + addattr_l(&req->n, datalen, RTA_MULTIPATH, RTA_DATA(rta), RTA_PAYLOAD(rta)); } /* If there is no useful nexthop then return. */ if (nexthop_num == 0) { if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "netlink_route_multipath(): No useful nexthop."); - return 0; + zlog_debug("%s: No useful nexthop.", __func__); } -skip: - /* Talk to netlink socket. */ - return netlink_talk_info(netlink_talk_filter, &req.n, - dplane_ctx_get_ns(ctx), 0); + return req->n.nlmsg_len; } int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in) @@ -1982,6 +1995,12 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) addattr32(&req.n, req_size, NHA_ID, id); if (cmd == RTM_NEWNEXTHOP) { + /* + * We distinguish between a "group", which is a collection + * of ids, and a singleton nexthop with an id. The + * group is installed as an id that just refers to a list of + * other ids. 
+ */ if (dplane_ctx_get_nhe_nh_grp_count(ctx)) _netlink_nexthop_build_group( &req.n, req_size, id, @@ -2068,14 +2087,13 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) } } - nexthop_done: - if (IS_ZEBRA_DEBUG_KERNEL) { - char buf[NEXTHOP_STRLEN]; +nexthop_done: - snprintfrr(buf, sizeof(buf), "%pNHv", nh); - zlog_debug("%s: ID (%u): %s (%u) %s ", __func__, - id, buf, nh->vrf_id, label_buf); - } + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s: ID (%u): %pNHv vrf %s(%u) %s ", + __func__, id, nh, + vrf_id_to_name(nh->vrf_id), + nh->vrf_id, label_buf); } req.nhm.nh_protocol = zebra2proto(dplane_ctx_get_nhe_type(ctx)); @@ -2088,7 +2106,9 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) return -1; } - _netlink_nexthop_debug(cmd, id); + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s: %s, id=%u", __func__, nl_msg_type_to_str(cmd), + id); return netlink_talk_info(netlink_talk_filter, &req.n, dplane_ctx_get_ns(ctx), 0); @@ -2103,43 +2123,19 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) */ enum zebra_dplane_result kernel_nexthop_update(struct zebra_dplane_ctx *ctx) { + enum dplane_op_e op; int cmd = 0; int ret = 0; - switch (dplane_ctx_get_op(ctx)) { - case DPLANE_OP_NH_DELETE: - cmd = RTM_DELNEXTHOP; - break; - case DPLANE_OP_NH_INSTALL: - case DPLANE_OP_NH_UPDATE: + op = dplane_ctx_get_op(ctx); + if (op == DPLANE_OP_NH_INSTALL || op == DPLANE_OP_NH_UPDATE) cmd = RTM_NEWNEXTHOP; - break; - case DPLANE_OP_ROUTE_INSTALL: - case DPLANE_OP_ROUTE_UPDATE: - case DPLANE_OP_ROUTE_DELETE: - case DPLANE_OP_ROUTE_NOTIFY: - case DPLANE_OP_LSP_INSTALL: - case DPLANE_OP_LSP_UPDATE: - case DPLANE_OP_LSP_DELETE: - case DPLANE_OP_LSP_NOTIFY: - case DPLANE_OP_PW_INSTALL: - case DPLANE_OP_PW_UNINSTALL: - case DPLANE_OP_SYS_ROUTE_ADD: - case DPLANE_OP_SYS_ROUTE_DELETE: - case DPLANE_OP_ADDR_INSTALL: - case DPLANE_OP_ADDR_UNINSTALL: - case DPLANE_OP_MAC_INSTALL: - case DPLANE_OP_MAC_DELETE: - case DPLANE_OP_NEIGH_INSTALL: - case 
DPLANE_OP_NEIGH_UPDATE: - case DPLANE_OP_NEIGH_DELETE: - case DPLANE_OP_VTEP_ADD: - case DPLANE_OP_VTEP_DELETE: - case DPLANE_OP_NONE: - flog_err( - EC_ZEBRA_NHG_FIB_UPDATE, - "Context received for kernel nexthop update with incorrect OP code (%u)", - dplane_ctx_get_op(ctx)); + else if (op == DPLANE_OP_NH_DELETE) + cmd = RTM_DELNEXTHOP; + else { + flog_err(EC_ZEBRA_NHG_FIB_UPDATE, + "Context received for kernel nexthop update with incorrect OP code (%u)", + op); return ZEBRA_DPLANE_REQUEST_FAILURE; } @@ -2158,6 +2154,7 @@ enum zebra_dplane_result kernel_route_update(struct zebra_dplane_ctx *ctx) int cmd, ret; const struct prefix *p = dplane_ctx_get_dest(ctx); struct nexthop *nexthop; + uint8_t nl_pkt[NL_PKT_BUF_SIZE]; if (dplane_ctx_get_op(ctx) == DPLANE_OP_ROUTE_DELETE) { cmd = RTM_DELROUTE; @@ -2178,9 +2175,14 @@ enum zebra_dplane_result kernel_route_update(struct zebra_dplane_ctx *ctx) * the kernel the old non-system route */ if (RSYSTEM_ROUTE(dplane_ctx_get_type(ctx)) && - !RSYSTEM_ROUTE(dplane_ctx_get_old_type(ctx))) - (void)netlink_route_multipath(RTM_DELROUTE, - ctx); + !RSYSTEM_ROUTE(dplane_ctx_get_old_type(ctx))) { + netlink_route_multipath(RTM_DELROUTE, ctx, + nl_pkt, sizeof(nl_pkt), + false); + netlink_talk_info(netlink_talk_filter, + (struct nlmsghdr *)nl_pkt, + dplane_ctx_get_ns(ctx), 0); + } } else { /* * So v6 route replace semantics are not in @@ -2194,9 +2196,14 @@ enum zebra_dplane_result kernel_route_update(struct zebra_dplane_ctx *ctx) * of the route delete. If that happens yeah we're * screwed. 
*/ - if (!RSYSTEM_ROUTE(dplane_ctx_get_old_type(ctx))) - (void)netlink_route_multipath(RTM_DELROUTE, - ctx); + if (!RSYSTEM_ROUTE(dplane_ctx_get_old_type(ctx))) { + netlink_route_multipath(RTM_DELROUTE, ctx, + nl_pkt, sizeof(nl_pkt), + false); + netlink_talk_info(netlink_talk_filter, + (struct nlmsghdr *)nl_pkt, + dplane_ctx_get_ns(ctx), 0); + } cmd = RTM_NEWROUTE; } @@ -2204,9 +2211,13 @@ enum zebra_dplane_result kernel_route_update(struct zebra_dplane_ctx *ctx) return ZEBRA_DPLANE_REQUEST_FAILURE; } - if (!RSYSTEM_ROUTE(dplane_ctx_get_type(ctx))) - ret = netlink_route_multipath(cmd, ctx); - else + if (!RSYSTEM_ROUTE(dplane_ctx_get_type(ctx))) { + netlink_route_multipath(cmd, ctx, nl_pkt, sizeof(nl_pkt), + false); + ret = netlink_talk_info(netlink_talk_filter, + (struct nlmsghdr *)nl_pkt, + dplane_ctx_get_ns(ctx), 0); + } else ret = 0; if ((cmd == RTM_NEWROUTE) && (ret == 0)) { /* Update installed nexthops to signal which have been @@ -2481,7 +2492,7 @@ static int netlink_request_nexthop(struct zebra_ns *zns, int family, int type) req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); req.nhm.nh_family = family; - return netlink_request(&zns->netlink_cmd, &req.n); + return netlink_request(&zns->netlink_cmd, &req); } @@ -2528,44 +2539,95 @@ int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla, addr, lla, llalen, ns_id); } -/* - * Add remote VTEP to the flood list for this VxLAN interface (VNI). This - * is done by adding an FDB entry with a MAC of 00:00:00:00:00:00. +/** + * netlink_update_neigh_ctx_internal() - Common helper api for evpn + * neighbor updates using dataplane context object. 
+ * @ctx: Dataplane context + * @cmd: Netlink command (RTM_NEWNEIGH or RTM_DELNEIGH) + * @mac: A neighbor cache link layer address + * @ip: A neighbor cache n/w layer destination address + * @replace_obj: Whether NEW request should replace existing object or + * add to the end of the list + * @family: AF_* netlink family + * @type: RTN_* route type + * @flags: NTF_* flags + * @state: NUD_* states + * @data: data buffer pointer + * @datalen: total amount of data buffer space + * + * Return: Result status */ -static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, - int cmd) +static ssize_t +netlink_update_neigh_ctx_internal(const struct zebra_dplane_ctx *ctx, + int cmd, const struct ethaddr *mac, + const struct ipaddr *ip, bool replace_obj, + uint8_t family, uint8_t type, uint8_t flags, + uint16_t state, void *data, size_t datalen) { uint8_t protocol = RTPROT_ZEBRA; struct { struct nlmsghdr n; struct ndmsg ndm; - char buf[256]; - } req; - uint8_t dst_mac[6] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; - const struct ipaddr *addr; + char buf[]; + } *req = data; + int ipa_len; + enum dplane_op_e op; - memset(&req, 0, sizeof(req)); + memset(req, 0, datalen); - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); - req.n.nlmsg_flags = NLM_F_REQUEST; + op = dplane_ctx_get_op(ctx); + + req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); + req->n.nlmsg_flags = NLM_F_REQUEST; if (cmd == RTM_NEWNEIGH) - req.n.nlmsg_flags |= (NLM_F_CREATE | NLM_F_APPEND); - req.n.nlmsg_type = cmd; - req.ndm.ndm_family = PF_BRIDGE; - req.ndm.ndm_state = NUD_NOARP | NUD_PERMANENT; - req.ndm.ndm_flags |= NTF_SELF; /* Handle by "self", not "master" */ + req->n.nlmsg_flags |= + NLM_F_CREATE + | (replace_obj ? 
NLM_F_REPLACE : NLM_F_APPEND); + req->n.nlmsg_type = cmd; + req->ndm.ndm_family = family; + req->ndm.ndm_type = type; + req->ndm.ndm_state = state; + req->ndm.ndm_flags = flags; + req->ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); + + addattr_l(&req->n, sizeof(req), + NDA_PROTOCOL, &protocol, sizeof(protocol)); + if (mac) + addattr_l(&req->n, datalen, NDA_LLADDR, mac, 6); + ipa_len = IS_IPADDR_V4(ip) ? IPV4_MAX_BYTELEN : IPV6_MAX_BYTELEN; + addattr_l(&req->n, datalen, NDA_DST, &ip->ip.addr, ipa_len); - addattr_l(&req.n, sizeof(req), - NDA_PROTOCOL, &protocol, sizeof(protocol)); - addattr_l(&req.n, sizeof(req), NDA_LLADDR, &dst_mac, 6); - req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); + if (op == DPLANE_OP_MAC_INSTALL || op == DPLANE_OP_MAC_DELETE) { + vlanid_t vid = dplane_ctx_mac_get_vlan(ctx); - addr = dplane_ctx_neigh_get_ipaddr(ctx); + if (vid > 0) + addattr16(&req->n, datalen, NDA_VLAN, vid); - addattr_l(&req.n, sizeof(req), NDA_DST, &(addr->ipaddr_v4), 4); + addattr32(&req->n, datalen, NDA_MASTER, + dplane_ctx_mac_get_br_ifindex(ctx)); + } - return netlink_talk_info(netlink_talk_filter, &req.n, + return NLMSG_ALIGN(req->n.nlmsg_len); +} + +/* + * Add remote VTEP to the flood list for this VxLAN interface (VNI). This + * is done by adding an FDB entry with a MAC of 00:00:00:00:00:00. 
+ */ +static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, + int cmd) +{ + struct ethaddr dst_mac = {.octet = {0}}; + uint8_t nl_pkt[NL_PKT_BUF_SIZE]; + + netlink_update_neigh_ctx_internal( + ctx, cmd, &dst_mac, dplane_ctx_neigh_get_ipaddr(ctx), false, + PF_BRIDGE, 0, NTF_SELF, (NUD_NOARP | NUD_PERMANENT), nl_pkt, + sizeof(nl_pkt)); + + return netlink_talk_info(netlink_talk_filter, + (struct nlmsghdr *)nl_pkt, dplane_ctx_get_ns(ctx), 0); } @@ -2760,7 +2822,7 @@ static int netlink_request_macs(struct nlsock *netlink_cmd, int family, if (master_ifindex) addattr32(&req.n, sizeof(req), IFLA_MASTER, master_ifindex); - return netlink_request(netlink_cmd, &req.n); + return netlink_request(netlink_cmd, &req); } /* @@ -2856,12 +2918,14 @@ static int netlink_request_specific_mac_in_bridge(struct zebra_ns *zns, addattr32(&req.n, sizeof(req), NDA_MASTER, br_if->ifindex); if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: Tx family %s IF %s(%u) MAC %s vid %u", __func__, - nl_family_to_str(req.ndm.ndm_family), br_if->name, - br_if->ifindex, - prefix_mac2str(mac, buf, sizeof(buf)), vid); + zlog_debug( + "%s: Tx family %s IF %s(%u) vrf %s(%u) MAC %s vid %u", + __func__, nl_family_to_str(req.ndm.ndm_family), + br_if->name, br_if->ifindex, + vrf_id_to_name(br_if->vrf_id), br_if->vrf_id, + prefix_mac2str(mac, buf, sizeof(buf)), vid); - return netlink_request(&zns->netlink_cmd, &req.n); + return netlink_request(&zns->netlink_cmd, &req); } int netlink_macfdb_read_specific_mac(struct zebra_ns *zns, @@ -2890,92 +2954,60 @@ int netlink_macfdb_read_specific_mac(struct zebra_ns *zns, /* * Netlink-specific handler for MAC updates using dataplane context object. 
*/ -static enum zebra_dplane_result -netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx) +ssize_t +netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx, uint8_t *data, + size_t datalen) { - uint8_t protocol = RTPROT_ZEBRA; - struct { - struct nlmsghdr n; - struct ndmsg ndm; - char buf[256]; - } req; - int ret; - int dst_alen; - int vid_present = 0; - int cmd; - struct in_addr vtep_ip; + struct ipaddr vtep_ip; vlanid_t vid; + ssize_t total; + int cmd; + uint8_t flags; + uint16_t state; + uint8_t nl_pkt[NL_PKT_BUF_SIZE]; - if (dplane_ctx_get_op(ctx) == DPLANE_OP_MAC_INSTALL) - cmd = RTM_NEWNEIGH; - else - cmd = RTM_DELNEIGH; - - memset(&req, 0, sizeof(req)); + cmd = dplane_ctx_get_op(ctx) == DPLANE_OP_MAC_INSTALL + ? RTM_NEWNEIGH : RTM_DELNEIGH; - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); - req.n.nlmsg_flags = NLM_F_REQUEST; - if (cmd == RTM_NEWNEIGH) - req.n.nlmsg_flags |= (NLM_F_CREATE | NLM_F_REPLACE); - req.n.nlmsg_type = cmd; - req.ndm.ndm_family = AF_BRIDGE; - req.ndm.ndm_flags |= NTF_SELF | NTF_MASTER; - req.ndm.ndm_state = NUD_REACHABLE; + flags = (NTF_SELF | NTF_MASTER); + state = NUD_REACHABLE; if (dplane_ctx_mac_is_sticky(ctx)) - req.ndm.ndm_state |= NUD_NOARP; + state |= NUD_NOARP; else - req.ndm.ndm_flags |= NTF_EXT_LEARNED; - - addattr_l(&req.n, sizeof(req), - NDA_PROTOCOL, &protocol, sizeof(protocol)); - addattr_l(&req.n, sizeof(req), NDA_LLADDR, - dplane_ctx_mac_get_addr(ctx), 6); - req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); - - dst_alen = 4; // TODO: hardcoded - vtep_ip = *(dplane_ctx_mac_get_vtep_ip(ctx)); - addattr_l(&req.n, sizeof(req), NDA_DST, &vtep_ip, dst_alen); + flags |= NTF_EXT_LEARNED; - vid = dplane_ctx_mac_get_vlan(ctx); - - if (vid > 0) { - addattr16(&req.n, sizeof(req), NDA_VLAN, vid); - vid_present = 1; - } - addattr32(&req.n, sizeof(req), NDA_MASTER, - dplane_ctx_mac_get_br_ifindex(ctx)); + vtep_ip.ipaddr_v4 = *(dplane_ctx_mac_get_vtep_ip(ctx)); + SET_IPADDR_V4(&vtep_ip); if (IS_ZEBRA_DEBUG_KERNEL) { char 
ipbuf[PREFIX_STRLEN]; char buf[ETHER_ADDR_STRLEN]; - char dst_buf[PREFIX_STRLEN + 10]; char vid_buf[20]; - if (vid_present) + vid = dplane_ctx_mac_get_vlan(ctx); + if (vid > 0) snprintf(vid_buf, sizeof(vid_buf), " VLAN %u", vid); else vid_buf[0] = '\0'; - inet_ntop(AF_INET, &vtep_ip, ipbuf, sizeof(ipbuf)); - snprintf(dst_buf, sizeof(dst_buf), " dst %s", ipbuf); - prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf, sizeof(buf)); + const struct ethaddr *mac = dplane_ctx_mac_get_addr(ctx); - zlog_debug("Tx %s family %s IF %s(%u)%s %sMAC %s%s", - nl_msg_type_to_str(cmd), - nl_family_to_str(req.ndm.ndm_family), + zlog_debug("Tx %s family %s IF %s(%u)%s %sMAC %s dst %s", + nl_msg_type_to_str(cmd), nl_family_to_str(AF_BRIDGE), dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx), vid_buf, dplane_ctx_mac_is_sticky(ctx) ? "sticky " : "", - buf, dst_buf); + prefix_mac2str(mac, buf, sizeof(buf)), + ipaddr2str(&vtep_ip, ipbuf, sizeof(ipbuf))); } - ret = netlink_talk_info(netlink_talk_filter, &req.n, - dplane_ctx_get_ns(ctx), 0); - if (ret == 0) - return ZEBRA_DPLANE_REQUEST_SUCCESS; - else - return ZEBRA_DPLANE_REQUEST_FAILURE; + total = netlink_update_neigh_ctx_internal( + ctx, cmd, dplane_ctx_mac_get_addr(ctx), + dplane_ctx_neigh_get_ipaddr(ctx), true, AF_BRIDGE, 0, + flags, state, nl_pkt, sizeof(nl_pkt)); + + return total; } /* @@ -3017,6 +3049,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) struct interface *link_if; struct ethaddr mac; struct ipaddr ip; + struct vrf *vrf; char buf[ETHER_ADDR_STRLEN]; char buf2[INET6_ADDRSTRLEN]; int mac_present = 0; @@ -3031,6 +3064,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (!ifp || !ifp->info) return 0; + vrf = vrf_lookup_by_id(ifp->vrf_id); zif = (struct zebra_if *)ifp->info; /* Parse attributes and extract fields of interest. 
*/ @@ -3038,10 +3072,10 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) netlink_parse_rtattr(tb, NDA_MAX, NDA_RTA(ndm), len); if (!tb[NDA_DST]) { - zlog_debug("%s family %s IF %s(%u) - no DST", + zlog_debug("%s family %s IF %s(%u) vrf %s(%u) - no DST", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, - ndm->ndm_ifindex); + ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id); return 0; } @@ -3093,12 +3127,13 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (RTA_PAYLOAD(tb[NDA_LLADDR]) != ETH_ALEN) { if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "%s family %s IF %s(%u) - LLADDR is not MAC, len %lu", + "%s family %s IF %s(%u) vrf %s(%u) - LLADDR is not MAC, len %lu", nl_msg_type_to_str( h->nlmsg_type), nl_family_to_str( ndm->ndm_family), ifp->name, ndm->ndm_ifindex, + VRF_LOGNAME(vrf), ifp->vrf_id, (unsigned long)RTA_PAYLOAD( tb[NDA_LLADDR])); return 0; @@ -3113,10 +3148,10 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "Rx %s family %s IF %s(%u) IP %s MAC %s state 0x%x flags 0x%x", + "Rx %s family %s IF %s(%u) vrf %s(%u) IP %s MAC %s state 0x%x flags 0x%x", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, - ndm->ndm_ifindex, + ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id, ipaddr2str(&ip, buf2, sizeof(buf2)), mac_present ? 
prefix_mac2str(&mac, buf, sizeof(buf)) @@ -3138,10 +3173,10 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) } if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("Rx %s family %s IF %s(%u) IP %s", + zlog_debug("Rx %s family %s IF %s(%u) vrf %s(%u) IP %s", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, - ndm->ndm_ifindex, + ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id, ipaddr2str(&ip, buf2, sizeof(buf2))); /* Process the delete - it may result in re-adding the neighbor if it is @@ -3190,7 +3225,7 @@ static int netlink_request_neigh(struct nlsock *netlink_cmd, int family, if (ifindex) addattr32(&req.n, sizeof(req), NDA_IFINDEX, ifindex); - return netlink_request(netlink_cmd, &req.n); + return netlink_request(netlink_cmd, &req); } /* @@ -3278,7 +3313,7 @@ static int netlink_request_specific_neigh_in_vlan(struct zebra_ns *zns, ipaddr2str(ip, buf, sizeof(buf)), req.n.nlmsg_flags); } - return netlink_request(&zns->netlink_cmd, &req.n); + return netlink_request(&zns->netlink_cmd, &req); } int netlink_neigh_read_specific_ip(struct ipaddr *ip, @@ -3295,9 +3330,10 @@ int netlink_neigh_read_specific_ip(struct ipaddr *ip, zebra_dplane_info_from_zns(&dp_info, zns, true /*is_cmd*/); if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: neigh request IF %s(%u) IP %s vrf_id %u", + zlog_debug("%s: neigh request IF %s(%u) IP %s vrf %s(%u)", __func__, vlan_if->name, vlan_if->ifindex, - ipaddr2str(ip, buf, sizeof(buf)), vlan_if->vrf_id); + ipaddr2str(ip, buf, sizeof(buf)), + vrf_id_to_name(vlan_if->vrf_id), vlan_if->vrf_id); ret = netlink_request_specific_neigh_in_vlan(zns, RTM_GETNEIGH, ip, vlan_if->ifindex); @@ -3355,21 +3391,12 @@ int netlink_neigh_change(struct nlmsghdr *h, ns_id_t ns_id) static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, int cmd) { - uint8_t protocol = RTPROT_ZEBRA; - struct { - struct nlmsghdr n; - struct ndmsg ndm; - char buf[256]; - } req; - int ipa_len; - char buf[INET6_ADDRSTRLEN]; - char 
buf2[ETHER_ADDR_STRLEN]; const struct ipaddr *ip; const struct ethaddr *mac; uint8_t flags; uint16_t state; - - memset(&req, 0, sizeof(req)); + uint8_t family; + uint8_t nl_pkt[NL_PKT_BUF_SIZE]; ip = dplane_ctx_neigh_get_ipaddr(ctx); mac = dplane_ctx_neigh_get_mac(ctx); @@ -3379,36 +3406,26 @@ static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, flags = neigh_flags_to_netlink(dplane_ctx_neigh_get_flags(ctx)); state = neigh_state_to_netlink(dplane_ctx_neigh_get_state(ctx)); - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); - req.n.nlmsg_flags = NLM_F_REQUEST; - if (cmd == RTM_NEWNEIGH) - req.n.nlmsg_flags |= (NLM_F_CREATE | NLM_F_REPLACE); - req.n.nlmsg_type = cmd; // RTM_NEWNEIGH or RTM_DELNEIGH - req.ndm.ndm_family = IS_IPADDR_V4(ip) ? AF_INET : AF_INET6; - req.ndm.ndm_state = state; - req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); - req.ndm.ndm_type = RTN_UNICAST; - req.ndm.ndm_flags = flags; + family = IS_IPADDR_V4(ip) ? AF_INET : AF_INET6; - addattr_l(&req.n, sizeof(req), - NDA_PROTOCOL, &protocol, sizeof(protocol)); - ipa_len = IS_IPADDR_V4(ip) ? IPV4_MAX_BYTELEN : IPV6_MAX_BYTELEN; - addattr_l(&req.n, sizeof(req), NDA_DST, &ip->ip.addr, ipa_len); - if (mac) - addattr_l(&req.n, sizeof(req), NDA_LLADDR, mac, 6); + if (IS_ZEBRA_DEBUG_KERNEL) { + char buf[INET6_ADDRSTRLEN]; + char buf2[ETHER_ADDR_STRLEN]; - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x", - nl_msg_type_to_str(cmd), - nl_family_to_str(req.ndm.ndm_family), - dplane_ctx_get_ifname(ctx), - dplane_ctx_get_ifindex(ctx), - ipaddr2str(ip, buf, sizeof(buf)), - mac ? prefix_mac2str(mac, buf2, sizeof(buf2)) - : "null", - flags, state); + zlog_debug( + "Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x", + nl_msg_type_to_str(cmd), nl_family_to_str(family), + dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx), + ipaddr2str(ip, buf, sizeof(buf)), + mac ? 
prefix_mac2str(mac, buf2, sizeof(buf2)) : "null", + flags, state); + } - return netlink_talk_info(netlink_talk_filter, &req.n, + netlink_update_neigh_ctx_internal( + ctx, cmd, mac, ip, true, family, RTN_UNICAST, flags, + state, nl_pkt, sizeof(nl_pkt)); + + return netlink_talk_info(netlink_talk_filter, (struct nlmsghdr *)nl_pkt, dplane_ctx_get_ns(ctx), 0); } @@ -3417,7 +3434,18 @@ static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, */ enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx) { - return netlink_macfdb_update_ctx(ctx); + uint8_t nl_pkt[NL_PKT_BUF_SIZE]; + ssize_t rv; + + rv = netlink_macfdb_update_ctx(ctx, nl_pkt, sizeof(nl_pkt)); + if (rv <= 0) + return ZEBRA_DPLANE_REQUEST_FAILURE; + + rv = netlink_talk_info(netlink_talk_filter, (struct nlmsghdr *)nl_pkt, + dplane_ctx_get_ns(ctx), 0); + + return rv == 0 ? + ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE; } enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx) diff --git a/zebra/rt_netlink.h b/zebra/rt_netlink.h index 2b4b145149..d6a993e78a 100644 --- a/zebra/rt_netlink.h +++ b/zebra/rt_netlink.h @@ -66,6 +66,12 @@ void rt_netlink_init(void); /* MPLS label forwarding table change, using dataplane context information. 
*/ extern int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx); +extern ssize_t netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx, + uint8_t *data, size_t datalen, + bool fpm); +extern ssize_t netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx, + uint8_t *data, size_t datalen); + extern int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup); extern int netlink_route_read(struct zebra_ns *zns); diff --git a/zebra/rtadv.c b/zebra/rtadv.c index 60ac471b5a..11434edfcf 100644 --- a/zebra/rtadv.c +++ b/zebra/rtadv.c @@ -46,6 +46,10 @@ #include "zebra/zebra_errors.h" #include "zebra/zebra_router.h" +#ifndef VTYSH_EXTRACT_PL +#include "zebra/rtadv_clippy.c" +#endif + extern struct zebra_privs_t zserv_privs; #if defined(HAVE_RTADV) @@ -204,9 +208,12 @@ static void rtadv_send_packet(int sock, struct interface *ifp, } /* Logging of packet. */ - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Tx RA, socket %u", ifp->name, ifp->ifindex, - sock); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Tx RA, socket %u", ifp->name, + VRF_LOGNAME(vrf), ifp->ifindex, sock); + } /* Fill in sockaddr_in6. */ memset(&addr, 0, sizeof(struct sockaddr_in6)); @@ -227,7 +234,7 @@ static void rtadv_send_packet(int sock, struct interface *ifp, rtadv->nd_ra_code = 0; rtadv->nd_ra_cksum = 0; - rtadv->nd_ra_curhoplimit = 64; + rtadv->nd_ra_curhoplimit = zif->rtadv.AdvCurHopLimit; /* RFC4191: Default Router Preference is 0 if Router Lifetime is 0. 
*/ rtadv->nd_ra_flags_reserved = zif->rtadv.AdvDefaultLifetime == 0 @@ -333,16 +340,6 @@ static void rtadv_send_packet(int sock, struct interface *ifp, IPV6_ADDR_COPY(&pinfo->nd_opt_pi_prefix, &rprefix->prefix.prefix); -#ifdef DEBUG - { - uint8_t buf[INET6_ADDRSTRLEN]; - - zlog_debug("DEBUG %s", - inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, - buf, INET6_ADDRSTRLEN)); - } -#endif /* DEBUG */ - len += sizeof(struct nd_opt_prefix_info); } @@ -388,9 +385,11 @@ static void rtadv_send_packet(int sock, struct interface *ifp, sizeof(struct nd_opt_rdnss) + sizeof(struct in6_addr); if (len + opt_len > max_len) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + zlog_warn( - "%s(%u): Tx RA: RDNSS option would exceed MTU, omitting it", - ifp->name, ifp->ifindex); + "%s(%s:%u): Tx RA: RDNSS option would exceed MTU, omitting it", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex); goto no_more_opts; } struct nd_opt_rdnss *opt = (struct nd_opt_rdnss *)(buf + len); @@ -510,10 +509,17 @@ static int rtadv_timer(struct thread *thread) <= 0) zif->rtadv.inFastRexmit = 0; - if (IS_ZEBRA_DEBUG_SEND) + if (IS_ZEBRA_DEBUG_SEND) { + struct vrf *vrf = + vrf_lookup_by_id( + ifp->vrf_id); + zlog_debug( - "Fast RA Rexmit on interface %s", - ifp->name); + "Fast RA Rexmit on interface %s(%s:%u)", + ifp->name, + VRF_LOGNAME(vrf), + ifp->ifindex); + } rtadv_send_packet(rtadv_get_socket(zvrf), ifp, RA_ENABLE); @@ -612,9 +618,14 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len, inet_ntop(AF_INET6, &addr->sin6_addr, addr_str, INET6_ADDRSTRLEN); if (len < sizeof(struct nd_router_advert)) { - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Rx RA with invalid length %d from %s", - ifp->name, ifp->ifindex, len, addr_str); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug( + "%s(%s:%u): Rx RA with invalid length %d from %s", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, len, + addr_str); + } return; } @@ -622,9 +633,14 @@ static void 
rtadv_process_advert(uint8_t *msg, unsigned int len, rtadv_process_optional(msg + sizeof(struct nd_router_advert), len - sizeof(struct nd_router_advert), ifp, addr); - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Rx RA with non-linklocal source address from %s", - ifp->name, ifp->ifindex, addr_str); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug( + "%s(%s:%u): Rx RA with non-linklocal source address from %s", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, + addr_str); + } return; } @@ -703,9 +719,12 @@ static void rtadv_process_packet(uint8_t *buf, unsigned int len, return; } - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Rx RA/RS len %d from %s", ifp->name, - ifp->ifindex, len, addr_str); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Rx RA/RS len %d from %s", ifp->name, + VRF_LOGNAME(vrf), ifp->ifindex, len, addr_str); + } if (if_is_loopback(ifp) || CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK)) @@ -718,8 +737,11 @@ static void rtadv_process_packet(uint8_t *buf, unsigned int len, /* ICMP message length check. */ if (len < sizeof(struct icmp6_hdr)) { - zlog_debug("%s(%u): Rx RA with Invalid ICMPV6 packet length %d", - ifp->name, ifp->ifindex, len); + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug( + "%s(%s:%u): Rx RA with Invalid ICMPV6 packet length %d", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, len); return; } @@ -728,15 +750,20 @@ static void rtadv_process_packet(uint8_t *buf, unsigned int len, /* ICMP message type check. 
*/ if (icmph->icmp6_type != ND_ROUTER_SOLICIT && icmph->icmp6_type != ND_ROUTER_ADVERT) { - zlog_debug("%s(%u): Rx RA - Unwanted ICMPV6 message type %d", - ifp->name, ifp->ifindex, icmph->icmp6_type); + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Rx RA - Unwanted ICMPV6 message type %d", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, + icmph->icmp6_type); return; } /* Hoplimit check. */ if (hoplimit >= 0 && hoplimit != 255) { - zlog_debug("%s(%u): Rx RA - Invalid hoplimit %d", ifp->name, - ifp->ifindex, hoplimit); + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Rx RA - Invalid hoplimit %d", ifp->name, + VRF_LOGNAME(vrf), ifp->ifindex, hoplimit); return; } @@ -1055,25 +1082,34 @@ static void zebra_interface_radv_set(ZAPI_HANDLER_ARGS, int enable) unsigned int ra_interval = ra_interval_rxd; - if (IS_ZEBRA_DEBUG_EVENT) - zlog_debug("%u: IF %u RA %s from client %s, interval %ums", - zvrf_id(zvrf), ifindex, + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = zvrf->vrf; + + zlog_debug("%s:%u: IF %u RA %s from client %s, interval %ums", + VRF_LOGNAME(vrf), zvrf_id(zvrf), ifindex, enable ? "enable" : "disable", zebra_route_string(client->proto), ra_interval); + } /* Locate interface and check VRF match. */ ifp = if_lookup_by_index(ifindex, zvrf->vrf->vrf_id); if (!ifp) { + struct vrf *vrf = zvrf->vrf; + flog_warn(EC_ZEBRA_UNKNOWN_INTERFACE, - "%u: IF %u RA %s client %s - interface unknown", - zvrf_id(zvrf), ifindex, enable ? "enable" : "disable", + "%s:%u: IF %u RA %s client %s - interface unknown", + VRF_LOGNAME(vrf), zvrf_id(zvrf), ifindex, + enable ? "enable" : "disable", zebra_route_string(client->proto)); return; } if (ifp->vrf_id != zvrf_id(zvrf)) { + struct vrf *vrf = zvrf->vrf; + zlog_debug( - "%u: IF %u RA %s client %s - VRF mismatch, IF VRF %u", - zvrf_id(zvrf), ifindex, enable ? 
"enable" : "disable", + "%s:%u: IF %u RA %s client %s - VRF mismatch, IF VRF %u", + VRF_LOGNAME(vrf), zvrf_id(zvrf), ifindex, + enable ? "enable" : "disable", zebra_route_string(client->proto), ifp->vrf_id); return; } @@ -1199,6 +1235,53 @@ DEFUN (no_ipv6_nd_ra_fast_retrans, return CMD_SUCCESS; } +DEFPY (ipv6_nd_ra_hop_limit, + ipv6_nd_ra_hop_limit_cmd, + "ipv6 nd ra-hop-limit (0-255)$hopcount", + "Interface IPv6 config commands\n" + "Neighbor discovery\n" + "Advertisement Hop Limit\n" + "Advertisement Hop Limit in hops (default:64)\n") +{ + VTY_DECLVAR_CONTEXT(interface, ifp); + struct zebra_if *zif = ifp->info; + + if (if_is_loopback(ifp) + || CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK)) { + vty_out(vty, + "Cannot configure IPv6 Router Advertisements on this interface\n"); + return CMD_WARNING_CONFIG_FAILED; + } + + zif->rtadv.AdvCurHopLimit = hopcount; + + return CMD_SUCCESS; +} + +DEFPY (no_ipv6_nd_ra_hop_limit, + no_ipv6_nd_ra_hop_limit_cmd, + "no ipv6 nd ra-hop-limit [(0-255)]", + NO_STR + "Interface IPv6 config commands\n" + "Neighbor discovery\n" + "Advertisement Hop Limit\n" + "Advertisement Hop Limit in hops\n") +{ + VTY_DECLVAR_CONTEXT(interface, ifp); + struct zebra_if *zif = ifp->info; + + if (if_is_loopback(ifp) + || CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK)) { + vty_out(vty, + "Cannot configure IPv6 Router Advertisements on this interface\n"); + return CMD_WARNING_CONFIG_FAILED; + } + + zif->rtadv.AdvCurHopLimit = RTADV_DEFAULT_HOPLIMIT; + + return CMD_SUCCESS; +} + DEFUN (ipv6_nd_suppress_ra, ipv6_nd_suppress_ra_cmd, "ipv6 nd suppress-ra", @@ -2146,6 +2229,8 @@ static int nd_dump_vty(struct vty *vty, struct interface *ifp) vty_out(vty, " ND advertised retransmit interval is %d milliseconds\n", rtadv->AdvRetransTimer); + vty_out(vty, " ND advertised hop-count limit is %d hops\n", + rtadv->AdvCurHopLimit); vty_out(vty, " ND router advertisements sent: %d rcvd: %d\n", zif->ra_sent, zif->ra_rcvd); interval = 
rtadv->MaxRtrAdvInterval; @@ -2237,6 +2322,10 @@ static int rtadv_config_write(struct vty *vty, struct interface *ifp) if (!zif->rtadv.UseFastRexmit) vty_out(vty, " no ipv6 nd ra-fast-retrans\n"); + if (zif->rtadv.AdvCurHopLimit != RTADV_DEFAULT_HOPLIMIT) + vty_out(vty, " ipv6 nd ra-hop-limit %d\n", + zif->rtadv.AdvCurHopLimit); + if (zif->rtadv.AdvDefaultLifetime != -1) vty_out(vty, " ipv6 nd ra-lifetime %d\n", zif->rtadv.AdvDefaultLifetime); @@ -2329,6 +2418,13 @@ static void rtadv_event(struct zebra_vrf *zvrf, enum rtadv_event event, int val) { struct rtadv *rtadv = &zvrf->rtadv; + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = zvrf->vrf; + + zlog_debug("%s(%s) with event: %d and val: %d", __func__, + VRF_LOGNAME(vrf), event, val); + } + switch (event) { case RTADV_START: thread_add_read(zrouter.master, rtadv_read, zvrf, val, @@ -2371,20 +2467,26 @@ void rtadv_init(struct zebra_vrf *zvrf) } } -void rtadv_terminate(struct zebra_vrf *zvrf) +void rtadv_vrf_terminate(struct zebra_vrf *zvrf) { rtadv_event(zvrf, RTADV_STOP, 0); if (zvrf->rtadv.sock >= 0) { close(zvrf->rtadv.sock); zvrf->rtadv.sock = -1; - } else if (zrouter.rtadv_sock >= 0) { - close(zrouter.rtadv_sock); - zrouter.rtadv_sock = -1; } + zvrf->rtadv.adv_if_count = 0; zvrf->rtadv.adv_msec_if_count = 0; } +void rtadv_terminate(void) +{ + if (zrouter.rtadv_sock >= 0) { + close(zrouter.rtadv_sock); + zrouter.rtadv_sock = -1; + } +} + void rtadv_cmd_init(void) { hook_register(zebra_if_extra_info, nd_dump_vty); @@ -2392,6 +2494,8 @@ void rtadv_cmd_init(void) install_element(INTERFACE_NODE, &ipv6_nd_ra_fast_retrans_cmd); install_element(INTERFACE_NODE, &no_ipv6_nd_ra_fast_retrans_cmd); + install_element(INTERFACE_NODE, &ipv6_nd_ra_hop_limit_cmd); + install_element(INTERFACE_NODE, &no_ipv6_nd_ra_hop_limit_cmd); install_element(INTERFACE_NODE, &ipv6_nd_suppress_ra_cmd); install_element(INTERFACE_NODE, &no_ipv6_nd_suppress_ra_cmd); install_element(INTERFACE_NODE, &ipv6_nd_ra_interval_cmd); @@ -2445,10 +2549,13 @@ 
static int if_join_all_router(int sock, struct interface *ifp) ifp->name, ifp->ifindex, sock, safe_strerror(errno)); - if (IS_ZEBRA_DEBUG_EVENT) + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + zlog_debug( - "%s(%u): Join All-Routers multicast group, socket %u", - ifp->name, ifp->ifindex, sock); + "%s(%s:%u): Join All-Routers multicast group, socket %u", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, sock); + } return 0; } @@ -2465,17 +2572,22 @@ static int if_leave_all_router(int sock, struct interface *ifp) ret = setsockopt(sock, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (char *)&mreq, sizeof(mreq)); - if (ret < 0) + if (ret < 0) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + flog_err_sys( EC_LIB_SOCKET, - "%s(%u): Failed to leave group, socket %u error %s", - ifp->name, ifp->ifindex, sock, safe_strerror(errno)); + "%s(%s:%u): Failed to leave group, socket %u error %s", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, sock, + safe_strerror(errno)); + } + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); - if (IS_ZEBRA_DEBUG_EVENT) zlog_debug( - "%s(%u): Leave All-Routers multicast group, socket %u", - ifp->name, ifp->ifindex, sock); - + "%s(%s:%u): Leave All-Routers multicast group, socket %u", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, sock); + } return 0; } diff --git a/zebra/rtadv.h b/zebra/rtadv.h index 64b28cbfd6..68a5bbcdbe 100644 --- a/zebra/rtadv.h +++ b/zebra/rtadv.h @@ -153,7 +153,8 @@ typedef enum { } ipv6_nd_suppress_ra_status; extern void rtadv_init(struct zebra_vrf *zvrf); -extern void rtadv_terminate(struct zebra_vrf *zvrf); +extern void rtadv_vrf_terminate(struct zebra_vrf *zvrf); +extern void rtadv_terminate(void); extern void rtadv_stop_ra(struct interface *ifp); extern void rtadv_stop_ra_all(void); extern void rtadv_cmd_init(void); diff --git a/zebra/rule_netlink.c b/zebra/rule_netlink.c index c9699c7d95..a5a605f27e 100644 --- a/zebra/rule_netlink.c +++ b/zebra/rule_netlink.c @@ -174,11 
+174,40 @@ enum zebra_dplane_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule) } /* + * Update specified rule for a specific interface. + */ +enum zebra_dplane_result kernel_update_pbr_rule(struct zebra_pbr_rule *old_rule, + struct zebra_pbr_rule *new_rule) +{ + int ret = 0; + + /* Add the new, updated one */ + ret = netlink_rule_update(RTM_NEWRULE, new_rule); + + /** + * Delete the old one. + * + * Don't care about this result right? + */ + netlink_rule_update(RTM_DELRULE, old_rule); + + kernel_pbr_rule_add_del_status(new_rule, + (!ret) ? ZEBRA_DPLANE_INSTALL_SUCCESS + : ZEBRA_DPLANE_INSTALL_FAILURE); + + return ZEBRA_DPLANE_REQUEST_SUCCESS; +} + +/* * Handle netlink notification informing a rule add or delete. * Handling of an ADD is TBD. * DELs are notified up, if other attributes indicate it may be a * notification of interest. The expectation is that if this corresponds * to a PBR rule added by FRR, it will be readded. + * + * If startup and we see a rule we created, delete it as its leftover + * from a previous instance and should have been removed on shutdown. + * */ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) { @@ -190,15 +219,12 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) struct zebra_pbr_rule rule = {}; char buf1[PREFIX_STRLEN]; char buf2[PREFIX_STRLEN]; + uint8_t proto = 0; /* Basic validation followed by extracting attributes. */ if (h->nlmsg_type != RTM_NEWRULE && h->nlmsg_type != RTM_DELRULE) return 0; - /* TBD */ - if (h->nlmsg_type == RTM_NEWRULE) - return 0; - len = h->nlmsg_len - NLMSG_LENGTH(sizeof(struct fib_rule_hdr)); if (len < 0) { zlog_err( @@ -222,19 +248,6 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) memset(tb, 0, sizeof(tb)); netlink_parse_rtattr(tb, FRA_MAX, RTM_RTA(frh), len); - /* TBD: We don't care about rules not specifying an IIF. 
*/ - if (tb[FRA_IFNAME] == NULL) - return 0; - - ifname = (char *)RTA_DATA(tb[FRA_IFNAME]); - zns = zebra_ns_lookup(ns_id); - - /* If we don't know the interface, we don't care. */ - if (!if_lookup_by_name_per_ns(zns, ifname)) - return 0; - - strlcpy(rule.ifname, ifname, sizeof(rule.ifname)); - if (tb[FRA_PRIORITY]) rule.rule.priority = *(uint32_t *)RTA_DATA(tb[FRA_PRIORITY]); @@ -246,6 +259,7 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) memcpy(&rule.rule.filter.src_ip.u.prefix6, RTA_DATA(tb[FRA_SRC]), 16); rule.rule.filter.src_ip.prefixlen = frh->src_len; + rule.rule.filter.src_ip.family = frh->family; rule.rule.filter.filter_bm |= PBR_FILTER_SRC_IP; } @@ -257,6 +271,7 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) memcpy(&rule.rule.filter.dst_ip.u.prefix6, RTA_DATA(tb[FRA_DST]), 16); rule.rule.filter.dst_ip.prefixlen = frh->dst_len; + rule.rule.filter.dst_ip.family = frh->family; rule.rule.filter.filter_bm |= PBR_FILTER_DST_IP; } @@ -265,6 +280,49 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) else rule.rule.action.table = frh->table; + /* TBD: We don't care about rules not specifying an IIF. */ + if (tb[FRA_IFNAME] == NULL) + return 0; + + if (tb[FRA_PROTOCOL]) + proto = *(uint8_t *)RTA_DATA(tb[FRA_PROTOCOL]); + + ifname = (char *)RTA_DATA(tb[FRA_IFNAME]); + strlcpy(rule.ifname, ifname, sizeof(rule.ifname)); + + if (h->nlmsg_type == RTM_NEWRULE) { + /* + * If we see a rule at startup we created, delete it now. + * It should have been flushed on a previous shutdown. + */ + if (startup && proto == RTPROT_ZEBRA) { + int ret; + + ret = netlink_rule_update(RTM_DELRULE, &rule); + + zlog_debug( + "%s: %s leftover rule: family %s IF %s(%u) Pref %u Src %s Dst %s Table %u", + __func__, + ((ret == 0) ? 
"Removed" : "Failed to remove"), + nl_family_to_str(frh->family), rule.ifname, + rule.rule.ifindex, rule.rule.priority, + prefix2str(&rule.rule.filter.src_ip, buf1, + sizeof(buf1)), + prefix2str(&rule.rule.filter.dst_ip, buf2, + sizeof(buf2)), + rule.rule.action.table); + } + + /* TBD */ + return 0; + } + + zns = zebra_ns_lookup(ns_id); + + /* If we don't know the interface, we don't care. */ + if (!if_lookup_by_name_per_ns(zns, ifname)) + return 0; + if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( "Rx %s family %s IF %s(%u) Pref %u Src %s Dst %s Table %u", @@ -281,12 +339,51 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) } /* + * Request rules from the kernel + */ +static int netlink_request_rules(struct zebra_ns *zns, int family, int type) +{ + struct { + struct nlmsghdr n; + struct fib_rule_hdr frh; + char buf[NL_PKT_BUF_SIZE]; + } req; + + memset(&req, 0, sizeof(req)); + req.n.nlmsg_type = type; + req.n.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST; + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)); + req.frh.family = family; + + return netlink_request(&zns->netlink_cmd, &req); +} + +/* * Get to know existing PBR rules in the kernel - typically called at startup. - * TBD. 
*/ int netlink_rules_read(struct zebra_ns *zns) { - return 0; + int ret; + struct zebra_dplane_info dp_info; + + zebra_dplane_info_from_zns(&dp_info, zns, true); + + ret = netlink_request_rules(zns, AF_INET, RTM_GETRULE); + if (ret < 0) + return ret; + + ret = netlink_parse_info(netlink_rule_change, &zns->netlink_cmd, + &dp_info, 0, 1); + if (ret < 0) + return ret; + + ret = netlink_request_rules(zns, AF_INET6, RTM_GETRULE); + if (ret < 0) + return ret; + + ret = netlink_parse_info(netlink_rule_change, &zns->netlink_cmd, + &dp_info, 0, 1); + return ret; } #endif /* HAVE_NETLINK */ diff --git a/zebra/rule_socket.c b/zebra/rule_socket.c index e2c650b4ad..219fa7de6f 100644 --- a/zebra/rule_socket.c +++ b/zebra/rule_socket.c @@ -57,4 +57,12 @@ enum zebra_dplane_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule) return ZEBRA_DPLANE_REQUEST_FAILURE; } +enum zebra_dplane_result kernel_update_pbr_rule(struct zebra_pbr_rule *old_rule, + struct zebra_pbr_rule *new_rule) +{ + flog_err(EC_LIB_UNAVAILABLE, "%s not Implemented for this platform", + __PRETTY_FUNCTION__); + return ZEBRA_DPLANE_REQUEST_FAILURE; +} + #endif diff --git a/zebra/subdir.am b/zebra/subdir.am index 1d49de5410..aafb4abb01 100644 --- a/zebra/subdir.am +++ b/zebra/subdir.am @@ -101,6 +101,10 @@ zebra_zebra_SOURCES = \ zebra/zebra_netns_notify.c \ zebra/table_manager.c \ zebra/zapi_msg.c \ + zebra/zebra_nb.c \ + zebra/zebra_nb_config.c \ + zebra/zebra_nb_rpcs.c \ + zebra/zebra_nb_state.c \ zebra/zebra_errors.c \ zebra/zebra_gr.c \ # end @@ -119,6 +123,9 @@ zebra/zebra_vty.$(OBJEXT): zebra/zebra_vty_clippy.c zebra/zebra_routemap_clippy.c: $(CLIPPY_DEPS) zebra/zebra_routemap.$(OBJEXT): zebra/zebra_routemap_clippy.c +zebra/rtadv_clippy.c: $(CLIPPY_DEPS) +zebra/rtadv.$(OBJEXT): zebra/rtadv_clippy.c + noinst_HEADERS += \ zebra/connected.h \ zebra/debug.h \ @@ -164,6 +171,7 @@ noinst_HEADERS += \ zebra/zebra_netns_notify.h \ zebra/table_manager.h \ zebra/zapi_msg.h \ + zebra/zebra_nb.h \ zebra/zebra_errors.h \ 
# end @@ -191,5 +199,19 @@ zebra_zebra_fpm_la_SOURCES += zebra/zebra_fpm_dt.c endif endif +nodist_zebra_zebra_SOURCES = \ + yang/frr-zebra.yang.c \ + # end + zebra_zebra_cumulus_mlag_la_SOURCES = zebra/zebra_mlag_private.c zebra_zebra_cumulus_mlag_la_LDFLAGS = -avoid-version -module -shared -export-dynamic + +if LINUX +module_LTLIBRARIES += zebra/dplane_fpm_nl.la + +zebra_dplane_fpm_nl_la_SOURCES = zebra/dplane_fpm_nl.c +zebra_dplane_fpm_nl_la_LDFLAGS = -avoid-version -module -shared -export-dynamic +zebra_dplane_fpm_nl_la_LIBADD = + +vtysh_scan += $(top_srcdir)/zebra/dplane_fpm_nl.c +endif diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 2190bfab4f..092b5dd3c2 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -957,7 +957,6 @@ int zsend_pw_update(struct zserv *client, struct zebra_pw *pw) /* Send response to a get label chunk request to client */ int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id, - uint8_t proto, uint16_t instance, struct label_manager_chunk *lmc) { int ret; @@ -965,9 +964,9 @@ int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id, zclient_create_header(s, ZEBRA_GET_LABEL_CHUNK, vrf_id); /* proto */ - stream_putc(s, proto); + stream_putc(s, client->proto); /* instance */ - stream_putw(s, instance); + stream_putw(s, client->instance); if (lmc) { /* keep */ @@ -1413,6 +1412,132 @@ void zserv_nexthop_num_warn(const char *caller, const struct prefix *p, } } +/* + * Create a new nexthop based on a zapi nexthop. 
+ */ +static struct nexthop *nexthop_from_zapi(struct route_entry *re, + const struct zapi_nexthop *api_nh, + const struct zapi_route *api) +{ + struct nexthop *nexthop = NULL; + struct ipaddr vtep_ip; + struct interface *ifp; + char nhbuf[INET6_ADDRSTRLEN] = ""; + + switch (api_nh->type) { + case NEXTHOP_TYPE_IFINDEX: + nexthop = nexthop_from_ifindex(api_nh->ifindex, api_nh->vrf_id); + break; + case NEXTHOP_TYPE_IPV4: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d", __func__, + nhbuf, api_nh->vrf_id); + } + nexthop = nexthop_from_ipv4(&api_nh->gate.ipv4, NULL, + api_nh->vrf_id); + break; + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d, ifindex=%d", + __func__, nhbuf, api_nh->vrf_id, + api_nh->ifindex); + } + + nexthop = nexthop_from_ipv4_ifindex( + &api_nh->gate.ipv4, NULL, api_nh->ifindex, + api_nh->vrf_id); + + ifp = if_lookup_by_index(api_nh->ifindex, api_nh->vrf_id); + if (ifp && connected_is_unnumbered(ifp)) + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); + + /* Special handling for IPv4 routes sourced from EVPN: + * the nexthop and associated MAC need to be installed. 
+ */ + if (CHECK_FLAG(api->flags, ZEBRA_FLAG_EVPN_ROUTE)) { + memset(&vtep_ip, 0, sizeof(struct ipaddr)); + vtep_ip.ipa_type = IPADDR_V4; + memcpy(&(vtep_ip.ipaddr_v4), &(api_nh->gate.ipv4), + sizeof(struct in_addr)); + zebra_vxlan_evpn_vrf_route_add( + api_nh->vrf_id, &api_nh->rmac, + &vtep_ip, &api->prefix); + } + break; + case NEXTHOP_TYPE_IPV6: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET6, &api_nh->gate.ipv6, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d", __func__, + nhbuf, api_nh->vrf_id); + } + nexthop = nexthop_from_ipv6(&api_nh->gate.ipv6, api_nh->vrf_id); + break; + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET6, &api_nh->gate.ipv6, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d, ifindex=%d", + __func__, nhbuf, api_nh->vrf_id, + api_nh->ifindex); + } + nexthop = nexthop_from_ipv6_ifindex(&api_nh->gate.ipv6, + api_nh->ifindex, + api_nh->vrf_id); + + /* Special handling for IPv6 routes sourced from EVPN: + * the nexthop and associated MAC need to be installed. 
+ */ + if (CHECK_FLAG(api->flags, ZEBRA_FLAG_EVPN_ROUTE)) { + memset(&vtep_ip, 0, sizeof(struct ipaddr)); + vtep_ip.ipa_type = IPADDR_V6; + memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6), + sizeof(struct in6_addr)); + zebra_vxlan_evpn_vrf_route_add( + api_nh->vrf_id, &api_nh->rmac, + &vtep_ip, &api->prefix); + } + break; + case NEXTHOP_TYPE_BLACKHOLE: + if (IS_ZEBRA_DEBUG_RECV) + zlog_debug("%s: nh blackhole %d", + __func__, api_nh->bh_type); + + nexthop = nexthop_from_blackhole(api_nh->bh_type); + break; + } + + /* Return early if we couldn't process the zapi nexthop */ + if (nexthop == NULL) { + goto done; + } + + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK)) + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); + + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_WEIGHT)) + nexthop->weight = api_nh->weight; + + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) { + if (api_nh->backup_idx < api->backup_nexthop_num) { + /* Capture backup info */ + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP); + nexthop->backup_idx = api_nh->backup_idx; + } else { + /* Warn about invalid backup index */ + if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_EVENT) + zlog_debug("%s: invalid backup nh idx %d", + __func__, api_nh->backup_idx); + } + } +done: + return nexthop; +} + static void zread_route_add(ZAPI_HANDLER_ARGS) { struct stream *s; @@ -1421,12 +1546,15 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) afi_t afi; struct prefix_ipv6 *src_p = NULL; struct route_entry *re; - struct nexthop *nexthop = NULL; + struct nexthop *nexthop = NULL, *last_nh; struct nexthop_group *ng = NULL; + struct nhg_backup_info *bnhg = NULL; int i, ret; vrf_id_t vrf_id; - struct ipaddr vtep_ip; - struct interface *ifp; + struct nhg_hash_entry nhe; + enum lsp_types_t label_type; + char nhbuf[NEXTHOP_STRLEN]; + char labelbuf[MPLS_LABEL_STRLEN]; s = msg; if (zapi_route_decode(s, &api) < 0) { @@ -1440,8 +1568,8 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) char 
buf_prefix[PREFIX_STRLEN]; prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix)); - zlog_debug("%s: p=%s, flags=0x%x", - __func__, buf_prefix, api.flags); + zlog_debug("%s: p=%s, msg flags=0x%x, flags=0x%x", + __func__, buf_prefix, (int)api.message, api.flags); } /* Allocate new route. */ @@ -1469,6 +1597,15 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) return; } + /* Report misuse of the backup flag */ + if (CHECK_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS) && + api.backup_nexthop_num == 0) { + if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_EVENT) + zlog_debug("%s: client %s: BACKUP flag set but no backup nexthops, prefix %pFX", + __func__, + zebra_route_string(client->proto), &api.prefix); + } + /* Use temporary list of nexthops */ ng = nexthop_group_new(); @@ -1479,130 +1616,138 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) */ for (i = 0; i < api.nexthop_num; i++) { api_nh = &api.nexthops[i]; - ifindex_t ifindex = 0; - nexthop = NULL; + /* Convert zapi nexthop */ + nexthop = nexthop_from_zapi(re, api_nh, &api); + if (!nexthop) { + flog_warn( + EC_ZEBRA_NEXTHOP_CREATION_FAILED, + "%s: Nexthops Specified: %d but we failed to properly create one", + __func__, api.nexthop_num); + nexthop_group_delete(&ng); + XFREE(MTYPE_RE, re); + return; + } - if (IS_ZEBRA_DEBUG_RECV) - zlog_debug("nh type %d", api_nh->type); + /* MPLS labels for BGP-LU or Segment Routing */ + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL) + && api_nh->type != NEXTHOP_TYPE_IFINDEX + && api_nh->type != NEXTHOP_TYPE_BLACKHOLE + && api_nh->label_num > 0) { - switch (api_nh->type) { - case NEXTHOP_TYPE_IFINDEX: - nexthop = nexthop_from_ifindex(api_nh->ifindex, - api_nh->vrf_id); - break; - case NEXTHOP_TYPE_IPV4: - if (IS_ZEBRA_DEBUG_RECV) { - char nhbuf[INET6_ADDRSTRLEN] = {0}; + label_type = lsp_type_from_re_type(client->proto); + nexthop_add_labels(nexthop, label_type, + api_nh->label_num, + &api_nh->labels[0]); + } - inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, - 
INET6_ADDRSTRLEN); - zlog_debug("%s: nh=%s, vrf_id=%d", __func__, - nhbuf, api_nh->vrf_id); - } - nexthop = nexthop_from_ipv4(&api_nh->gate.ipv4, - NULL, api_nh->vrf_id); - break; - case NEXTHOP_TYPE_IPV4_IFINDEX: + if (IS_ZEBRA_DEBUG_RECV) { + labelbuf[0] = '\0'; + nhbuf[0] = '\0'; - memset(&vtep_ip, 0, sizeof(struct ipaddr)); - ifindex = api_nh->ifindex; - if (IS_ZEBRA_DEBUG_RECV) { - char nhbuf[INET6_ADDRSTRLEN] = {0}; + nexthop2str(nexthop, nhbuf, sizeof(nhbuf)); - inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, - INET6_ADDRSTRLEN); - zlog_debug( - "%s: nh=%s, vrf_id=%d (re->vrf_id=%d), ifindex=%d", - __func__, nhbuf, api_nh->vrf_id, - re->vrf_id, ifindex); - } - nexthop = nexthop_from_ipv4_ifindex( - &api_nh->gate.ipv4, NULL, ifindex, - api_nh->vrf_id); - - ifp = if_lookup_by_index(ifindex, api_nh->vrf_id); - if (ifp && connected_is_unnumbered(ifp)) - SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); - /* Special handling for IPv4 routes sourced from EVPN: - * the nexthop and associated MAC need to be installed. - */ - if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) { - vtep_ip.ipa_type = IPADDR_V4; - memcpy(&(vtep_ip.ipaddr_v4), - &(api_nh->gate.ipv4), - sizeof(struct in_addr)); - zebra_vxlan_evpn_vrf_route_add( - api_nh->vrf_id, &api_nh->rmac, - &vtep_ip, &api.prefix); - } - break; - case NEXTHOP_TYPE_IPV6: - nexthop = nexthop_from_ipv6(&api_nh->gate.ipv6, - api_nh->vrf_id); - break; - case NEXTHOP_TYPE_IPV6_IFINDEX: - memset(&vtep_ip, 0, sizeof(struct ipaddr)); - ifindex = api_nh->ifindex; - nexthop = nexthop_from_ipv6_ifindex(&api_nh->gate.ipv6, - ifindex, - api_nh->vrf_id); - - /* Special handling for IPv6 routes sourced from EVPN: - * the nexthop and associated MAC need to be installed. 
- */ - if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) { - vtep_ip.ipa_type = IPADDR_V6; - memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6), - sizeof(struct in6_addr)); - zebra_vxlan_evpn_vrf_route_add( - api_nh->vrf_id, &api_nh->rmac, - &vtep_ip, &api.prefix); + if (nexthop->nh_label && + nexthop->nh_label->num_labels > 0) { + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, + labelbuf, sizeof(labelbuf), + false); } - break; - case NEXTHOP_TYPE_BLACKHOLE: - nexthop = nexthop_from_blackhole(api_nh->bh_type); - break; + + zlog_debug("%s: nh=%s, vrf_id=%d %s", + __func__, nhbuf, api_nh->vrf_id, labelbuf); } + /* Add new nexthop to temporary list. This list is + * canonicalized - sorted - so that it can be hashed later + * in route processing. We expect that the sender has sent + * the list sorted, and the zapi client api attempts to enforce + * that, so this should be inexpensive - but it is necessary + * to support shared nexthop-groups. + */ + nexthop_group_add_sorted(ng, nexthop); + } + + /* Allocate temporary list of backup nexthops, if necessary */ + if (api.backup_nexthop_num > 0) { + if (IS_ZEBRA_DEBUG_RECV) + zlog_debug("%s: adding %d backup nexthops", + __func__, api.backup_nexthop_num); + + bnhg = zebra_nhg_backup_alloc(); + nexthop = NULL; + last_nh = NULL; + } + + /* Copy backup nexthops also, if present */ + for (i = 0; i < api.backup_nexthop_num; i++) { + api_nh = &api.backup_nexthops[i]; + + /* Convert zapi backup nexthop */ + nexthop = nexthop_from_zapi(re, api_nh, &api); if (!nexthop) { flog_warn( EC_ZEBRA_NEXTHOP_CREATION_FAILED, - "%s: Nexthops Specified: %d but we failed to properly create one", - __func__, api.nexthop_num); + "%s: Backup Nexthops Specified: %d but we failed to properly create one", + __func__, api.backup_nexthop_num); nexthop_group_delete(&ng); + zebra_nhg_backup_free(&bnhg); XFREE(MTYPE_RE, re); return; } - if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK)) - SET_FLAG(nexthop->flags, 
NEXTHOP_FLAG_ONLINK); - - if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_WEIGHT)) - nexthop->weight = api_nh->weight; + /* Backup nexthops can't have backups; that's not valid. */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + if (IS_ZEBRA_DEBUG_RECV) { + nexthop2str(nexthop, nhbuf, sizeof(nhbuf)); + zlog_debug("%s: backup nh %s with BACKUP flag!", + __func__, nhbuf); + } + UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP); + nexthop->backup_idx = 0; + } /* MPLS labels for BGP-LU or Segment Routing */ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL) && api_nh->type != NEXTHOP_TYPE_IFINDEX - && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) { - enum lsp_types_t label_type; + && api_nh->type != NEXTHOP_TYPE_BLACKHOLE + && api_nh->label_num > 0) { label_type = lsp_type_from_re_type(client->proto); - - if (IS_ZEBRA_DEBUG_RECV) { - zlog_debug( - "%s: adding %d labels of type %d (1st=%u)", - __func__, api_nh->label_num, label_type, - api_nh->labels[0]); - } - nexthop_add_labels(nexthop, label_type, api_nh->label_num, &api_nh->labels[0]); } - /* Add new nexthop to temporary list */ - nexthop_group_add_sorted(ng, nexthop); + if (IS_ZEBRA_DEBUG_RECV) { + labelbuf[0] = '\0'; + nhbuf[0] = '\0'; + + nexthop2str(nexthop, nhbuf, sizeof(nhbuf)); + + if (nexthop->nh_label && + nexthop->nh_label->num_labels > 0) { + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, + labelbuf, sizeof(labelbuf), + false); + } + + zlog_debug("%s: backup nh=%s, vrf_id=%d %s", + __func__, nhbuf, api_nh->vrf_id, labelbuf); + } + + /* Note that the order of the backup nexthops is significant, + * so we don't sort this list as we do the primary nexthops, + * we just append. 
+ */ + if (last_nh) + NEXTHOP_APPEND(last_nh, nexthop); + else + bnhg->nhe->nhg.nexthop = nexthop; + + last_nh = nexthop; } if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE)) @@ -1620,6 +1765,7 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) "%s: Received SRC Prefix but afi is not v6", __func__); nexthop_group_delete(&ng); + zebra_nhg_backup_free(&bnhg); XFREE(MTYPE_RE, re); return; } @@ -1631,10 +1777,28 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) "%s: Received safi: %d but we can only accept UNICAST or MULTICAST", __func__, api.safi); nexthop_group_delete(&ng); + zebra_nhg_backup_free(&bnhg); XFREE(MTYPE_RE, re); return; } - ret = rib_add_multipath(afi, api.safi, &api.prefix, src_p, re, ng); + + /* Include backup info with the route. We use a temporary nhe here; + * if this is a new/unknown nhe, a new copy will be allocated + * and stored. + */ + zebra_nhe_init(&nhe, afi, ng->nexthop); + nhe.nhg.nexthop = ng->nexthop; + nhe.backup_info = bnhg; + ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p, + re, &nhe); + + /* At this point, these allocations are not needed: 're' has been + * retained or freed, and if 're' still exists, it is using + * a reference to a shared group object. 
+ */ + nexthop_group_delete(&ng); + if (bnhg) + zebra_nhg_backup_free(&bnhg); /* Stats */ switch (api.prefix.family) { @@ -1766,9 +1930,11 @@ static void zread_hello(ZAPI_HANDLER_ARGS) unsigned short instance; uint8_t notify; uint8_t synchronous; + uint32_t session_id; STREAM_GETC(msg, proto); STREAM_GETW(msg, instance); + STREAM_GETL(msg, session_id); STREAM_GETC(msg, notify); STREAM_GETC(msg, synchronous); if (notify) @@ -1788,6 +1954,7 @@ static void zread_hello(ZAPI_HANDLER_ARGS) client->proto = proto; client->instance = instance; + client->session_id = session_id; /* Graceful restart processing for client connect */ zebra_gr_client_reconnect(client); @@ -2031,7 +2198,7 @@ static void zread_label_manager_connect(struct zserv *client, client->instance = instance; /* call hook for connection using wrapper */ - lm_client_connect_call(proto, instance, vrf_id); + lm_client_connect_call(client, vrf_id); stream_failure: return; @@ -2057,19 +2224,10 @@ static void zread_get_label_chunk(struct zserv *client, struct stream *msg, STREAM_GETL(s, size); STREAM_GETL(s, base); - /* call hook to get a chunk using wrapper */ - lm_get_chunk_call(&lmc, proto, instance, keep, size, base, vrf_id); + assert(proto == client->proto && instance == client->instance); - if (!lmc) - flog_err( - EC_ZEBRA_LM_CANNOT_ASSIGN_CHUNK, - "Unable to assign Label Chunk of size %u to %s instance %u", - size, zebra_route_string(proto), instance); - else - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("Assigned Label Chunk %u - %u to %s instance %u", - lmc->start, lmc->end, - zebra_route_string(proto), instance); + /* call hook to get a chunk using wrapper */ + lm_get_chunk_call(&lmc, client, keep, size, base, vrf_id); stream_failure: return; @@ -2091,8 +2249,10 @@ static void zread_release_label_chunk(struct zserv *client, struct stream *msg) STREAM_GETL(s, start); STREAM_GETL(s, end); + assert(proto == client->proto && instance == client->instance); + /* call hook to release a chunk using wrapper */ - 
lm_release_chunk_call(proto, instance, start, end); + lm_release_chunk_call(client, start, end); stream_failure: return; diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h index 996a255ff4..a4f5e74e4d 100644 --- a/zebra/zapi_msg.h +++ b/zebra/zapi_msg.h @@ -94,8 +94,7 @@ extern void zserv_nexthop_num_warn(const char *caller, const struct prefix *p, extern void zsend_capabilities_all_clients(void); extern int zsend_assign_label_chunk_response(struct zserv *client, - vrf_id_t vrf_id, uint8_t proto, - uint16_t instance, + vrf_id_t vrf_id, struct label_manager_chunk *lmc); extern int zsend_label_manager_connect_response(struct zserv *client, vrf_id_t vrf_id, diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 459d2bc620..143354b166 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -32,6 +32,7 @@ #include "zebra/zebra_memory.h" #include "zebra/zebra_router.h" #include "zebra/zebra_dplane.h" +#include "zebra/zebra_vxlan_private.h" #include "zebra/rt.h" #include "zebra/debug.h" @@ -113,10 +114,15 @@ struct dplane_route_info { struct dplane_nexthop_info nhe; /* Nexthops */ + uint32_t zd_nhg_id; struct nexthop_group zd_ng; + /* Backup nexthops (if present) */ + struct nexthop_group backup_ng; + /* "Previous" nexthops, used only in route updates without netlink */ struct nexthop_group zd_old_ng; + struct nexthop_group old_backup_ng; /* TODO -- use fixed array of nexthops, to avoid mallocs? 
*/ @@ -173,7 +179,6 @@ struct dplane_mac_info { struct ethaddr mac; struct in_addr vtep_ip; bool is_sticky; - }; /* @@ -396,7 +401,7 @@ static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw, static enum zebra_dplane_result intf_addr_update_internal( const struct interface *ifp, const struct connected *ifc, enum dplane_op_e op); -static enum zebra_dplane_result mac_update_internal( +static enum zebra_dplane_result mac_update_common( enum dplane_op_e op, const struct interface *ifp, const struct interface *br_ifp, vlanid_t vid, const struct ethaddr *mac, @@ -440,23 +445,15 @@ void dplane_enable_sys_route_notifs(void) } /* - * Free a dataplane results context. + * Clean up dependent/internal allocations inside a context object */ -static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) +static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx) { - if (pctx == NULL) - return; - - DPLANE_CTX_VALID(*pctx); - - /* TODO -- just freeing memory, but would like to maintain - * a pool - */ - - /* Some internal allocations may need to be freed, depending on + /* + * Some internal allocations may need to be freed, depending on * the type of info captured in the ctx. 
*/ - switch ((*pctx)->zd_op) { + switch (ctx->zd_op) { case DPLANE_OP_ROUTE_INSTALL: case DPLANE_OP_ROUTE_UPDATE: case DPLANE_OP_ROUTE_DELETE: @@ -465,18 +462,33 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) case DPLANE_OP_ROUTE_NOTIFY: /* Free allocated nexthops */ - if ((*pctx)->u.rinfo.zd_ng.nexthop) { + if (ctx->u.rinfo.zd_ng.nexthop) { /* This deals with recursive nexthops too */ - nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop); + nexthops_free(ctx->u.rinfo.zd_ng.nexthop); - (*pctx)->u.rinfo.zd_ng.nexthop = NULL; + ctx->u.rinfo.zd_ng.nexthop = NULL; } - if ((*pctx)->u.rinfo.zd_old_ng.nexthop) { + /* Free backup info also (if present) */ + if (ctx->u.rinfo.backup_ng.nexthop) { /* This deals with recursive nexthops too */ - nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop); + nexthops_free(ctx->u.rinfo.backup_ng.nexthop); - (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL; + ctx->u.rinfo.backup_ng.nexthop = NULL; + } + + if (ctx->u.rinfo.zd_old_ng.nexthop) { + /* This deals with recursive nexthops too */ + nexthops_free(ctx->u.rinfo.zd_old_ng.nexthop); + + ctx->u.rinfo.zd_old_ng.nexthop = NULL; + } + + if (ctx->u.rinfo.old_backup_ng.nexthop) { + /* This deals with recursive nexthops too */ + nexthops_free(ctx->u.rinfo.old_backup_ng.nexthop); + + ctx->u.rinfo.old_backup_ng.nexthop = NULL; } break; @@ -484,11 +496,11 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) case DPLANE_OP_NH_INSTALL: case DPLANE_OP_NH_UPDATE: case DPLANE_OP_NH_DELETE: { - if ((*pctx)->u.rinfo.nhe.ng.nexthop) { + if (ctx->u.rinfo.nhe.ng.nexthop) { /* This deals with recursive nexthops too */ - nexthops_free((*pctx)->u.rinfo.nhe.ng.nexthop); + nexthops_free(ctx->u.rinfo.nhe.ng.nexthop); - (*pctx)->u.rinfo.nhe.ng.nexthop = NULL; + ctx->u.rinfo.nhe.ng.nexthop = NULL; } break; } @@ -501,7 +513,7 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) zebra_nhlfe_t *nhlfe, *next; /* Free allocated NHLFEs */ - for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = 
next) { + for (nhlfe = ctx->u.lsp.nhlfe_list; nhlfe; nhlfe = next) { next = nhlfe->next; zebra_mpls_nhlfe_del(nhlfe); @@ -510,8 +522,8 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) /* Clear pointers in lsp struct, in case we're cacheing * free context structs. */ - (*pctx)->u.lsp.nhlfe_list = NULL; - (*pctx)->u.lsp.best_nhlfe = NULL; + ctx->u.lsp.nhlfe_list = NULL; + ctx->u.lsp.best_nhlfe = NULL; break; } @@ -519,21 +531,21 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) case DPLANE_OP_PW_INSTALL: case DPLANE_OP_PW_UNINSTALL: /* Free allocated nexthops */ - if ((*pctx)->u.pw.nhg.nexthop) { + if (ctx->u.pw.nhg.nexthop) { /* This deals with recursive nexthops too */ - nexthops_free((*pctx)->u.pw.nhg.nexthop); + nexthops_free(ctx->u.pw.nhg.nexthop); - (*pctx)->u.pw.nhg.nexthop = NULL; + ctx->u.pw.nhg.nexthop = NULL; } break; case DPLANE_OP_ADDR_INSTALL: case DPLANE_OP_ADDR_UNINSTALL: /* Maybe free label string, if allocated */ - if ((*pctx)->u.intf.label != NULL && - (*pctx)->u.intf.label != (*pctx)->u.intf.label_buf) { - free((*pctx)->u.intf.label); - (*pctx)->u.intf.label = NULL; + if (ctx->u.intf.label != NULL && + ctx->u.intf.label != ctx->u.intf.label_buf) { + free(ctx->u.intf.label); + ctx->u.intf.label = NULL; } break; @@ -547,11 +559,41 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) case DPLANE_OP_NONE: break; } +} + +/* + * Free a dataplane results context. + */ +static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) +{ + if (pctx == NULL) + return; + + DPLANE_CTX_VALID(*pctx); + + /* TODO -- just freeing memory, but would like to maintain + * a pool + */ + + /* Some internal allocations may need to be freed, depending on + * the type of info captured in the ctx. + */ + dplane_ctx_free_internal(*pctx); XFREE(MTYPE_DP_CTX, *pctx); } /* + * Reset an allocated context object for re-use. All internal allocations are + * freed and the context is memset. 
+ */ +void dplane_ctx_reset(struct zebra_dplane_ctx *ctx) +{ + dplane_ctx_free_internal(ctx); + memset(ctx, 0, sizeof(*ctx)); +} + +/* * Return a context block to the dplane module after processing */ void dplane_ctx_fini(struct zebra_dplane_ctx **pctx) @@ -1038,6 +1080,12 @@ void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh) nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh); } +uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + return ctx->u.rinfo.zd_nhg_id; +} + const struct nexthop_group *dplane_ctx_get_ng( const struct zebra_dplane_ctx *ctx) { @@ -1046,14 +1094,30 @@ const struct nexthop_group *dplane_ctx_get_ng( return &(ctx->u.rinfo.zd_ng); } -const struct nexthop_group *dplane_ctx_get_old_ng( - const struct zebra_dplane_ctx *ctx) +const struct nexthop_group * +dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + + return &(ctx->u.rinfo.backup_ng); +} + +const struct nexthop_group * +dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); return &(ctx->u.rinfo.zd_old_ng); } +const struct nexthop_group * +dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + + return &(ctx->u.rinfo.old_backup_ng); +} + const struct zebra_dplane_info *dplane_ctx_get_ns( const struct zebra_dplane_ctx *ctx) { @@ -1461,10 +1525,8 @@ static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx, /* * Initialize a context block for a route update from zebra data structs. 
*/ -static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, - enum dplane_op_e op, - struct route_node *rn, - struct route_entry *re) +int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op, + struct route_node *rn, struct route_entry *re) { int ret = EINVAL; const struct route_table *table = NULL; @@ -1473,6 +1535,7 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, struct zebra_ns *zns; struct zebra_vrf *zvrf; struct nexthop *nexthop; + zebra_l3vni_t *zl3vni; if (!ctx || !rn || !re) goto done; @@ -1514,11 +1577,32 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, /* Copy nexthops; recursive info is included too */ copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->nhe->nhg.nexthop, NULL); + ctx->u.rinfo.zd_nhg_id = re->nhe->id; - /* Ensure that the dplane nexthops' flags are clear. */ - for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) + /* Copy backup nexthop info, if present */ + if (re->nhe->backup_info && re->nhe->backup_info->nhe) { + copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop), + re->nhe->backup_info->nhe->nhg.nexthop, NULL); + } + + /* + * Ensure that the dplane nexthops' flags are clear and copy + * encapsulation information. + */ + for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) { UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + /* Check for available encapsulations. 
*/ + if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE)) + continue; + + zl3vni = zl3vni_from_vrf(nexthop->vrf_id); + if (zl3vni && is_l3vni_oper_up(zl3vni)) { + nexthop->nh_encap_type = NET_VXLAN; + nexthop->nh_encap.vni = zl3vni->vni; + } + } + /* Don't need some info when capturing a system notification */ if (op == DPLANE_OP_SYS_ROUTE_ADD || op == DPLANE_OP_SYS_ROUTE_DELETE) { @@ -1532,9 +1616,8 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE)); #ifdef HAVE_NETLINK - if (re->nhe_id) { - struct nhg_hash_entry *nhe = - zebra_nhg_resolve(zebra_nhg_lookup_id(re->nhe_id)); + { + struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe); ctx->u.rinfo.nhe.id = nhe->id; /* @@ -1581,7 +1664,6 @@ static int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, { struct zebra_vrf *zvrf = NULL; struct zebra_ns *zns = NULL; - int ret = EINVAL; if (!ctx || !nhe) @@ -1850,6 +1932,17 @@ dplane_route_update_internal(struct route_node *rn, */ copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop), old_re->nhe->nhg.nexthop, NULL); + + if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) { + struct nexthop_group *nhg; + struct nexthop **nh; + + nhg = zebra_nhg_get_backup_nhg(old_re->nhe); + nh = &(ctx->u.rinfo.old_backup_ng.nexthop); + + if (nhg->nexthop) + copy_nexthops(nh, nhg->nexthop, NULL); + } #endif /* !HAVE_NETLINK */ } @@ -2412,8 +2505,8 @@ enum zebra_dplane_result dplane_mac_add(const struct interface *ifp, enum zebra_dplane_result result; /* Use common helper api */ - result = mac_update_internal(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp, - vid, mac, vtep_ip, sticky); + result = mac_update_common(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp, + vid, mac, vtep_ip, sticky); return result; } @@ -2429,41 +2522,25 @@ enum zebra_dplane_result dplane_mac_del(const struct interface *ifp, enum zebra_dplane_result result; /* Use common helper api */ - result = mac_update_internal(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, - vid, 
mac, vtep_ip, false); + result = mac_update_common(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp, + vid, mac, vtep_ip, false); return result; } /* - * Common helper api for MAC address/vxlan updates + * Public api to init an empty context - either newly-allocated or + * reset/cleared - for a MAC update. */ -static enum zebra_dplane_result -mac_update_internal(enum dplane_op_e op, - const struct interface *ifp, - const struct interface *br_ifp, - vlanid_t vid, - const struct ethaddr *mac, - struct in_addr vtep_ip, - bool sticky) +void dplane_mac_init(struct zebra_dplane_ctx *ctx, + const struct interface *ifp, + const struct interface *br_ifp, + vlanid_t vid, + const struct ethaddr *mac, + struct in_addr vtep_ip, + bool sticky) { - enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE; - int ret; - struct zebra_dplane_ctx *ctx = NULL; struct zebra_ns *zns; - if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) { - char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN]; - - zlog_debug("init mac ctx %s: mac %s, ifp %s, vtep %s", - dplane_op2str(op), - prefix_mac2str(mac, buf1, sizeof(buf1)), - ifp->name, - inet_ntop(AF_INET, &vtep_ip, buf2, sizeof(buf2))); - } - - ctx = dplane_ctx_alloc(); - - ctx->zd_op = op; ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS; ctx->zd_vrf_id = ifp->vrf_id; @@ -2481,6 +2558,39 @@ mac_update_internal(enum dplane_op_e op, ctx->u.macinfo.mac = *mac; ctx->u.macinfo.vid = vid; ctx->u.macinfo.is_sticky = sticky; +} + +/* + * Common helper api for MAC address/vxlan updates + */ +static enum zebra_dplane_result +mac_update_common(enum dplane_op_e op, + const struct interface *ifp, + const struct interface *br_ifp, + vlanid_t vid, + const struct ethaddr *mac, + struct in_addr vtep_ip, + bool sticky) +{ + enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE; + int ret; + struct zebra_dplane_ctx *ctx = NULL; + + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) { + char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN]; + + zlog_debug("init mac ctx %s: mac %s, ifp %s, vtep %s", + 
dplane_op2str(op), + prefix_mac2str(mac, buf1, sizeof(buf1)), + ifp->name, + inet_ntop(AF_INET, &vtep_ip, buf2, sizeof(buf2))); + } + + ctx = dplane_ctx_alloc(); + ctx->zd_op = op; + + /* Common init for the ctx */ + dplane_mac_init(ctx, ifp, br_ifp, vid, mac, vtep_ip, sticky); /* Enqueue for processing on the dplane pthread */ ret = dplane_update_enqueue(ctx); @@ -3447,12 +3557,20 @@ bool dplane_is_in_shutdown(void) */ void zebra_dplane_pre_finish(void) { + struct zebra_dplane_provider *dp; + if (IS_ZEBRA_DEBUG_DPLANE) zlog_debug("Zebra dataplane pre-fini called"); zdplane_info.dg_is_shutdown = true; - /* TODO -- Notify provider(s) of pending shutdown */ + /* Notify provider(s) of pending shutdown. */ + TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) { + if (dp->dp_fini == NULL) + continue; + + dp->dp_fini(dp, true); + } } /* @@ -3753,6 +3871,8 @@ done: */ void zebra_dplane_shutdown(void) { + struct zebra_dplane_provider *dp; + if (IS_ZEBRA_DEBUG_DPLANE) zlog_debug("Zebra dataplane shutdown called"); @@ -3771,7 +3891,13 @@ void zebra_dplane_shutdown(void) zdplane_info.dg_pthread = NULL; zdplane_info.dg_master = NULL; - /* TODO -- Notify provider(s) of final shutdown */ + /* Notify provider(s) of final shutdown. */ + TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) { + if (dp->dp_fini == NULL) + continue; + + dp->dp_fini(dp, false); + } /* TODO -- Clean-up provider objects */ diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h index c0b04e71b0..f01ca2e84c 100644 --- a/zebra/zebra_dplane.h +++ b/zebra/zebra_dplane.h @@ -180,6 +180,12 @@ TAILQ_HEAD(dplane_ctx_q, zebra_dplane_ctx); /* Allocate a context object */ struct zebra_dplane_ctx *dplane_ctx_alloc(void); +/* + * Reset an allocated context object for re-use. All internal allocations are + * freed. + */ +void dplane_ctx_reset(struct zebra_dplane_ctx *ctx); + /* Return a dataplane results context block after use; the caller's pointer will * be cleared. 
*/ @@ -270,11 +276,19 @@ void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance); uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx); void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh); + +uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx); const struct nexthop_group *dplane_ctx_get_ng( const struct zebra_dplane_ctx *ctx); const struct nexthop_group *dplane_ctx_get_old_ng( const struct zebra_dplane_ctx *ctx); +/* Backup nexthop information (list of nexthops) if present. */ +const struct nexthop_group * +dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx); +const struct nexthop_group * +dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx); + /* Accessors for nexthop information */ uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx); afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx); @@ -430,6 +444,12 @@ enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp, /* * Enqueue evpn mac operations for the dataplane. */ +extern struct zebra_dplane_ctx *mac_update_internal( + enum dplane_op_e op, const struct interface *ifp, + const struct interface *br_ifp, + vlanid_t vid, const struct ethaddr *mac, + struct in_addr vtep_ip, bool sticky); + enum zebra_dplane_result dplane_mac_add(const struct interface *ifp, const struct interface *bridge_ifp, vlanid_t vid, @@ -443,6 +463,15 @@ enum zebra_dplane_result dplane_mac_del(const struct interface *ifp, const struct ethaddr *mac, struct in_addr vtep_ip); +/* Helper api to init an empty or new context for a MAC update */ +void dplane_mac_init(struct zebra_dplane_ctx *ctx, + const struct interface *ifp, + const struct interface *br_ifp, + vlanid_t vid, + const struct ethaddr *mac, + struct in_addr vtep_ip, + bool sticky); + /* * Enqueue evpn neighbor updates for the dataplane. 
*/ @@ -466,6 +495,9 @@ enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp, const struct in_addr *ip, vni_t vni); +/* Encode route information into data plane context. */ +int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op, + struct route_node *rn, struct route_entry *re); /* Retrieve the limit on the number of pending, unprocessed updates. */ uint32_t dplane_get_in_queue_limit(void); diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c index 0190ee2b8d..41d73f3c97 100644 --- a/zebra/zebra_fpm.c +++ b/zebra/zebra_fpm.c @@ -1470,8 +1470,6 @@ static int zfpm_trigger_update(struct route_node *rn, const char *reason) /* * Generate Key for FPM MAC info hash entry - * Key is generated using MAC address and VNI id which should be sufficient - * to provide uniqueness */ static unsigned int zfpm_mac_info_hash_keymake(const void *p) { @@ -1494,8 +1492,6 @@ static bool zfpm_mac_info_cmp(const void *p1, const void *p2) if (memcmp(fpm_mac1->macaddr.octet, fpm_mac2->macaddr.octet, ETH_ALEN) != 0) return false; - if (fpm_mac1->r_vtep_ip.s_addr != fpm_mac2->r_vtep_ip.s_addr) - return false; if (fpm_mac1->vni != fpm_mac2->vni) return false; @@ -1521,7 +1517,6 @@ static void *zfpm_mac_info_alloc(void *p) fpm_mac = XCALLOC(MTYPE_FPM_MAC_INFO, sizeof(struct fpm_mac_info_t)); memcpy(&fpm_mac->macaddr, &key->macaddr, ETH_ALEN); - memcpy(&fpm_mac->r_vtep_ip, &key->r_vtep_ip, sizeof(struct in_addr)); fpm_mac->vni = key->vni; return (void *)fpm_mac; @@ -1552,6 +1547,7 @@ static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni, char buf[ETHER_ADDR_STRLEN]; struct fpm_mac_info_t *fpm_mac, key; struct interface *vxlan_if, *svi_if; + bool mac_found = false; /* * Ignore if the connection is down. 
We will update the FPM about @@ -1572,56 +1568,34 @@ static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni, memset(&key, 0, sizeof(struct fpm_mac_info_t)); memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN); - key.r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr; key.vni = zl3vni->vni; /* Check if this MAC is already present in the queue. */ fpm_mac = zfpm_mac_info_lookup(&key); if (fpm_mac) { - if (!!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) - == delete) { - /* - * MAC is already present in the queue - * with the same op as this one. Do nothing - */ - zfpm_g->stats.redundant_triggers++; - return 0; - } + mac_found = true; /* - * A new op for an already existing fpm_mac_info_t node. - * Update the existing node for the new op. + * If the enqueued op is "add" and current op is "delete", + * this is a noop. So, Unset ZEBRA_MAC_UPDATE_FPM flag. + * While processing FPM queue, we will silently delete this + * MAC entry without sending any update for this MAC. */ - if (!delete) { - /* - * New op is "add". Previous op is "delete". - * Update the fpm_mac_info_t for the new add. - */ - fpm_mac->zebra_flags = rmac->flags; - - fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0; - fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0; - - UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM); - SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM); - } else { - /* - * New op is "delete". Previous op is "add". - * Thus, no-op. Unset ZEBRA_MAC_UPDATE_FPM flag. 
- */ + if (!CHECK_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM) && + delete == 1) { SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM); UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM); + return 0; } - - return 0; + } else { + fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key, + zfpm_mac_info_alloc); + if (!fpm_mac) + return 0; } - fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key, - zfpm_mac_info_alloc); - if (!fpm_mac) - return 0; - + fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr; fpm_mac->zebra_flags = rmac->flags; fpm_mac->vxlan_if = vxlan_if ? vxlan_if->ifindex : 0; fpm_mac->svi_if = svi_if ? svi_if->ifindex : 0; @@ -1629,8 +1603,11 @@ static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni, SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM); if (delete) SET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM); + else + UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_DELETE_FPM); - TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries); + if (!mac_found) + TAILQ_INSERT_TAIL(&zfpm_g->mac_q, fpm_mac, fpm_mac_q_entries); zfpm_g->stats.updates_triggered++; @@ -1961,8 +1938,15 @@ static int fpm_remote_srv_write(struct vty *vty) } +static int fpm_remote_srv_write(struct vty *vty); /* Zebra node */ -static struct cmd_node zebra_node = {ZEBRA_NODE, "", 1}; +static struct cmd_node zebra_node = { + .name = "zebra", + .node = ZEBRA_NODE, + .parent_node = CONFIG_NODE, + .prompt = "", + .config_write = fpm_remote_srv_write, +}; /** @@ -1999,7 +1983,7 @@ static int zfpm_init(struct thread_master *master) zfpm_stats_init(&zfpm_g->last_ivl_stats); zfpm_stats_init(&zfpm_g->cumulative_stats); - install_node(&zebra_node, fpm_remote_srv_write); + install_node(&zebra_node); install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd); install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd); install_element(CONFIG_NODE, &fpm_remote_ip_cmd); diff --git a/zebra/zebra_fpm_protobuf.c b/zebra/zebra_fpm_protobuf.c index ade4b636d6..4b31cc0281 100644 --- 
a/zebra/zebra_fpm_protobuf.c +++ b/zebra/zebra_fpm_protobuf.c @@ -294,7 +294,7 @@ int zfpm_protobuf_encode_route(rib_dest_t *dest, struct route_entry *re, return 0; } - len = fpm__message__pack(msg, (uint8_t *)in_buf); + len = fpm__message__pack(msg, in_buf); assert(len <= in_buf_len); QPB_RESET_STACK_ALLOCATOR(allocator); diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c index d373fdf370..999e91486d 100644 --- a/zebra/zebra_mpls.c +++ b/zebra/zebra_mpls.c @@ -98,14 +98,14 @@ static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp); static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size); static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex); + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex); static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex, uint8_t num_labels, - mpls_label_t *labels); + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex, + uint8_t num_labels, mpls_label_t *labels); static int nhlfe_del(zebra_nhlfe_t *snhlfe); static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe, struct mpls_label_stack *nh_label); @@ -117,13 +117,13 @@ static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty); static void lsp_print(zebra_lsp_t *lsp, void *ctxt); static void *slsp_alloc(void *p); static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); static zebra_snhlfe_t 
*snhlfe_add(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex, + const union g_addr *gate, ifindex_t ifindex, mpls_label_t out_label); static int snhlfe_del(zebra_snhlfe_t *snhlfe); static int snhlfe_del_all(zebra_slsp_t *slsp); @@ -960,7 +960,7 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data) UNSET_FLAG(lsp->flags, LSP_FLAG_CHANGED); /* We leave the INSTALLED flag set here - * so we know an update in in-flight. + * so we know an update is in-flight. */ /* @@ -1149,7 +1149,7 @@ static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size) * Check if NHLFE matches with search info passed. */ static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { struct nexthop *nhop; int cmp = 1; @@ -1191,8 +1191,8 @@ static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, * Locate NHLFE that matches with passed info. */ static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex) + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex) { zebra_nhlfe_t *nhlfe; @@ -1214,9 +1214,9 @@ static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, * check done. */ static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex, uint8_t num_labels, - mpls_label_t labels[]) + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex, + uint8_t num_labels, mpls_label_t labels[]) { zebra_nhlfe_t *nhlfe; struct nexthop *nexthop; @@ -1520,7 +1520,7 @@ static struct list *hash_get_sorted_list(struct hash *hash, void *cmp) /* * Compare two LSPs based on their label values. 
*/ -static int lsp_cmp(zebra_lsp_t *lsp1, zebra_lsp_t *lsp2) +static int lsp_cmp(const zebra_lsp_t *lsp1, const zebra_lsp_t *lsp2) { if (lsp1->ile.in_label < lsp2->ile.in_label) return -1; @@ -1547,7 +1547,7 @@ static void *slsp_alloc(void *p) /* * Compare two static LSPs based on their label values. */ -static int slsp_cmp(zebra_slsp_t *slsp1, zebra_slsp_t *slsp2) +static int slsp_cmp(const zebra_slsp_t *slsp1, const zebra_slsp_t *slsp2) { if (slsp1->ile.in_label < slsp2->ile.in_label) return -1; @@ -1562,7 +1562,7 @@ static int slsp_cmp(zebra_slsp_t *slsp1, zebra_slsp_t *slsp2) * Check if static NHLFE matches with search info passed. */ static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { int cmp = 1; @@ -1593,7 +1593,7 @@ static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, */ static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { zebra_snhlfe_t *snhlfe; @@ -1615,7 +1615,7 @@ static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp, */ static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex, + const union g_addr *gate, ifindex_t ifindex, mpls_label_t out_label) { zebra_snhlfe_t *snhlfe; @@ -2746,7 +2746,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, uint8_t num_out_labels, mpls_label_t out_labels[], enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { struct hash *lsp_table; zebra_ile_t tmp_ile; @@ -2759,11 +2759,12 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, if (!lsp_table) return -1; - /* If entry is present, exit. 
*/ + /* Find or create LSP object */ tmp_ile.in_label = in_label; lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc); if (!lsp) return -1; + nhlfe = nhlfe_find(lsp, type, gtype, gate, ifindex); if (nhlfe) { struct nexthop *nh = nhlfe->nexthop; @@ -2780,8 +2781,8 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, return 0; if (IS_ZEBRA_DEBUG_MPLS) { - char buf2[BUFSIZ]; - char buf3[BUFSIZ]; + char buf2[MPLS_LABEL_STRLEN]; + char buf3[MPLS_LABEL_STRLEN]; nhlfe2str(nhlfe, buf, BUFSIZ); mpls_label2str(num_out_labels, out_labels, buf2, @@ -2842,7 +2843,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, */ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { struct hash *lsp_table; zebra_ile_t tmp_ile; @@ -3056,11 +3057,12 @@ int zebra_mpls_static_lsp_add(struct zebra_vrf *zvrf, mpls_label_t in_label, if (!slsp_table) return -1; - /* If entry is present, exit. */ + /* Find or create LSP. */ tmp_ile.in_label = in_label; slsp = hash_get(slsp_table, &tmp_ile, slsp_alloc); if (!slsp) return -1; + snhlfe = snhlfe_find(slsp, gtype, gate, ifindex); if (snhlfe) { if (snhlfe->out_label == out_label) diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h index 2489e8e510..33cb614346 100644 --- a/zebra/zebra_mpls.h +++ b/zebra/zebra_mpls.h @@ -288,7 +288,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, uint8_t num_out_labels, mpls_label_t out_labels[], enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); /* * Uninstall a particular NHLFE in the forwarding table. 
If this is @@ -296,7 +296,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, */ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); /* * Uninstall all NHLFEs for a particular LSP forwarding entry. diff --git a/zebra/zebra_mpls_vty.c b/zebra/zebra_mpls_vty.c index 796aa3f666..d789f20071 100644 --- a/zebra/zebra_mpls_vty.c +++ b/zebra/zebra_mpls_vty.c @@ -449,15 +449,21 @@ DEFUN (no_mpls_label_global_block, return zebra_mpls_global_block(vty, 0, NULL, NULL); } +static int zebra_mpls_config(struct vty *vty); /* MPLS node for MPLS LSP. */ -static struct cmd_node mpls_node = {MPLS_NODE, "", 1}; +static struct cmd_node mpls_node = { + .name = "mpls", + .node = MPLS_NODE, + .prompt = "", + .config_write = zebra_mpls_config, +}; /* MPLS VTY. */ void zebra_mpls_vty_init(void) { install_element(VIEW_NODE, &show_mpls_status_cmd); - install_node(&mpls_node, zebra_mpls_config); + install_node(&mpls_node); install_element(CONFIG_NODE, &mpls_transit_lsp_cmd); install_element(CONFIG_NODE, &no_mpls_transit_lsp_cmd); diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c new file mode 100644 index 0000000000..1f3468d6dc --- /dev/null +++ b/zebra/zebra_nb.c @@ -0,0 +1,692 @@ +/* + * Copyright (C) 2020 Cumulus Networks, Inc. + * Chirag Shah + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include "northbound.h" +#include "libfrr.h" +#include "zebra_nb.h" + +/* clang-format off */ +const struct frr_yang_module_info frr_zebra_info = { + .name = "frr-zebra", + .nodes = { + { + .xpath = "/frr-zebra:zebra/mcast-rpf-lookup", + .cbs = { + .modify = zebra_mcast_rpf_lookup_modify, + } + }, + { + .xpath = "/frr-zebra:zebra/ip-forwarding", + .cbs = { + .modify = zebra_ip_forwarding_modify, + .destroy = zebra_ip_forwarding_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/ipv6-forwarding", + .cbs = { + .modify = zebra_ipv6_forwarding_modify, + .destroy = zebra_ipv6_forwarding_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/workqueue-hold-timer", + .cbs = { + .modify = zebra_workqueue_hold_timer_modify, + } + }, + { + .xpath = "/frr-zebra:zebra/zapi-packets", + .cbs = { + .modify = zebra_zapi_packets_modify, + } + }, + { + .xpath = "/frr-zebra:zebra/import-kernel-table/table-id", + .cbs = { + .modify = zebra_import_kernel_table_table_id_modify, + .destroy = zebra_import_kernel_table_table_id_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/import-kernel-table/distance", + .cbs = { + .modify = zebra_import_kernel_table_distance_modify, + } + }, + { + .xpath = "/frr-zebra:zebra/import-kernel-table/route-map", + .cbs = { + .modify = zebra_import_kernel_table_route_map_modify, + .destroy = zebra_import_kernel_table_route_map_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/allow-external-route-update", + .cbs = { + .create = zebra_allow_external_route_update_create, + .destroy = zebra_allow_external_route_update_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/dplane-queue-limit", + .cbs = { + .modify = zebra_dplane_queue_limit_modify, + } + }, + { + .xpath = "/frr-zebra:zebra/vrf-vni-mapping", 
+ .cbs = { + .create = zebra_vrf_vni_mapping_create, + .destroy = zebra_vrf_vni_mapping_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/vrf-vni-mapping/vni-id", + .cbs = { + .modify = zebra_vrf_vni_mapping_vni_id_modify, + .destroy = zebra_vrf_vni_mapping_vni_id_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/vrf-vni-mapping/prefix-only", + .cbs = { + .create = zebra_vrf_vni_mapping_prefix_only_create, + .destroy = zebra_vrf_vni_mapping_prefix_only_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-events", + .cbs = { + .modify = zebra_debugs_debug_events_modify, + .destroy = zebra_debugs_debug_events_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-zapi-send", + .cbs = { + .modify = zebra_debugs_debug_zapi_send_modify, + .destroy = zebra_debugs_debug_zapi_send_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-zapi-recv", + .cbs = { + .modify = zebra_debugs_debug_zapi_recv_modify, + .destroy = zebra_debugs_debug_zapi_recv_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-zapi-detail", + .cbs = { + .modify = zebra_debugs_debug_zapi_detail_modify, + .destroy = zebra_debugs_debug_zapi_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-kernel", + .cbs = { + .modify = zebra_debugs_debug_kernel_modify, + .destroy = zebra_debugs_debug_kernel_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-kernel-msg-send", + .cbs = { + .modify = zebra_debugs_debug_kernel_msg_send_modify, + .destroy = zebra_debugs_debug_kernel_msg_send_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-kernel-msg-recv", + .cbs = { + .modify = zebra_debugs_debug_kernel_msg_recv_modify, + .destroy = zebra_debugs_debug_kernel_msg_recv_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-rib", + .cbs = { + .modify = zebra_debugs_debug_rib_modify, + .destroy = zebra_debugs_debug_rib_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-rib-detail", + .cbs = { + .modify = 
zebra_debugs_debug_rib_detail_modify, + .destroy = zebra_debugs_debug_rib_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-fpm", + .cbs = { + .modify = zebra_debugs_debug_fpm_modify, + .destroy = zebra_debugs_debug_fpm_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-nht", + .cbs = { + .modify = zebra_debugs_debug_nht_modify, + .destroy = zebra_debugs_debug_nht_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-nht-detail", + .cbs = { + .modify = zebra_debugs_debug_nht_detail_modify, + .destroy = zebra_debugs_debug_nht_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-mpls", + .cbs = { + .modify = zebra_debugs_debug_mpls_modify, + .destroy = zebra_debugs_debug_mpls_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-vxlan", + .cbs = { + .modify = zebra_debugs_debug_vxlan_modify, + .destroy = zebra_debugs_debug_vxlan_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-pw", + .cbs = { + .modify = zebra_debugs_debug_pw_modify, + .destroy = zebra_debugs_debug_pw_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-dplane", + .cbs = { + .modify = zebra_debugs_debug_dplane_modify, + .destroy = zebra_debugs_debug_dplane_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-dplane-detail", + .cbs = { + .modify = zebra_debugs_debug_dplane_detail_modify, + .destroy = zebra_debugs_debug_dplane_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-mlag", + .cbs = { + .modify = zebra_debugs_debug_mlag_modify, + .destroy = zebra_debugs_debug_mlag_destroy, + } + }, + { + .xpath = "/frr-zebra:get-route-information", + .cbs = { + .rpc = get_route_information_rpc, + } + }, + { + .xpath = "/frr-zebra:get-v6-mroute-info", + .cbs = { + .rpc = get_v6_mroute_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-vrf-info", + .cbs = { + .rpc = get_vrf_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-vrf-vni-info", + .cbs = { + .rpc = get_vrf_vni_info_rpc, + } + 
}, + { + .xpath = "/frr-zebra:get-evpn-info", + .cbs = { + .rpc = get_evpn_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-vni-info", + .cbs = { + .rpc = get_vni_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-vni-rmac", + .cbs = { + .rpc = get_evpn_vni_rmac_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-vni-nexthops", + .cbs = { + .rpc = get_evpn_vni_nexthops_rpc, + } + }, + { + .xpath = "/frr-zebra:clear-evpn-dup-addr", + .cbs = { + .rpc = clear_evpn_dup_addr_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-macs", + .cbs = { + .rpc = get_evpn_macs_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-arp-cache", + .cbs = { + .rpc = get_evpn_arp_cache_rpc, + } + }, + { + .xpath = "/frr-zebra:get-pbr-ipset", + .cbs = { + .rpc = get_pbr_ipset_rpc, + } + }, + { + .xpath = "/frr-zebra:get-pbr-iptable", + .cbs = { + .rpc = get_pbr_iptable_rpc, + } + }, + { + .xpath = "/frr-zebra:get-debugs", + .cbs = { + .rpc = get_debugs_rpc, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip-addrs", + .cbs = { + .create = lib_interface_zebra_ip_addrs_create, + .destroy = lib_interface_zebra_ip_addrs_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip-addrs/label", + .cbs = { + .modify = lib_interface_zebra_ip_addrs_label_modify, + .destroy = lib_interface_zebra_ip_addrs_label_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip-addrs/ip4-peer", + .cbs = { + .modify = lib_interface_zebra_ip_addrs_ip4_peer_modify, + .destroy = lib_interface_zebra_ip_addrs_ip4_peer_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/multicast", + .cbs = { + .modify = lib_interface_zebra_multicast_modify, + .destroy = lib_interface_zebra_multicast_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/link-detect", + .cbs = { + .modify = lib_interface_zebra_link_detect_modify, + .destroy = lib_interface_zebra_link_detect_destroy, + } + }, + { + .xpath = 
"/frr-interface:lib/interface/frr-zebra:zebra/shutdown", + .cbs = { + .modify = lib_interface_zebra_shutdown_modify, + .destroy = lib_interface_zebra_shutdown_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/bandwidth", + .cbs = { + .modify = lib_interface_zebra_bandwidth_modify, + .destroy = lib_interface_zebra_bandwidth_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/up-count", + .cbs = { + .get_elem = lib_interface_zebra_state_up_count_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/down-count", + .cbs = { + .get_elem = lib_interface_zebra_state_down_count_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/zif-type", + .cbs = { + .get_elem = lib_interface_zebra_state_zif_type_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/ptm-status", + .cbs = { + .get_elem = lib_interface_zebra_state_ptm_status_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/vlan-id", + .cbs = { + .get_elem = lib_interface_zebra_state_vlan_id_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/vni-id", + .cbs = { + .get_elem = lib_interface_zebra_state_vni_id_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/remote-vtep", + .cbs = { + .get_elem = lib_interface_zebra_state_remote_vtep_get_elem, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/state/mcast-group", + .cbs = { + .get_elem = lib_interface_zebra_state_mcast_group_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib", + .cbs = { + .create = lib_vrf_ribs_rib_create, + .destroy = lib_vrf_ribs_rib_destroy, + .get_next = lib_vrf_ribs_rib_get_next, + .get_keys = lib_vrf_ribs_rib_get_keys, + .lookup_entry = lib_vrf_ribs_rib_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route", + .cbs = { + 
.get_next = lib_vrf_ribs_rib_route_get_next, + .get_keys = lib_vrf_ribs_rib_route_get_keys, + .lookup_entry = lib_vrf_ribs_rib_route_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/prefix", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_prefix_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry", + .cbs = { + .get_next = lib_vrf_ribs_rib_route_route_entry_get_next, + .get_keys = lib_vrf_ribs_rib_route_route_entry_get_keys, + .lookup_entry = lib_vrf_ribs_rib_route_route_entry_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/protocol", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_protocol_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/instance", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_instance_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/distance", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_distance_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/metric", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_metric_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/tag", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_tag_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/selected", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_selected_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/installed", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_installed_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/failed", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_failed_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/queued", + .cbs = { 
+ .get_elem = lib_vrf_ribs_rib_route_route_entry_queued_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-flags", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_internal_flags_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-status", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_internal_status_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/uptime", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_uptime_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group", + .cbs = { + .get_next = lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_next, + .get_keys = lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_keys, + .lookup_entry = lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/name", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_name_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop", + .cbs = { + .get_next = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next, + .get_keys = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys, + .lookup_entry = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/nh-type", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/vrf", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem, 
+ } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/gateway", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/interface", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/bh-type", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/onlink", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry", + .cbs = { + .get_next = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next, + .get_keys = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys, + .lookup_entry = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/id", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/label", + .cbs = { + .get_elem = 
lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/ttl", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/duplicate", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/recursive", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/active", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/fib", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem, + } + }, + { + .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv4-prefix-length", + .cbs = { + .modify = lib_route_map_entry_match_condition_ipv4_prefix_length_modify, + .destroy = lib_route_map_entry_match_condition_ipv4_prefix_length_destroy, + } + }, + { + .xpath = 
"/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv6-prefix-length", + .cbs = { + .modify = lib_route_map_entry_match_condition_ipv6_prefix_length_modify, + .destroy = lib_route_map_entry_match_condition_ipv6_prefix_length_destroy, + } + }, + { + .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-protocol", + .cbs = { + .modify = lib_route_map_entry_match_condition_source_protocol_modify, + .destroy = lib_route_map_entry_match_condition_source_protocol_destroy, + } + }, + { + .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-instance", + .cbs = { + .modify = lib_route_map_entry_match_condition_source_instance_modify, + .destroy = lib_route_map_entry_match_condition_source_instance_destroy, + } + }, + { + .xpath = "/frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v4", + .cbs = { + .modify = lib_route_map_entry_set_action_source_v4_modify, + .destroy = lib_route_map_entry_set_action_source_v4_destroy, + } + }, + { + .xpath = "/frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v6", + .cbs = { + .modify = lib_route_map_entry_set_action_source_v6_modify, + .destroy = lib_route_map_entry_set_action_source_v6_destroy, + } + }, + { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/weight", + .cbs = { + .get_elem = lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem, + } + }, + { + .xpath = NULL, + }, + } +}; diff --git a/zebra/zebra_nb.h b/zebra/zebra_nb.h new file mode 100644 index 0000000000..01a44e5525 --- /dev/null +++ b/zebra/zebra_nb.h @@ -0,0 +1,488 @@ +/* + * Copyright (C) 2020 Cumulus Networks, Inc. + * Chirag Shah + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef ZEBRA_ZEBRA_NB_H_ +#define ZEBRA_ZEBRA_NB_H_ + +extern const struct frr_yang_module_info frr_zebra_info; + +/* prototypes */ +int get_route_information_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_v6_mroute_info_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_vrf_info_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_vrf_vni_info_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_evpn_info_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_vni_info_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_evpn_vni_rmac_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_evpn_vni_nexthops_rpc(const char *xpath, const struct list *input, + struct list *output); +int clear_evpn_dup_addr_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_evpn_macs_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_evpn_arp_cache_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_pbr_ipset_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_pbr_iptable_rpc(const char *xpath, const struct list *input, + struct list *output); +int get_debugs_rpc(const char *xpath, const struct list *input, + struct list *output); +int zebra_mcast_rpf_lookup_modify(enum 
nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_ip_forwarding_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_ip_forwarding_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_ipv6_forwarding_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_ipv6_forwarding_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_workqueue_hold_timer_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_zapi_packets_modify(enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_import_kernel_table_table_id_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_import_kernel_table_table_id_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_import_kernel_table_distance_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_import_kernel_table_route_map_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_import_kernel_table_route_map_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_allow_external_route_update_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_allow_external_route_update_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_dplane_queue_limit_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_vrf_vni_mapping_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_vrf_vni_mapping_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_vrf_vni_mapping_vni_id_modify(enum nb_event event, + const struct lyd_node *dnode, + union 
nb_resource *resource); +int zebra_vrf_vni_mapping_vni_id_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_vrf_vni_mapping_prefix_only_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_vrf_vni_mapping_prefix_only_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_events_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_events_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_zapi_send_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_zapi_send_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_zapi_recv_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_zapi_recv_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_zapi_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_zapi_detail_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_kernel_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_kernel_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_kernel_msg_send_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_kernel_msg_send_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_kernel_msg_recv_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_kernel_msg_recv_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_rib_modify(enum nb_event event, + const struct 
lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_rib_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_rib_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_rib_detail_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_fpm_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_fpm_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_nht_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_nht_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_nht_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_nht_detail_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_mpls_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_mpls_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_vxlan_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_vxlan_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_pw_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_pw_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_dplane_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_dplane_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_dplane_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int 
zebra_debugs_debug_dplane_detail_destroy(enum nb_event event, + const struct lyd_node *dnode); +int zebra_debugs_debug_mlag_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int zebra_debugs_debug_mlag_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_ip_addrs_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_ip_addrs_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_ip_addrs_label_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_ip_addrs_label_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_ip_addrs_ip4_peer_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_ip_addrs_ip4_peer_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_multicast_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_multicast_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_link_detect_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_link_detect_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_shutdown_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_shutdown_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_interface_zebra_bandwidth_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource); +int lib_interface_zebra_bandwidth_destroy(enum nb_event event, + const struct lyd_node *dnode); +int lib_route_map_entry_match_condition_ipv4_prefix_length_modify( + enum nb_event event, const 
struct lyd_node *dnode, + union nb_resource *resource); +int lib_route_map_entry_match_condition_ipv4_prefix_length_destroy( + enum nb_event event, const struct lyd_node *dnode); +int lib_route_map_entry_match_condition_ipv6_prefix_length_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_route_map_entry_match_condition_ipv6_prefix_length_destroy( + enum nb_event event, const struct lyd_node *dnode); +int lib_route_map_entry_match_condition_source_protocol_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_route_map_entry_match_condition_source_protocol_destroy( + enum nb_event event, const struct lyd_node *dnode); +int lib_route_map_entry_match_condition_source_instance_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_route_map_entry_match_condition_source_instance_destroy( + enum nb_event event, const struct lyd_node *dnode); +int lib_route_map_entry_set_action_source_v4_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_route_map_entry_set_action_source_v4_destroy( + enum nb_event event, const struct lyd_node *dnode); +int lib_route_map_entry_set_action_source_v6_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_route_map_entry_set_action_source_v6_destroy( + enum nb_event event, const struct lyd_node *dnode); +struct yang_data * +lib_interface_zebra_state_up_count_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_zebra_state_down_count_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_zebra_state_zif_type_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_zebra_state_ptm_status_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * 
+lib_interface_zebra_state_vlan_id_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_zebra_state_vni_id_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_zebra_state_remote_vtep_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_interface_zebra_state_mcast_group_get_elem(const char *xpath, + const void *list_entry); +int lib_vrf_ribs_rib_create(enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_vrf_ribs_rib_destroy(enum nb_event event, const struct lyd_node *dnode); +const void *lib_vrf_ribs_rib_get_next(const void *parent_list_entry, + const void *list_entry); +int lib_vrf_ribs_rib_get_keys(const void *list_entry, + struct yang_list_keys *keys); +const void *lib_vrf_ribs_rib_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys); +const void *lib_vrf_ribs_rib_route_get_next(const void *parent_list_entry, + const void *list_entry); +int lib_vrf_ribs_rib_route_get_keys(const void *list_entry, + struct yang_list_keys *keys); +const void * +lib_vrf_ribs_rib_route_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_prefix_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_protocol_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_protocol_v6_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_vrf_ribs_rib_route_vrf_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_distance_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_metric_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_vrf_ribs_rib_route_tag_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * 
+lib_vrf_ribs_rib_route_selected_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_installed_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_failed_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_queued_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_internal_flags_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_internal_status_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_uptime_get_elem(const char *xpath, + const void *list_entry); +const void * +lib_vrf_ribs_rib_route_nexthop_group_get_next(const void *parent_list_entry, + const void *list_entry); +int lib_vrf_ribs_rib_route_nexthop_group_get_keys(const void *list_entry, + struct yang_list_keys *keys); +const void *lib_vrf_ribs_rib_route_nexthop_group_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_nexthop_group_name_get_elem(const char *xpath, + const void *list_entry); +const void *lib_vrf_ribs_rib_route_nexthop_group_frr_nexthops_nexthop_get_next( + const void *parent_list_entry, const void *list_entry); +int lib_vrf_ribs_rib_route_nexthop_group_frr_nexthops_nexthop_get_keys( + const void *list_entry, struct yang_list_keys *keys); +int lib_vrf_ribs_rib_create(enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource); +int lib_vrf_ribs_rib_destroy(enum nb_event event, const struct lyd_node *dnode); +const void *lib_vrf_ribs_rib_get_next(const void *parent_list_entry, + const void *list_entry); +int lib_vrf_ribs_rib_get_keys(const void *list_entry, + struct yang_list_keys *keys); +const void *lib_vrf_ribs_rib_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys); +const void 
*lib_vrf_ribs_rib_route_get_next(const void *parent_list_entry, + const void *list_entry); +int lib_vrf_ribs_rib_route_get_keys(const void *list_entry, + struct yang_list_keys *keys); +const void * +lib_vrf_ribs_rib_route_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_prefix_get_elem(const char *xpath, + const void *list_entry); +const void * +lib_vrf_ribs_rib_route_route_entry_get_next(const void *parent_list_entry, + const void *list_entry); +int lib_vrf_ribs_rib_route_route_entry_get_keys(const void *list_entry, + struct yang_list_keys *keys); +const void *lib_vrf_ribs_rib_route_route_entry_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_protocol_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_instance_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_distance_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_metric_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_tag_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_selected_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_installed_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_failed_get_elem(const char *xpath, + const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_queued_get_elem(const char *xpath, + const void *list_entry); +struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_flags_get_elem( + const char *xpath, const void *list_entry); +struct yang_data 
*lib_vrf_ribs_rib_route_route_entry_internal_status_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_uptime_get_elem(const char *xpath, + const void *list_entry); +const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_next( + const void *parent_list_entry, const void *list_entry); +int lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_keys( + const void *list_entry, struct yang_list_keys *keys); +const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_name_get_elem( + const char *xpath, const void *list_entry); +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next( + const void *parent_list_entry, const void *list_entry); +int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys( + const void *list_entry, struct yang_list_keys *keys); +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem( + const char *xpath, const void *list_entry); +struct 
yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem( + const char *xpath, const void *list_entry); +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next( + const void *parent_list_entry, const void *list_entry); +int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys( + const void *list_entry, struct yang_list_keys *keys); +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem( + const char *xpath, const void *list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem( + const char *xpath, const void 
*list_entry); +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem( + const char *xpath, const void *list_entry); + +#endif diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c new file mode 100644 index 0000000000..311922f2ef --- /dev/null +++ b/zebra/zebra_nb_config.c @@ -0,0 +1,1672 @@ +/* + * Copyright (C) 2019 Cumulus Networks, Inc. + * Chirag Shah + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "lib/log.h" +#include "lib/northbound.h" +#include "libfrr.h" +#include "lib/command.h" +#include "lib/routemap.h" +#include "zebra/zebra_nb.h" +#include "zebra/rib.h" +#include "zebra_nb.h" +#include "zebra/interface.h" +#include "zebra/connected.h" + +/* + * XPath: /frr-zebra:zebra/mcast-rpf-lookup + */ +int zebra_mcast_rpf_lookup_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/ip-forwarding + */ +int zebra_ip_forwarding_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_ip_forwarding_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/ipv6-forwarding + */ +int zebra_ipv6_forwarding_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_ipv6_forwarding_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/workqueue-hold-timer + */ +int zebra_workqueue_hold_timer_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/zapi-packets + */ +int zebra_zapi_packets_modify(enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/import-kernel-table/table-id + */ +int zebra_import_kernel_table_table_id_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_import_kernel_table_table_id_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/import-kernel-table/distance + */ +int zebra_import_kernel_table_distance_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/import-kernel-table/route-map + */ +int zebra_import_kernel_table_route_map_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_import_kernel_table_route_map_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/allow-external-route-update + */ +int zebra_allow_external_route_update_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_allow_external_route_update_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/dplane-queue-limit + */ +int zebra_dplane_queue_limit_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/vrf-vni-mapping + */ +int zebra_vrf_vni_mapping_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_vrf_vni_mapping_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/vrf-vni-mapping/vni-id + */ +int zebra_vrf_vni_mapping_vni_id_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_vrf_vni_mapping_vni_id_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/vrf-vni-mapping/prefix-only + */ +int zebra_vrf_vni_mapping_prefix_only_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_vrf_vni_mapping_prefix_only_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-events + */ +int zebra_debugs_debug_events_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_events_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-zapi-send + */ +int zebra_debugs_debug_zapi_send_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_zapi_send_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-zapi-recv + */ +int zebra_debugs_debug_zapi_recv_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_zapi_recv_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-zapi-detail + */ +int zebra_debugs_debug_zapi_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_zapi_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-kernel + */ +int zebra_debugs_debug_kernel_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_kernel_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-kernel-msg-send + */ +int zebra_debugs_debug_kernel_msg_send_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_kernel_msg_send_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-kernel-msg-recv + */ +int zebra_debugs_debug_kernel_msg_recv_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_kernel_msg_recv_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-rib + */ +int zebra_debugs_debug_rib_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_rib_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-rib-detail + */ +int zebra_debugs_debug_rib_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_rib_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-fpm + */ +int zebra_debugs_debug_fpm_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_fpm_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-nht + */ +int zebra_debugs_debug_nht_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_nht_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-nht-detail + */ +int zebra_debugs_debug_nht_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_nht_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-mpls + */ +int zebra_debugs_debug_mpls_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_mpls_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-vxlan + */ +int zebra_debugs_debug_vxlan_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_vxlan_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-pw + */ +int zebra_debugs_debug_pw_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_pw_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-dplane + */ +int zebra_debugs_debug_dplane_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_dplane_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-dplane-detail + */ +int zebra_debugs_debug_dplane_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_dplane_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-mlag + */ +int zebra_debugs_debug_mlag_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int zebra_debugs_debug_mlag_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip-addrs + */ +int lib_interface_zebra_ip_addrs_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct interface *ifp; + struct prefix prefix; + char buf[PREFIX_STRLEN] = {0}; + + ifp = nb_running_get_entry(dnode, NULL, true); + // addr_family = yang_dnode_get_enum(dnode, "./address-family"); + yang_dnode_get_prefix(&prefix, dnode, "./ip-prefix"); + apply_mask(&prefix); + + switch (event) { + case NB_EV_VALIDATE: + if (prefix.family == AF_INET + && ipv4_martian(&prefix.u.prefix4)) { + zlog_debug("invalid address %s", + prefix2str(&prefix, buf, sizeof(buf))); + return NB_ERR_VALIDATION; + } else if (prefix.family == AF_INET6 + && ipv6_martian(&prefix.u.prefix6)) { + zlog_debug("invalid address %s", + prefix2str(&prefix, buf, sizeof(buf))); + return NB_ERR_VALIDATION; + } + break; + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + if (prefix.family == AF_INET) + if_ip_address_install(ifp, &prefix, NULL, NULL); + else if (prefix.family == 
AF_INET6) + if_ipv6_address_install(ifp, &prefix, NULL); + + break; + } + + return NB_OK; +} + +int lib_interface_zebra_ip_addrs_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + struct interface *ifp; + struct prefix prefix; + struct connected *ifc; + + ifp = nb_running_get_entry(dnode, NULL, true); + yang_dnode_get_prefix(&prefix, dnode, "./ip-prefix"); + apply_mask(&prefix); + + switch (event) { + case NB_EV_VALIDATE: + if (prefix.family == AF_INET) { + /* Check current interface address. */ + ifc = connected_check_ptp(ifp, &prefix, NULL); + if (!ifc) { + zlog_debug("interface %s Can't find address\n", + ifp->name); + return NB_ERR_VALIDATION; + } + } else if (prefix.family == AF_INET6) { + /* Check current interface address. */ + ifc = connected_check(ifp, &prefix); + if (!ifc) { + zlog_debug("interface can't find address %s", + ifp->name); + return NB_ERR_VALIDATION; + } + } else + return NB_ERR_VALIDATION; + + /* This is not configured address. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED)) { + zlog_debug("interface %s not configured", ifp->name); + return NB_ERR_VALIDATION; + } + + /* This is not real address or interface is not active. */ + if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_QUEUED) + || !CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) { + listnode_delete(ifp->connected, ifc); + connected_free(&ifc); + return NB_ERR_VALIDATION; + } + break; + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + if_ip_address_uinstall(ifp, &prefix); + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip-addrs/label + */ +int lib_interface_zebra_ip_addrs_label_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int lib_interface_zebra_ip_addrs_label_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip-addrs/ip4-peer + */ +int lib_interface_zebra_ip_addrs_ip4_peer_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int lib_interface_zebra_ip_addrs_ip4_peer_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/multicast + */ +int lib_interface_zebra_multicast_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + if (event != NB_EV_APPLY) + return NB_OK; + + struct interface *ifp; + + ifp = nb_running_get_entry(dnode, NULL, true); + + if_multicast_set(ifp); + + return NB_OK; +} + +int lib_interface_zebra_multicast_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + if (event != NB_EV_APPLY) + return NB_OK; + + struct interface *ifp; + + ifp = nb_running_get_entry(dnode, NULL, true); + + if_multicast_unset(ifp); + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/link-detect + */ +int lib_interface_zebra_link_detect_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + if (event != NB_EV_APPLY) + return NB_OK; + + struct interface *ifp; + bool link_detect; + + ifp = nb_running_get_entry(dnode, NULL, true); + link_detect = 
yang_dnode_get_bool(dnode, "./link-detect"); + + if_linkdetect(ifp, link_detect); + + return NB_OK; +} + +int lib_interface_zebra_link_detect_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + if (event != NB_EV_APPLY) + return NB_OK; + + struct interface *ifp; + bool link_detect; + + ifp = nb_running_get_entry(dnode, NULL, true); + link_detect = yang_dnode_get_bool(dnode, "./link-detect"); + + if_linkdetect(ifp, link_detect); + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/shutdown + */ +int lib_interface_zebra_shutdown_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct interface *ifp; + + ifp = nb_running_get_entry(dnode, NULL, true); + + if_shutdown(ifp); + + return NB_OK; +} + +int lib_interface_zebra_shutdown_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + struct interface *ifp; + + ifp = nb_running_get_entry(dnode, NULL, true); + + if_no_shutdown(ifp); + + return NB_OK; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/bandwidth + */ +int lib_interface_zebra_bandwidth_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + if (event != NB_EV_APPLY) + return NB_OK; + + struct interface *ifp; + uint32_t bandwidth; + + ifp = nb_running_get_entry(dnode, NULL, true); + bandwidth = yang_dnode_get_uint32(dnode, "./bandwidth"); + + ifp->bandwidth = bandwidth; + + /* force protocols to recalculate routes due to cost change */ + if (if_is_operative(ifp)) + zebra_interface_up_update(ifp); + + return NB_OK; +} + +int lib_interface_zebra_bandwidth_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + if (event != NB_EV_APPLY) + return NB_OK; + + struct interface *ifp; + + ifp = nb_running_get_entry(dnode, NULL, true); + + ifp->bandwidth = 0; + + /* force protocols to recalculate routes due to cost change */ + if (if_is_operative(ifp)) + zebra_interface_up_update(ifp); + + return NB_OK; +} 
+ +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib + */ +int lib_vrf_ribs_rib_create(enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int lib_vrf_ribs_rib_destroy(enum nb_event event, const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv4-prefix-length + */ +int lib_route_map_entry_match_condition_ipv4_prefix_length_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *length; + int condition, rv; + + if (event != NB_EV_APPLY) + return NB_OK; + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + length = yang_dnode_get_string(dnode, NULL); + condition = yang_dnode_get_enum(dnode, "../frr-route-map:condition"); + + /* Set destroy information. 
*/ + switch (condition) { + case 100: /* ipv4-prefix-length */ + rhc->rhc_rule = "ip address prefix-len"; + break; + + case 102: /* ipv4-next-hop-prefix-length */ + rhc->rhc_rule = "ip next-hop prefix-len"; + break; + } + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, rhc->rhc_rule, length, + RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +int lib_route_map_entry_match_condition_ipv4_prefix_length_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv6-prefix-length + */ +int lib_route_map_entry_match_condition_ipv6_prefix_length_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *length; + int rv; + + if (event != NB_EV_APPLY) + return NB_OK; + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + length = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. 
*/ + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_rule = "ipv6 address prefix-len"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, "ipv6 address prefix-len", + length, RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +int lib_route_map_entry_match_condition_ipv6_prefix_length_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-protocol + */ +int lib_route_map_entry_match_condition_source_protocol_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *type; + int rv; + + switch (event) { + case NB_EV_VALIDATE: + type = yang_dnode_get_string(dnode, NULL); + if (proto_name2num(type) == -1) { + zlog_warn("%s: invalid protocol: %s", __func__, type); + return NB_ERR_VALIDATION; + } + return NB_OK; + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + /* NOTHING */ + break; + } + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + type = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. 
*/ + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_rule = "source-protocol"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, "source-protocol", type, + RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +int lib_route_map_entry_match_condition_source_protocol_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-instance + */ +int lib_route_map_entry_match_condition_source_instance_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *type; + int rv; + + if (event != NB_EV_APPLY) + return NB_OK; + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + type = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. 
*/ + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_rule = "source-instance"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, "source-instance", type, + RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +int lib_route_map_entry_match_condition_source_instance_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: /frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v4 + */ +int lib_route_map_entry_set_action_source_v4_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + struct interface *pif = NULL; + const char *source; + struct vrf *vrf; + struct prefix p; + int rv; + + switch (event) { + case NB_EV_VALIDATE: + memset(&p, 0, sizeof(p)); + yang_dnode_get_ipv4p(&p, dnode, NULL); + if (zebra_check_addr(&p) == 0) { + zlog_warn("%s: invalid IPv4 address: %s", __func__, + yang_dnode_get_string(dnode, NULL)); + return NB_ERR_VALIDATION; + } + + RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) { + pif = if_lookup_exact_address(&p.u.prefix4, AF_INET, + vrf->vrf_id); + if (pif != NULL) + break; + } + if (pif == NULL) { + zlog_warn("%s: is not a local adddress: %s", __func__, + yang_dnode_get_string(dnode, NULL)); + return NB_ERR_VALIDATION; + } + return NB_OK; + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + /* NOTHING */ + break; + } + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + source = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. 
*/ + rhc->rhc_shook = generic_set_delete; + rhc->rhc_rule = "src"; + + rv = generic_set_add(NULL, rhc->rhc_rmi, "src", source); + if (rv != CMD_SUCCESS) { + rhc->rhc_shook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +int lib_route_map_entry_set_action_source_v4_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_set_destroy(event, dnode); +} + +/* + * XPath: /frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v6 + */ +int lib_route_map_entry_set_action_source_v6_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + struct interface *pif = NULL; + const char *source; + struct vrf *vrf; + struct prefix p; + int rv; + + switch (event) { + case NB_EV_VALIDATE: + memset(&p, 0, sizeof(p)); + yang_dnode_get_ipv6p(&p, dnode, NULL); + if (zebra_check_addr(&p) == 0) { + zlog_warn("%s: invalid IPv6 address: %s", __func__, + yang_dnode_get_string(dnode, NULL)); + return NB_ERR_VALIDATION; + } + + RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) { + pif = if_lookup_exact_address(&p.u.prefix6, AF_INET6, + vrf->vrf_id); + if (pif != NULL) + break; + } + if (pif == NULL) { + zlog_warn("%s: is not a local adddress: %s", __func__, + yang_dnode_get_string(dnode, NULL)); + return NB_ERR_VALIDATION; + } + return NB_OK; + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + /* NOTHING */ + break; + } + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + source = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. 
*/ + rhc->rhc_shook = generic_set_delete; + rhc->rhc_rule = "src"; + + rv = generic_set_add(NULL, rhc->rhc_rmi, "src", source); + if (rv != CMD_SUCCESS) { + rhc->rhc_shook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +int lib_route_map_entry_set_action_source_v6_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_set_destroy(event, dnode); +} diff --git a/zebra/zebra_nb_rpcs.c b/zebra/zebra_nb_rpcs.c new file mode 100644 index 0000000000..cf8efe40a4 --- /dev/null +++ b/zebra/zebra_nb_rpcs.c @@ -0,0 +1,212 @@ +/* + * Copyright (C) 2020 Cumulus Networks, Inc. + * Chirag Shah + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include "northbound.h" +#include "libfrr.h" + +#include "zebra/zebra_nb.h" +#include "zebra/zebra_router.h" +#include "zebra/zebra_vrf.h" +#include "zebra/zebra_vxlan.h" + +/* + * XPath: /frr-zebra:clear-evpn-dup-addr + */ +int clear_evpn_dup_addr_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + struct zebra_vrf *zvrf; + int ret = NB_OK; + struct yang_data *yang_dup_choice = NULL, *yang_dup_vni = NULL, + *yang_dup_ip = NULL, *yang_dup_mac = NULL; + + yang_dup_choice = yang_data_list_find(input, "%s/%s", xpath, + "input/clear-dup-choice"); + + zvrf = zebra_vrf_get_evpn(); + + if (yang_dup_choice + && strcmp(yang_dup_choice->value, "all-case") == 0) { + zebra_vxlan_clear_dup_detect_vni_all(zvrf); + } else { + vni_t vni; + struct ipaddr host_ip = {.ipa_type = IPADDR_NONE}; + struct ethaddr mac; + + yang_dup_vni = yang_data_list_find( + input, "%s/%s", xpath, + "input/clear-dup-choice/single-case/vni-id"); + if (yang_dup_vni) { + vni = yang_str2uint32(yang_dup_vni->value); + + yang_dup_mac = yang_data_list_find( + input, "%s/%s", xpath, + "input/clear-dup-choice/single-case/vni-id/mac-addr"); + yang_dup_ip = yang_data_list_find( + input, "%s/%s", xpath, + "input/clear-dup-choice/single-case/vni-id/vni-ipaddr"); + + if (yang_dup_mac) { + yang_str2mac(yang_dup_mac->value, &mac); + ret = zebra_vxlan_clear_dup_detect_vni_mac( + zvrf, vni, &mac); + } else if (yang_dup_ip) { + yang_str2ip(yang_dup_ip->value, &host_ip); + ret = zebra_vxlan_clear_dup_detect_vni_ip( + zvrf, vni, &host_ip); + } else + ret = zebra_vxlan_clear_dup_detect_vni(zvrf, + vni); + } + } + ret = (ret != CMD_SUCCESS) ? 
NB_ERR : NB_OK; + + return ret; +} + +/* + * XPath: /frr-zebra:get-route-information + */ +int get_route_information_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-v6-mroute-info + */ +int get_v6_mroute_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-vrf-info + */ +int get_vrf_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-vrf-vni-info + */ +int get_vrf_vni_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-info + */ +int get_evpn_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-vni-info + */ +int get_vni_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-vni-rmac + */ +int get_evpn_vni_rmac_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-vni-nexthops + */ +int get_evpn_vni_nexthops_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-macs + */ +int get_evpn_macs_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. 
*/ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-arp-cache + */ +int get_evpn_arp_cache_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-pbr-ipset + */ +int get_pbr_ipset_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-pbr-iptable + */ +int get_pbr_iptable_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-debugs + */ +int get_debugs_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} diff --git a/zebra/zebra_nb_state.c b/zebra/zebra_nb_state.c new file mode 100644 index 0000000000..9036bdf0f6 --- /dev/null +++ b/zebra/zebra_nb_state.c @@ -0,0 +1,637 @@ +/* + * Copyright (C) 2020 Cumulus Networks, Inc. + * Chirag Shah + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include "northbound.h" +#include "libfrr.h" +#include "zebra_nb.h" +#include "zebra/interface.h" + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/up-count + */ +struct yang_data * +lib_interface_zebra_state_up_count_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct zebra_if *zebra_if; + + zebra_if = ifp->info; + + return yang_data_new_uint16(xpath, zebra_if->up_count); +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/down-count + */ +struct yang_data * +lib_interface_zebra_state_down_count_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct zebra_if *zebra_if; + + zebra_if = ifp->info; + + return yang_data_new_uint16(xpath, zebra_if->down_count); +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/zif-type + */ +struct yang_data * +lib_interface_zebra_state_zif_type_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/ptm-status + */ +struct yang_data * +lib_interface_zebra_state_ptm_status_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/vlan-id + */ +struct yang_data * +lib_interface_zebra_state_vlan_id_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct zebra_if *zebra_if; + struct zebra_l2info_vlan *vlan_info; + + if (!IS_ZEBRA_IF_VLAN(ifp)) + return NULL; + + zebra_if = ifp->info; + vlan_info = &zebra_if->l2info.vl; + + return yang_data_new_uint16(xpath, vlan_info->vid); +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/vni-id + */ +struct yang_data * +lib_interface_zebra_state_vni_id_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct zebra_if *zebra_if; + struct zebra_l2info_vxlan *vxlan_info; + + if (!IS_ZEBRA_IF_VXLAN(ifp)) + return NULL; + + zebra_if = ifp->info; + vxlan_info = &zebra_if->l2info.vxl; + + return yang_data_new_uint32(xpath, vxlan_info->vni); +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/remote-vtep + */ +struct yang_data * +lib_interface_zebra_state_remote_vtep_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct zebra_if *zebra_if; + struct zebra_l2info_vxlan *vxlan_info; + + if (!IS_ZEBRA_IF_VXLAN(ifp)) + return NULL; + + zebra_if = ifp->info; + vxlan_info = &zebra_if->l2info.vxl; + + return yang_data_new_ipv4(xpath, &vxlan_info->vtep_ip); +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/state/mcast-group + */ +struct yang_data * +lib_interface_zebra_state_mcast_group_get_elem(const char *xpath, + const void *list_entry) +{ + const struct interface *ifp = list_entry; + struct zebra_if *zebra_if; + struct zebra_l2info_vxlan *vxlan_info; + + if (!IS_ZEBRA_IF_VXLAN(ifp)) + return NULL; + + zebra_if = ifp->info; + vxlan_info = &zebra_if->l2info.vxl; + + return yang_data_new_ipv4(xpath, &vxlan_info->mcast_grp); +} + +const void *lib_vrf_ribs_rib_get_next(const 
void *parent_list_entry, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +int lib_vrf_ribs_rib_get_keys(const void *list_entry, + struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NB_OK; +} + +const void *lib_vrf_ribs_rib_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route + */ +const void *lib_vrf_ribs_rib_route_get_next(const void *parent_list_entry, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +int lib_vrf_ribs_rib_route_get_keys(const void *list_entry, + struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NB_OK; +} + +const void * +lib_vrf_ribs_rib_route_lookup_entry(const void *parent_list_entry, + const struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/prefix + */ +struct yang_data *lib_vrf_ribs_rib_route_prefix_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry + */ +const void * +lib_vrf_ribs_rib_route_route_entry_get_next(const void *parent_list_entry, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +int lib_vrf_ribs_rib_route_route_entry_get_keys(const void *list_entry, + struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NB_OK; +} + +const void *lib_vrf_ribs_rib_route_route_entry_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/protocol + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_protocol_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/instance + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_instance_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/distance + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_distance_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/metric + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_metric_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/tag + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_tag_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/selected + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_selected_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/installed + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_installed_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/failed + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_failed_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/queued + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_queued_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-flags + */ +struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_flags_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/internal-status + */ +struct yang_data *lib_vrf_ribs_rib_route_route_entry_internal_status_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/uptime + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_uptime_get_elem(const char *xpath, + const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group + */ +const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_next( + const void *parent_list_entry, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +int lib_vrf_ribs_rib_route_route_entry_nexthop_group_get_keys( + const void *list_entry, struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NB_OK; +} + +const void *lib_vrf_ribs_rib_route_route_entry_nexthop_group_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/name + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_name_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop + */ +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_next( + const void *parent_list_entry, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_get_keys( + const void *list_entry, struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NB_OK; +} + +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/nh-type + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_nh_type_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/vrf + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_vrf_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/gateway + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_gateway_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/interface + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_interface_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/bh-type + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_bh_type_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/onlink + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_onlink_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry + */ +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_next( + const void *parent_list_entry, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +int lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_get_keys( + const void *list_entry, struct yang_list_keys *keys) +{ + /* TODO: implement me. */ + return NB_OK; +} + +const void * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_lookup_entry( + const void *parent_list_entry, const struct yang_list_keys *keys) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/id + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_id_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/label + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_label_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/ttl + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/duplicate + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_duplicate_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/recursive + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_recursive_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/active + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_active_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/fib + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_fib_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. */ + return NULL; +} + +/* + * XPath: + * /frr-vrf:lib/vrf/frr-zebra:ribs/rib/route/route-entry/nexthop-group/frr-nexthops/nexthop/weight + */ +struct yang_data * +lib_vrf_ribs_rib_route_route_entry_nexthop_group_frr_nexthops_nexthop_weight_get_elem( + const char *xpath, const void *list_entry) +{ + /* TODO: implement me. 
*/ + return NULL; +} diff --git a/zebra/zebra_netns_id.c b/zebra/zebra_netns_id.c index ea4b07a87d..77a9a7c368 100644 --- a/zebra/zebra_netns_id.c +++ b/zebra/zebra_netns_id.c @@ -143,7 +143,7 @@ static ns_id_t extract_nsid(struct nlmsghdr *nlh, char *buf) void *tail = (void *)((char *)nlh + NETLINK_ALIGN(nlh->nlmsg_len)); struct nlattr *attr; - for (attr = (struct nlattr *)((char *)buf + offset); + for (attr = (struct nlattr *)(buf + offset); NETLINK_NLATTR_LEN(tail, attr) >= sizeof(struct nlattr) && attr->nla_len >= sizeof(struct nlattr) && attr->nla_len <= NETLINK_NLATTR_LEN(tail, attr); diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index dc0af050d7..de044c0ea0 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -63,6 +63,9 @@ static struct nhg_hash_entry * depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id); static void depends_decrement_free(struct nhg_connected_tree_head *head); +static struct nhg_backup_info * +nhg_backup_copy(const struct nhg_backup_info *orig); + static void nhg_connected_free(struct nhg_connected *dep) { @@ -295,7 +298,7 @@ static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp) static void zebra_nhg_connect_depends(struct nhg_hash_entry *nhe, - struct nhg_connected_tree_head nhg_depends) + struct nhg_connected_tree_head *nhg_depends) { struct nhg_connected *rb_node_dep = NULL; @@ -304,31 +307,58 @@ zebra_nhg_connect_depends(struct nhg_hash_entry *nhe, * for now. Otherwise, their might be a time trade-off for repeated * alloc/frees as startup. 
*/ - nhe->nhg_depends = nhg_depends; + nhe->nhg_depends = *nhg_depends; /* Attach backpointer to anything that it depends on */ zebra_nhg_dependents_init(nhe); if (!zebra_nhg_depends_is_empty(nhe)) { frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u), dep %p (%u)", + __func__, nhe, nhe->id, + rb_node_dep->nhe, + rb_node_dep->nhe->id); + zebra_nhg_dependents_add(rb_node_dep->nhe, nhe); } } +} - /* Add the ifp now if its not a group or recursive and has ifindex */ - if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop - && nhe->nhg.nexthop->ifindex) { - struct interface *ifp = NULL; +/* Init an nhe, for use in a hash lookup for example */ +void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi, + const struct nexthop *nh) +{ + memset(nhe, 0, sizeof(struct nhg_hash_entry)); + nhe->vrf_id = VRF_DEFAULT; + nhe->type = ZEBRA_ROUTE_NHG; + nhe->afi = AFI_UNSPEC; - ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex, - nhe->nhg.nexthop->vrf_id); - if (ifp) - zebra_nhg_set_if(nhe, ifp); - else - flog_err( - EC_ZEBRA_IF_LOOKUP_FAILED, - "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u", - nhe->nhg.nexthop->ifindex, - nhe->nhg.nexthop->vrf_id, nhe->id); + /* There are some special rules that apply to groups representing + * a single nexthop. + */ + if (nh && (nh->next == NULL)) { + switch (nh->type) { + case (NEXTHOP_TYPE_IFINDEX): + case (NEXTHOP_TYPE_BLACKHOLE): + /* + * This switch case handles setting the afi different + * for ipv4/v6 routes. Ifindex/blackhole nexthop + * objects cannot be ambiguous, they must be Address + * Family specific. If we get here, we will either use + * the AF of the route, or the one we got passed from + * here from the kernel. 
+ */ + nhe->afi = afi; + break; + case (NEXTHOP_TYPE_IPV4_IFINDEX): + case (NEXTHOP_TYPE_IPV4): + nhe->afi = AFI_IP; + break; + case (NEXTHOP_TYPE_IPV6_IFINDEX): + case (NEXTHOP_TYPE_IPV6): + nhe->afi = AFI_IP6; + break; + } } } @@ -341,7 +371,7 @@ struct nhg_hash_entry *zebra_nhg_alloc(void) return nhe; } -static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy, +static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *orig, uint32_t id) { struct nhg_hash_entry *nhe; @@ -350,14 +380,18 @@ static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy, nhe->id = id; - nexthop_group_copy(&(nhe->nhg), &(copy->nhg)); + nexthop_group_copy(&(nhe->nhg), &(orig->nhg)); - nhe->vrf_id = copy->vrf_id; - nhe->afi = copy->afi; - nhe->type = copy->type ? copy->type : ZEBRA_ROUTE_NHG; + nhe->vrf_id = orig->vrf_id; + nhe->afi = orig->afi; + nhe->type = orig->type ? orig->type : ZEBRA_ROUTE_NHG; nhe->refcnt = 0; nhe->dplane_ref = zebra_router_get_next_sequence(); + /* Copy backup info also, if present */ + if (orig->backup_info) + nhe->backup_info = nhg_backup_copy(orig->backup_info); + return nhe; } @@ -372,7 +406,25 @@ static void *zebra_nhg_hash_alloc(void *arg) /* Mark duplicate nexthops in a group at creation time. 
*/ nexthop_group_mark_duplicates(&(nhe->nhg)); - zebra_nhg_connect_depends(nhe, copy->nhg_depends); + zebra_nhg_connect_depends(nhe, &(copy->nhg_depends)); + + /* Add the ifp now if it's not a group or recursive and has ifindex */ + if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop + && nhe->nhg.nexthop->ifindex) { + struct interface *ifp = NULL; + + ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex, + nhe->nhg.nexthop->vrf_id); + if (ifp) + zebra_nhg_set_if(nhe, ifp); + else + flog_err( + EC_ZEBRA_IF_LOOKUP_FAILED, + "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u", + nhe->nhg.nexthop->ifindex, + nhe->nhg.nexthop->vrf_id, nhe->id); + } + zebra_nhg_insert_id(nhe); return nhe; @@ -381,12 +433,17 @@ static void *zebra_nhg_hash_alloc(void *arg) uint32_t zebra_nhg_hash_key(const void *arg) { const struct nhg_hash_entry *nhe = arg; + uint32_t val, key = 0x5a351234; + + val = nexthop_group_hash(&(nhe->nhg)); + if (nhe->backup_info) { + val = jhash_2words(val, + nexthop_group_hash( + &(nhe->backup_info->nhe->nhg)), + key); + } - uint32_t key = 0x5a351234; - - key = jhash_3words(nhe->vrf_id, nhe->afi, - nexthop_group_hash(&(nhe->nhg)), - key); + key = jhash_3words(nhe->vrf_id, nhe->afi, val, key); return key; } @@ -398,6 +455,46 @@ uint32_t zebra_nhg_id_key(const void *arg) return nhe->id; } +/* Helper with common nhg/nhe nexthop comparison logic */ +static bool nhg_compare_nexthops(const struct nexthop *nh1, + const struct nexthop *nh2) +{ + assert(nh1 != NULL && nh2 != NULL); + + /* + * We have to check the active flag of each individual one, + * not just the overall active_num. This solves the special case + * issue of a route with a nexthop group with one nexthop + * resolving to itself and thus marking it inactive. If we + * have two different routes each wanting to mark a different + * nexthop inactive, they need to hash to two different groups. 
+ * + * If we just hashed on num_active, they would hash the same + * which is incorrect. + * + * ex) + * 1.1.1.0/24 + * -> 1.1.1.1 dummy1 (inactive) + * -> 1.1.2.1 dummy2 + * + * 1.1.2.0/24 + * -> 1.1.1.1 dummy1 + * -> 1.1.2.1 dummy2 (inactive) + * + * Without checking each individual one, they would hash to + * the same group and both have 1.1.1.1 dummy1 marked inactive. + * + */ + if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_ACTIVE) + != CHECK_FLAG(nh2->flags, NEXTHOP_FLAG_ACTIVE)) + return false; + + if (!nexthop_same(nh1, nh2)) + return false; + + return true; +} + bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) { const struct nhg_hash_entry *nhe1 = arg1; @@ -415,45 +512,48 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) if (nhe1->afi != nhe2->afi) return false; - /* Nexthops should be sorted */ + /* Nexthops should be in-order, so we simply compare them in-place */ for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop; - nexthop1 || nexthop2; + nexthop1 && nexthop2; nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { - if (nexthop1 && !nexthop2) - return false; - if (!nexthop1 && nexthop2) + if (!nhg_compare_nexthops(nexthop1, nexthop2)) return false; + } - /* - * We have to check the active flag of each individual one, - * not just the overall active_num. This solves the special case - * issue of a route with a nexthop group with one nexthop - * resolving to itself and thus marking it inactive. If we - * have two different routes each wanting to mark a different - * nexthop inactive, they need to hash to two different groups. - * - * If we just hashed on num_active, they would hash the same - * which is incorrect. - * - * ex) - * 1.1.1.0/24 - * -> 1.1.1.1 dummy1 (inactive) - * -> 1.1.2.1 dummy2 - * - * 1.1.2.0/24 - * -> 1.1.1.1 dummy1 - * -> 1.1.2.1 dummy2 (inactive) - * - * Without checking each individual one, they would hash to - * the same group and both have 1.1.1.1 dummy1 marked inactive. 
- * - */ - if (CHECK_FLAG(nexthop1->flags, NEXTHOP_FLAG_ACTIVE) - != CHECK_FLAG(nexthop2->flags, NEXTHOP_FLAG_ACTIVE)) - return false; + /* Check for unequal list lengths */ + if (nexthop1 || nexthop2) + return false; + + /* If there's no backup info, comparison is done. */ + if ((nhe1->backup_info == NULL) && (nhe2->backup_info == NULL)) + return true; + + /* Compare backup info also - test the easy things first */ + if (nhe1->backup_info && (nhe2->backup_info == NULL)) + return false; + if (nhe2->backup_info && (nhe1->backup_info == NULL)) + return false; + + /* Compare number of backups before actually comparing any */ + for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop, + nexthop2 = nhe2->backup_info->nhe->nhg.nexthop; + nexthop1 && nexthop2; + nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { + ; + } + + /* Did we find the end of one list before the other? */ + if (nexthop1 || nexthop2) + return false; + + /* Have to compare the backup nexthops */ + for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop, + nexthop2 = nhe2->backup_info->nhe->nhg.nexthop; + nexthop1 && nexthop2; + nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { - if (!nexthop_same(nexthop1, nexthop2)) + if (!nhg_compare_nexthops(nexthop1, nexthop2)) return false; } @@ -512,29 +612,185 @@ static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends, resolved_ng.nexthop = nh; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: head %p, nh %pNHv", + __func__, nhg_depends, nh); + depend = zebra_nhg_rib_find(0, &resolved_ng, afi); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p (%u)", + __func__, nh, depend, + depend ? depend->id : 0); + if (depend) depends_add(nhg_depends, depend); } +/* + * Lookup an nhe in the global hash, using data from another nhe. If 'lookup' + * has an id value, that's used. Create a new global/shared nhe if not found. 
+ */ +static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */ + struct nhg_hash_entry *lookup, + struct nhg_connected_tree_head *nhg_depends, + afi_t afi) +{ + bool created = false; + bool recursive = false; + struct nhg_hash_entry *newnhe, *backup_nhe; + struct nexthop *nh = NULL; + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: id %u, lookup %p, vrf %d, type %d, depends %p", + __func__, lookup->id, lookup, + lookup->vrf_id, lookup->type, + nhg_depends); + + if (lookup->id) + (*nhe) = zebra_nhg_lookup_id(lookup->id); + else + (*nhe) = hash_lookup(zrouter.nhgs, lookup); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: lookup => %p (%u)", + __func__, (*nhe), + (*nhe) ? (*nhe)->id : 0); + + /* If we found an existing object, we're done */ + if (*nhe) + goto done; + + /* We're going to create/insert a new nhe: + * assign the next global id value if necessary. + */ + if (lookup->id == 0) + lookup->id = ++id_counter; + newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc); + created = true; + + /* Mail back the new object */ + *nhe = newnhe; + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: => created %p (%u)", __func__, newnhe, + newnhe->id); + + /* Only hash/lookup the depends if the first lookup + * fails to find something. This should hopefully save a + * lot of cycles for larger ecmp sizes. + */ + if (nhg_depends) { + /* If you don't want to hash on each nexthop in the + * nexthop group struct you can pass the depends + * directly. Kernel-side we do this since it just looks + * them up via IDs. + */ + zebra_nhg_connect_depends(newnhe, nhg_depends); + goto done; + } + + /* Prepare dependency relationships if this is not a + * singleton nexthop. There are two cases: a single + * recursive nexthop, where we need a relationship to the + * resolving nexthop; or a group of nexthops, where we need + * relationships with the corresponding singletons. 
+ */ + zebra_nhg_depends_init(lookup); + + nh = newnhe->nhg.nexthop; + + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)) + SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID); + + if (nh->next == NULL) { + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) { + /* Single recursive nexthop */ + handle_recursive_depend(&newnhe->nhg_depends, + nh->resolved, afi); + recursive = true; + } + } else { + /* List of nexthops */ + for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: depends NH %pNHv %s", + __func__, nh, + CHECK_FLAG(nh->flags, + NEXTHOP_FLAG_RECURSIVE) ? + "(R)" : ""); + + depends_find_add(&newnhe->nhg_depends, nh, afi); + } + } + + if (recursive) + SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE); + + if (zebra_nhg_get_backup_nhg(newnhe) == NULL || + zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL) + goto done; + + /* If there are backup nexthops, add them to the backup + * depends tree. The rules here are a little different. + */ + recursive = false; + backup_nhe = newnhe->backup_info->nhe; + + nh = backup_nhe->nhg.nexthop; + + /* Singleton recursive NH */ + if (nh->next == NULL && + CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: backup depend NH %pNHv (R)", + __func__, nh); + + /* Single recursive nexthop */ + handle_recursive_depend(&backup_nhe->nhg_depends, + nh->resolved, afi); + recursive = true; + } else { + /* One or more backup NHs */ + for (; nh; nh = nh->next) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: backup depend NH %pNHv %s", + __func__, nh, + CHECK_FLAG(nh->flags, + NEXTHOP_FLAG_RECURSIVE) ? + "(R)" : ""); + + depends_find_add(&backup_nhe->nhg_depends, + nh, afi); + } + } + + if (recursive) + SET_FLAG(backup_nhe->flags, NEXTHOP_GROUP_RECURSIVE); + +done: + + return created; +} + +/* + * Lookup or create an nhe, based on an nhg or an nhe id. 
+ */ static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id, struct nexthop_group *nhg, struct nhg_connected_tree_head *nhg_depends, vrf_id_t vrf_id, afi_t afi, int type) { struct nhg_hash_entry lookup = {}; - - uint32_t old_id_counter = id_counter; - bool created = false; - bool recursive = false; - /* - * If it has an id at this point, we must have gotten it from the kernel - */ - lookup.id = id ? id : ++id_counter; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p", + __func__, id, nhg, vrf_id, type, + nhg_depends); + /* Use a temporary nhe and call into the superset/common code */ + lookup.id = id; lookup.type = type ? type : ZEBRA_ROUTE_NHG; lookup.nhg = *nhg; @@ -567,53 +823,8 @@ static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id, } } - if (id) - (*nhe) = zebra_nhg_lookup_id(id); - else - (*nhe) = hash_lookup(zrouter.nhgs, &lookup); - - /* If it found an nhe in our tables, this new ID is unused */ - if (*nhe) - id_counter = old_id_counter; + created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi); - if (!(*nhe)) { - /* Only hash/lookup the depends if the first lookup - * fails to find something. This should hopefully save a - * lot of cycles for larger ecmp sizes. - */ - if (nhg_depends) - /* If you don't want to hash on each nexthop in the - * nexthop group struct you can pass the depends - * directly. Kernel-side we do this since it just looks - * them up via IDs. 
- */ - lookup.nhg_depends = *nhg_depends; - else { - if (nhg->nexthop->next) { - zebra_nhg_depends_init(&lookup); - - /* If its a group, create a dependency tree */ - struct nexthop *nh = NULL; - - for (nh = nhg->nexthop; nh; nh = nh->next) - depends_find_add(&lookup.nhg_depends, - nh, afi); - } else if (CHECK_FLAG(nhg->nexthop->flags, - NEXTHOP_FLAG_RECURSIVE)) { - zebra_nhg_depends_init(&lookup); - handle_recursive_depend(&lookup.nhg_depends, - nhg->nexthop->resolved, - afi); - recursive = true; - } - } - - (*nhe) = hash_get(zrouter.nhgs, &lookup, zebra_nhg_hash_alloc); - created = true; - - if (recursive) - SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE); - } return created; } @@ -629,6 +840,10 @@ zebra_nhg_find_nexthop(uint32_t id, struct nexthop *nh, afi_t afi, int type) zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p (%u)", + __func__, nh, nhe, nhe ? nhe->id : 0); + return nhe; } @@ -807,6 +1022,9 @@ done: static void zebra_nhg_release(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u)", __func__, nhe, nhe->id); + /* Remove it from any lists it may be on */ zebra_nhg_depends_release(nhe); zebra_nhg_dependents_release(nhe); @@ -872,6 +1090,10 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) lookup = zebra_nhg_lookup_id(id); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: id %u, count %d, lookup => %p", + __func__, id, count, lookup); + if (lookup) { /* This is already present in our table, hence an update * that we did not initate. 
@@ -919,6 +1141,11 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) */ kernel_nhe = zebra_nhg_copy(nhe, id); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: copying kernel nhe (%u), dup of %u", + __func__, id, nhe->id); + zebra_nhg_insert_id(kernel_nhe); zebra_nhg_set_unhashable(kernel_nhe); } else if (zebra_nhg_contains_unhashable(nhe)) { @@ -926,10 +1153,18 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) * depend, so lets mark this group as unhashable as well * and release it from the non-ID hash. */ + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) unhashable", + __func__, nhe, nhe->id); + hash_release(zrouter.nhgs, nhe); zebra_nhg_set_unhashable(nhe); } else { /* It actually created a new nhe */ + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) is new", + __func__, nhe, nhe->id); + SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); } @@ -1038,6 +1273,10 @@ int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, { struct nhg_ctx *ctx = NULL; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv, id %u, count %d", + __func__, nh, id, (int)count); + if (id > id_counter) /* Increase our counter so we don't try to create * an ID that already exists @@ -1111,12 +1350,17 @@ static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh, /* The copy may have allocated labels; free them if necessary. */ nexthop_del_labels(&lookup); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p (%u)", + __func__, nh, nhe, nhe ? 
nhe->id : 0); + return nhe; } static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi) { struct nhg_hash_entry *nhe = NULL; + char rbuf[10]; if (!nh) goto done; @@ -1124,10 +1368,18 @@ static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi) /* We are separating these functions out to increase handling speed * in the non-recursive case (by not alloc/freeing) */ - if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) { nhe = depends_find_recursive(nh, afi); - else + strlcpy(rbuf, "(R)", sizeof(rbuf)); + } else { nhe = depends_find_singleton(nh, afi); + rbuf[0] = '\0'; + } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv %s => %p (%u)", + __func__, nh, rbuf, + nhe, nhe ? nhe->id : 0); done: return nhe; @@ -1136,6 +1388,10 @@ done: static void depends_add(struct nhg_connected_tree_head *head, struct nhg_hash_entry *depend) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: head %p nh %pNHv", + __func__, head, depend->nhg.nexthop); + /* If NULL is returned, it was successfully added and * needs to have its refcnt incremented. * @@ -1154,6 +1410,10 @@ depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh, depend = depends_find(nh, afi); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p", + __func__, nh, depend); + if (depend) depends_add(head, depend); @@ -1179,7 +1439,7 @@ static void depends_decrement_free(struct nhg_connected_tree_head *head) nhg_connected_tree_free(head); } -/* Rib-side, you get a nexthop group struct */ +/* Find an nhe based on a list of nexthops */ struct nhg_hash_entry * zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi) { @@ -1195,13 +1455,105 @@ zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi) zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, 0); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: => nhe %p (%u)", + __func__, nhe, nhe ? 
nhe->id : 0); + + return nhe; +} + +/* Find an nhe based on a route's nhe */ +struct nhg_hash_entry * +zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi) +{ + struct nhg_hash_entry *nhe = NULL; + + if (!(rt_nhe && rt_nhe->nhg.nexthop)) { + flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED, + "No nexthop passed to %s", __func__); + return NULL; + } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: rt_nhe %p (%u)", __func__, rt_nhe, rt_nhe->id); + + zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: => nhe %p (%u)", + __func__, nhe, nhe ? nhe->id : 0); + return nhe; } +/* + * Allocate backup nexthop info object. Typically these are embedded in + * nhg_hash_entry objects. + */ +struct nhg_backup_info *zebra_nhg_backup_alloc(void) +{ + struct nhg_backup_info *p; + + p = XCALLOC(MTYPE_NHG, sizeof(struct nhg_backup_info)); + + p->nhe = zebra_nhg_alloc(); + + /* Identify the embedded group used to hold the list of backups */ + SET_FLAG(p->nhe->flags, NEXTHOP_GROUP_BACKUP); + + return p; +} + +/* + * Free backup nexthop info object, deal with any embedded allocations + */ +void zebra_nhg_backup_free(struct nhg_backup_info **p) +{ + if (p && *p) { + if ((*p)->nhe) + zebra_nhg_free((*p)->nhe); + + XFREE(MTYPE_NHG, (*p)); + } +} + +/* Accessor for backup nexthop group */ +struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe) +{ + struct nexthop_group *p = NULL; + + if (nhe) { + if (nhe->backup_info && nhe->backup_info->nhe) + p = &(nhe->backup_info->nhe->nhg); + } + + return p; +} + +/* + * Helper to return a copy of a backup_info - note that this is a shallow + * copy, meant to be used when creating a new nhe from info passed in with + * a route e.g. 
+ */ +static struct nhg_backup_info * +nhg_backup_copy(const struct nhg_backup_info *orig) +{ + struct nhg_backup_info *b; + + b = zebra_nhg_backup_alloc(); + + /* Copy list of nexthops */ + nexthop_group_copy(&(b->nhe->nhg), &(orig->nhe->nhg)); + + return b; +} + static void zebra_nhg_free_members(struct nhg_hash_entry *nhe) { nexthops_free(nhe->nhg.nexthop); + zebra_nhg_backup_free(&nhe->backup_info); + /* Decrement to remove connection ref */ nhg_connected_tree_decrement_ref(&nhe->nhg_depends); nhg_connected_tree_free(&nhe->nhg_depends); @@ -1210,6 +1562,17 @@ static void zebra_nhg_free_members(struct nhg_hash_entry *nhe) void zebra_nhg_free(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) { + /* Group or singleton? */ + if (nhe->nhg.nexthop && nhe->nhg.nexthop->next) + zlog_debug("%s: nhe %p (%u), refcnt %d", + __func__, nhe, nhe->id, nhe->refcnt); + else + zlog_debug("%s: nhe %p (%u), refcnt %d, NH %pNHv", + __func__, nhe, nhe->id, nhe->refcnt, + nhe->nhg.nexthop); + } + if (nhe->refcnt) zlog_debug("nhe_id=%u hash refcnt=%d", nhe->id, nhe->refcnt); @@ -1225,6 +1588,11 @@ void zebra_nhg_hash_free(void *p) void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) %d => %d", + __func__, nhe, nhe->id, nhe->refcnt, + nhe->refcnt - 1); + nhe->refcnt--; if (!zebra_nhg_depends_is_empty(nhe)) @@ -1236,6 +1604,11 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe) void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) %d => %d", + __func__, nhe, nhe->id, nhe->refcnt, + nhe->refcnt + 1); + nhe->refcnt++; if (!zebra_nhg_depends_is_empty(nhe)) @@ -1385,6 +1758,10 @@ static int nexthop_active(afi_t afi, struct route_entry *re, nexthop->resolved = NULL; re->nexthop_mtu = 0; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p, nexthop %pNHv", + __func__, re, nexthop); + /* * If the kernel has sent us a NEW route, then * 
by golly gee whiz it's a good route. @@ -1533,6 +1910,12 @@ static int nexthop_active(afi_t afi, struct route_entry *re, || nexthop->type == NEXTHOP_TYPE_IPV6) nexthop->ifindex = newhop->ifindex; } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: CONNECT match %p (%u), newhop %pNHv", + __func__, match, + match->nhe->id, newhop); + return 1; } else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) { resolved = 0; @@ -1543,18 +1926,24 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (!nexthop_valid_resolve(nexthop, newhop)) continue; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: RECURSIVE match %p (%u), newhop %pNHv", + __func__, match, + match->nhe->id, newhop); + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE); nexthop_set_resolved(afi, newhop, nexthop); resolved = 1; } + if (resolved) re->nexthop_mtu = match->mtu; - - if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED) + else if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( " %s: Recursion failed to find", __func__); + return resolved; } else if (re->type == ZEBRA_ROUTE_STATIC) { resolved = 0; @@ -1565,6 +1954,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (!nexthop_valid_resolve(nexthop, newhop)) continue; + if (IS_ZEBRA_DEBUG_RIB_DETAILED) + zlog_debug("%s: STATIC match %p (%u), newhop %pNHv", + __func__, match, + match->nhe->id, newhop); + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE); nexthop_set_resolved(afi, newhop, nexthop); @@ -1683,11 +2077,11 @@ static unsigned nexthop_active_check(struct route_node *rn, default: break; } + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug( - " %s: Unable to find a active nexthop", - __func__); + zlog_debug(" %s: Unable to find active nexthop", + __func__); return 0; } @@ -1768,45 +2162,37 @@ done: } /* - * Iterate over all nexthops of the given RIB entry and refresh their - * ACTIVE flag. 
If any nexthop is found to toggle the ACTIVE flag, - * the whole re structure is flagged with ROUTE_ENTRY_CHANGED. - * - * Return value is the new number of active nexthops. + * Process a list of nexthops, given the head of the list, determining + * whether each one is ACTIVE/installable at this time. */ -int nexthop_active_update(struct route_node *rn, struct route_entry *re) +static uint32_t nexthop_list_active_update(struct route_node *rn, + struct route_entry *re, + struct nexthop *nexthop) { - struct nexthop_group new_grp = {}; - struct nexthop *nexthop; union g_addr prev_src; unsigned int prev_active, new_active; ifindex_t prev_index; - uint8_t curr_active = 0; + uint32_t counter = 0; - afi_t rt_afi = family2afi(rn->p.family); - - UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED); - - /* Copy over the nexthops in current state */ - nexthop_group_copy(&new_grp, &(re->nhe->nhg)); - - for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next) { + /* Process nexthops one-by-one */ + for ( ; nexthop; nexthop = nexthop->next) { /* No protocol daemon provides src and so we're skipping - * tracking it */ + * tracking it + */ prev_src = nexthop->rmap_src; prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE); prev_index = nexthop->ifindex; /* * We need to respect the multipath_num here * as that what we should be able to install from - * a multipath perpsective should not be a data plane + * a multipath perspective should not be a data plane * decision point. */ new_active = nexthop_active_check(rn, re, nexthop); - if (new_active && curr_active >= zrouter.multipath_num) { + if (new_active && counter >= zrouter.multipath_num) { struct nexthop *nh; /* Set it and its resolved nexthop as inactive. 
*/ @@ -1817,7 +2203,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) } if (new_active) - curr_active++; + counter++; /* Don't allow src setting on IPv6 addr for now */ if (prev_active != new_active || prev_index != nexthop->ifindex @@ -1833,14 +2219,79 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) SET_FLAG(re->status, ROUTE_ENTRY_CHANGED); } + return counter; +} + +/* + * Iterate over all nexthops of the given RIB entry and refresh their + * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag, + * the whole re structure is flagged with ROUTE_ENTRY_CHANGED. + * + * Return value is the new number of active nexthops. + */ +int nexthop_active_update(struct route_node *rn, struct route_entry *re) +{ + struct nhg_hash_entry *curr_nhe; + uint32_t curr_active = 0, backup_active = 0; + + afi_t rt_afi = family2afi(rn->p.family); + + UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED); + + /* Make a local copy of the existing nhe, so we don't work on/modify + * the shared nhe. + */ + curr_nhe = zebra_nhg_copy(re->nhe, re->nhe->id); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p nhe %p (%u), curr_nhe %p", + __func__, re, re->nhe, re->nhe->id, + curr_nhe); + + /* Clear the existing id, if any: this will avoid any confusion + * if the id exists, and will also force the creation + * of a new nhe reflecting the changes we may make in this local copy. 
+ */ + curr_nhe->id = 0; + + /* Process nexthops */ + curr_active = nexthop_list_active_update(rn, re, curr_nhe->nhg.nexthop); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p curr_active %u", __func__, re, + curr_active); + + /* If there are no backup nexthops, we are done */ + if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL) + goto backups_done; + + backup_active = nexthop_list_active_update( + rn, re, zebra_nhg_get_backup_nhg(curr_nhe)->nexthop); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p backup_active %u", __func__, re, + backup_active); + +backups_done: + + /* + * Ref or create an nhe that matches the current state of the + * nexthop(s). + */ if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) { struct nhg_hash_entry *new_nhe = NULL; - new_nhe = zebra_nhg_rib_find(0, &new_grp, rt_afi); + new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p CHANGED: nhe %p (%u) => new_nhe %p (%u)", + __func__, re, re->nhe, + re->nhe->id, new_nhe, new_nhe->id); route_entry_update_nhe(re, new_nhe); } + /* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID * flag where appropriate. */ @@ -1848,11 +2299,11 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) zebra_nhg_set_valid_if_active(re->nhe); /* - * Do not need these nexthops anymore since they - * were either copied over into an nhe or not + * Do not need the old / copied nhe anymore since it + * was either copied over into a new nhe or not * used at all. */ - nexthops_free(new_grp.nexthop); + zebra_nhg_free(curr_nhe); return curr_active; } @@ -1950,6 +2401,16 @@ static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp, } } + if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL) + goto done; + + /* TODO -- For now, we are not trying to use or install any + * backup info in this nexthop-id path: we aren't prepared + * to use the backups here yet. We're just debugging what we find. 
+ */ + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: skipping backup nhe", __func__); + done: return i; } @@ -2036,7 +2497,7 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx) id = dplane_ctx_get_nhe_id(ctx); - if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug( "Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s", ctx, dplane_op2str(op), id, dplane_res2str(status)); diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index dc3a47c020..0a9e97ab48 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -50,6 +50,9 @@ struct nhg_hash_entry { struct nexthop_group nhg; + /* If supported, a mapping of backup nexthops. */ + struct nhg_backup_info *backup_info; + /* If this is not a group, it * will be a single nexthop * and must have an interface @@ -72,6 +75,7 @@ struct nhg_hash_entry { * faster with ID's. */ struct nhg_connected_tree_head nhg_depends, nhg_dependents; + /* * Is this nexthop group valid, ie all nexthops are fully resolved. * What is fully resolved? It's a nexthop that is either self contained @@ -102,11 +106,25 @@ struct nhg_hash_entry { * from the kernel. Therefore, it is unhashable. */ #define NEXTHOP_GROUP_UNHASHABLE (1 << 4) + +/* + * Backup nexthop support - identify groups that are backups for + * another group. + */ +#define NEXTHOP_GROUP_BACKUP (1 << 5) + }; /* Was this one we created, either this session or previously? */ #define ZEBRA_NHG_CREATED(NHE) ((NHE->type) == ZEBRA_ROUTE_NHG) +/* + * Backup nexthops: this is a group object itself, so + * that the backup nexthops can use the same code as a normal object. + */ +struct nhg_backup_info { + struct nhg_hash_entry *nhe; +}; enum nhg_ctx_op_e { NHG_CTX_OP_NONE = 0, @@ -162,13 +180,26 @@ bool zebra_nhg_kernel_nexthops_enabled(void); /** * NHE abstracted tree functions. - * Use these where possible instead of the direct ones access ones. + * Use these where possible instead of direct access. 
*/ struct nhg_hash_entry *zebra_nhg_alloc(void); void zebra_nhg_free(struct nhg_hash_entry *nhe); /* In order to clear a generic hash, we need a generic api, sigh. */ void zebra_nhg_hash_free(void *p); +/* Init an nhe, for use in a hash lookup for example. There's some fuzziness + * if the nhe represents only a single nexthop, so we try to capture that + * variant also. + */ +void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi, + const struct nexthop *nh); + +/* Allocate, free backup nexthop info objects */ +struct nhg_backup_info *zebra_nhg_backup_alloc(void); +void zebra_nhg_backup_free(struct nhg_backup_info **p); + +struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe); + extern struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe); extern unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe); @@ -203,10 +234,14 @@ extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, /* Del via kernel */ extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id); -/* Find via route creation */ +/* Find an nhe based on a nexthop_group */ extern struct nhg_hash_entry * zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi); +/* Find an nhe based on a route's nhe, used during route creation */ +struct nhg_hash_entry * +zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi); + /* Reference counter functions */ extern void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe); extern void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe); diff --git a/zebra/zebra_ns.c b/zebra/zebra_ns.c index 3287176ef5..4e51437337 100644 --- a/zebra/zebra_ns.c +++ b/zebra/zebra_ns.c @@ -126,6 +126,7 @@ int zebra_ns_enable(ns_id_t ns_id, void **info) kernel_init(zns); interface_list(zns); route_read(zns); + kernel_read_pbr_rules(zns); /* Initiate Table Manager per ZNS */ table_manager_enable(ns_id); diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c index 6728567e6e..c049aa14f6 100644 --- 
a/zebra/zebra_pbr.c +++ b/zebra/zebra_pbr.c @@ -431,32 +431,51 @@ static void *pbr_rule_alloc_intern(void *arg) return new; } +static int pbr_rule_release(struct zebra_pbr_rule *rule) +{ + struct zebra_pbr_rule *lookup; + + lookup = hash_lookup(zrouter.rules_hash, rule); + + if (!lookup) + return -ENOENT; + + hash_release(zrouter.rules_hash, lookup); + XFREE(MTYPE_TMP, lookup); + + return 0; +} + void zebra_pbr_add_rule(struct zebra_pbr_rule *rule) { - struct zebra_pbr_rule *unique = - pbr_rule_lookup_unique(rule); + struct zebra_pbr_rule *found; - (void)hash_get(zrouter.rules_hash, rule, pbr_rule_alloc_intern); - (void)kernel_add_pbr_rule(rule); - /* - * Rule Replace semantics, if we have an old, install the - * new rule, look above, and then delete the old + /** + * Check if we already have it (this checks via a unique ID, walking + * over the hash table, not via a hash operation). */ - if (unique) - zebra_pbr_del_rule(unique); + found = pbr_rule_lookup_unique(rule); + + (void)hash_get(zrouter.rules_hash, rule, pbr_rule_alloc_intern); + + /* If found, this is an update */ + if (found) { + (void)kernel_update_pbr_rule(found, rule); + + if (pbr_rule_release(found)) + zlog_debug( + "%s: Rule being updated we know nothing about", + __PRETTY_FUNCTION__); + + } else + (void)kernel_add_pbr_rule(rule); } void zebra_pbr_del_rule(struct zebra_pbr_rule *rule) { - struct zebra_pbr_rule *lookup; - - lookup = hash_lookup(zrouter.rules_hash, rule); (void)kernel_del_pbr_rule(rule); - if (lookup) { - hash_release(zrouter.rules_hash, lookup); - XFREE(MTYPE_TMP, lookup); - } else + if (pbr_rule_release(rule)) zlog_debug("%s: Rule being deleted we know nothing about", __func__); } diff --git a/zebra/zebra_pbr.h b/zebra/zebra_pbr.h index b7fbc9b7d5..83797b9521 100644 --- a/zebra/zebra_pbr.h +++ b/zebra/zebra_pbr.h @@ -183,6 +183,13 @@ extern enum zebra_dplane_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule) extern enum zebra_dplane_result kernel_del_pbr_rule(struct 
zebra_pbr_rule *rule); /* + * Update specified rule for a specific interface. + */ +extern enum zebra_dplane_result +kernel_update_pbr_rule(struct zebra_pbr_rule *old_rule, + struct zebra_pbr_rule *new_rule); + +/* * Get to know existing PBR rules in the kernel - typically called at startup. */ extern void kernel_read_pbr_rules(struct zebra_ns *zns); diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c index 610a052c31..7a14f6304f 100644 --- a/zebra/zebra_pw.c +++ b/zebra/zebra_pw.c @@ -547,13 +547,18 @@ static int zebra_pw_config(struct vty *vty) return write; } +static int zebra_pw_config(struct vty *vty); static struct cmd_node pw_node = { - PW_NODE, "%s(config-pw)# ", 1, + .name = "pw", + .node = PW_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-pw)# ", + .config_write = zebra_pw_config, }; void zebra_pw_vty_init(void) { - install_node(&pw_node, zebra_pw_config); + install_node(&pw_node); install_default(PW_NODE); install_element(CONFIG_NODE, &pseudowire_if_cmd); diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index dc54dee785..2dbe907751 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -213,7 +213,7 @@ static void route_entry_attach_ref(struct route_entry *re, int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new) { - struct nhg_hash_entry *old = NULL; + struct nhg_hash_entry *old; int ret = 0; if (new == NULL) { @@ -223,7 +223,7 @@ int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new) goto done; } - if (re->nhe_id != new->id) { + if ((re->nhe_id != 0) && (re->nhe_id != new->id)) { old = re->nhe; route_entry_attach_ref(re, new); @@ -261,7 +261,7 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id, p.prefixlen = IPV6_MAX_PREFIXLEN; } - rn = route_node_match(table, (struct prefix *)&p); + rn = route_node_match(table, &p); while (rn) { rib_dest_t *dest; @@ -348,8 +348,8 @@ struct route_entry *rib_match_ipv4_multicast(vrf_id_t vrf_id, char buf[BUFSIZ]; inet_ntop(AF_INET, 
&addr, buf, BUFSIZ); - zlog_debug("%s: %s: vrf: %u found %s, using %s", - __func__, buf, vrf_id, + zlog_debug("%s: %s: vrf: %s(%u) found %s, using %s", __func__, + buf, vrf_id_to_name(vrf_id), vrf_id, mre ? (ure ? "MRIB+URIB" : "MRIB") : ure ? "URIB" : "nothing", re == ure ? "URIB" : re == mre ? "MRIB" : "none"); @@ -659,13 +659,14 @@ void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq) char buf1[PREFIX_STRLEN]; char buf2[PREFIX_STRLEN]; - zlog_debug("%u:%s has Nexthop(%s) Type: %s depending on it, evaluating %u:%u", - zvrf->vrf->vrf_id, - srcdest_rnode2str(rn, buf1, - sizeof(buf1)), - prefix2str(p, buf2, sizeof(buf2)), - rnh_type2str(rnh->type), - seq, rnh->seqno); + zlog_debug( + "%s(%u):%s has Nexthop(%s) Type: %s depending on it, evaluating %u:%u", + zvrf_name(zvrf), zvrf_id(zvrf), + srcdest_rnode2str(rn, buf1, + sizeof(buf1)), + prefix2str(p, buf2, sizeof(buf2)), + rnh_type2str(rnh->type), seq, + rnh->seqno); } /* @@ -753,8 +754,8 @@ static void rib_process_add_fib(struct zebra_vrf *zvrf, struct route_node *rn, if (IS_ZEBRA_DEBUG_RIB) { char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rn, buf, sizeof(buf)); - zlog_debug("%u:%s: Adding route rn %p, re %p (%s)", - zvrf_id(zvrf), buf, rn, new, + zlog_debug("%s(%u):%s: Adding route rn %p, re %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), buf, rn, new, zebra_route_string(new->type)); } @@ -776,8 +777,8 @@ static void rib_process_del_fib(struct zebra_vrf *zvrf, struct route_node *rn, if (IS_ZEBRA_DEBUG_RIB) { char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rn, buf, sizeof(buf)); - zlog_debug("%u:%s: Deleting route rn %p, re %p (%s)", - zvrf_id(zvrf), buf, rn, old, + zlog_debug("%s(%u):%s: Deleting route rn %p, re %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), buf, rn, old, zebra_route_string(old->type)); } @@ -829,15 +830,17 @@ static void rib_process_update_fib(struct zebra_vrf *zvrf, srcdest_rnode2str(rn, buf, sizeof(buf)); if (new != old) zlog_debug( - "%u:%s: Updating route rn %p, re %p (%s) old %p 
(%s)", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Updating route rn %p, re %p (%s) old %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type), old, zebra_route_string(old->type)); else zlog_debug( - "%u:%s: Updating route rn %p, re %p (%s)", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Updating route rn %p, re %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type)); } @@ -867,15 +870,17 @@ static void rib_process_update_fib(struct zebra_vrf *zvrf, srcdest_rnode2str(rn, buf, sizeof(buf)); if (new != old) zlog_debug( - "%u:%s: Deleting route rn %p, re %p (%s) old %p (%s) - nexthop inactive", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Deleting route rn %p, re %p (%s) old %p (%s) - nexthop inactive", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type), old, zebra_route_string(old->type)); else zlog_debug( - "%u:%s: Deleting route rn %p, re %p (%s) - nexthop inactive", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Deleting route rn %p, re %p (%s) - nexthop inactive", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type)); } @@ -990,6 +995,7 @@ static void rib_process(struct route_node *rn) char buf[SRCDEST2STR_BUFFER]; rib_dest_t *dest; struct zebra_vrf *zvrf = NULL; + struct vrf *vrf; const struct prefix *p, *src_p; srcdest_rnode_prefixes(rn, &p, &src_p); @@ -1003,11 +1009,14 @@ static void rib_process(struct route_node *rn) vrf_id = zvrf_id(zvrf); } + vrf = vrf_lookup_by_id(vrf_id); + if (IS_ZEBRA_DEBUG_RIB) srcdest_rnode2str(rn, buf, sizeof(buf)); if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("%u:%s: Processing rn %p", vrf_id, buf, rn); + zlog_debug("%s(%u):%s: Processing rn %p", VRF_LOGNAME(vrf), + vrf_id, buf, rn); /* * we can have rn's that have a NULL info pointer @@ -1021,10 +1030,10 @@ static void rib_process(struct route_node *rn) RNODE_FOREACH_RE_SAFE (rn, re, next) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "%u:%s: Examine re 
%p (%s) status %x flags %x dist %d metric %d", - vrf_id, buf, re, zebra_route_string(re->type), - re->status, re->flags, re->distance, - re->metric); + "%s(%u):%s: Examine re %p (%s) status %x flags %x dist %d metric %d", + VRF_LOGNAME(vrf), vrf_id, buf, re, + zebra_route_string(re->type), re->status, + re->flags, re->distance, re->metric); /* Currently selected re. */ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED)) { @@ -1065,9 +1074,11 @@ static void rib_process(struct route_node *rn) if (re != old_selected) { if (IS_ZEBRA_DEBUG_RIB) zlog_debug( - "%s: %u:%s: imported via import-table but denied " + "%s: %s(%u):%s: imported via import-table but denied " "by the ip protocol table route-map", - __func__, vrf_id, buf); + __func__, + VRF_LOGNAME(vrf), + vrf_id, buf); rib_unlink(rn, re); } else SET_FLAG(re->status, @@ -1118,9 +1129,9 @@ static void rib_process(struct route_node *rn) if (IS_ZEBRA_DEBUG_RIB_DETAILED) { zlog_debug( - "%u:%s: After processing: old_selected %p new_selected %p old_fib %p new_fib %p", - vrf_id, buf, (void *)old_selected, (void *)new_selected, - (void *)old_fib, (void *)new_fib); + "%s(%u):%s: After processing: old_selected %p new_selected %p old_fib %p new_fib %p", + VRF_LOGNAME(vrf), vrf_id, buf, (void *)old_selected, + (void *)new_selected, (void *)old_fib, (void *)new_fib); } /* Buffer ROUTE_ENTRY_CHANGED here, because it will get cleared if @@ -1191,8 +1202,8 @@ static void zebra_rib_evaluate_mpls(struct route_node *rn) if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_LSPS)) { if (IS_ZEBRA_DEBUG_MPLS) zlog_debug( - "%u: Scheduling all LSPs upon RIB completion", - zvrf_id(zvrf)); + "%s(%u): Scheduling all LSPs upon RIB completion", + zvrf_name(zvrf), zvrf_id(zvrf)); zebra_mpls_lsp_schedule(zvrf); mpls_unmark_lsps_for_processing(rn); } @@ -1299,6 +1310,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, bool is_selected = false; /* Is 're' currently the selected re? */ bool changed_p = false; /* Change to nexthops? 
*/ rib_dest_t *dest; + struct vrf *vrf; + + vrf = vrf_lookup_by_id(re->vrf_id); /* Note well: only capturing the prefix string if debug is enabled here; * unconditional log messages will have to generate the string. @@ -1311,8 +1325,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, is_selected = (re == dest->selected_fib); if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("update_from_ctx: %u:%s: %sSELECTED", - re->vrf_id, dest_str, (is_selected ? "" : "NOT ")); + zlog_debug("update_from_ctx: %s(%u):%s: %sSELECTED", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (is_selected ? "" : "NOT ")); /* Update zebra's nexthop FIB flag for each nexthop that was installed. * If the installed set differs from the set requested by the rib/owner, @@ -1325,11 +1340,10 @@ static bool rib_update_re_from_ctx(struct route_entry *re, * Let's assume the nexthops are ordered here to save time. */ if (nexthop_group_equal(&re->fib_ng, dplane_ctx_get_ng(ctx)) == false) { - if (IS_ZEBRA_DEBUG_RIB_DETAILED) { + if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "%u:%s update_from_ctx: notif nh and fib nh mismatch", - re->vrf_id, dest_str); - } + "%s(%u):%s update_from_ctx: notif nh and fib nh mismatch", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); matched = false; } else @@ -1338,8 +1352,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, /* If the new FIB set matches the existing FIB set, we're done. */ if (matched) { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): existing fib nhg, no change", - re->vrf_id, dest_str); + zlog_debug( + "%s(%u):%s update_from_ctx(): existing fib nhg, no change", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); goto done; } else if (re->fib_ng.nexthop) { @@ -1347,8 +1362,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, * Free stale fib list and move on to check the rib nhg. 
*/ if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): replacing fib nhg", - re->vrf_id, dest_str); + zlog_debug( + "%s(%u):%s update_from_ctx(): replacing fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); nexthops_free(re->fib_ng.nexthop); re->fib_ng.nexthop = NULL; @@ -1356,8 +1372,8 @@ static bool rib_update_re_from_ctx(struct route_entry *re, changed_p = true; } else { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): no fib nhg", - re->vrf_id, dest_str); + zlog_debug("%s(%u):%s update_from_ctx(): no fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); } /* @@ -1437,9 +1453,10 @@ static bool rib_update_re_from_ctx(struct route_entry *re, /* If all nexthops were processed, we're done */ if (matched) { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): rib nhg matched, changed '%s'", - re->vrf_id, dest_str, - (changed_p ? "true" : "false")); + zlog_debug( + "%s(%u):%s update_from_ctx(): rib nhg matched, changed '%s'", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (changed_p ? "true" : "false")); goto done; } @@ -1449,9 +1466,10 @@ no_nexthops: * create a fib-specific nexthop-group */ if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): changed %s, adding new fib nhg", - re->vrf_id, dest_str, - (changed_p ? "true" : "false")); + zlog_debug( + "%s(%u):%s update_from_ctx(): changed %s, adding new fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (changed_p ? 
"true" : "false")); ctxnhg = dplane_ctx_get_ng(ctx); @@ -1489,10 +1507,12 @@ rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx) dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx)); if (table == NULL) { if (IS_ZEBRA_DEBUG_DPLANE) { - zlog_debug("Failed to find route for ctx: no table for afi %d, safi %d, vrf %u", - dplane_ctx_get_afi(ctx), - dplane_ctx_get_safi(ctx), - dplane_ctx_get_vrf(ctx)); + zlog_debug( + "Failed to find route for ctx: no table for afi %d, safi %d, vrf %s(%u)", + dplane_ctx_get_afi(ctx), + dplane_ctx_get_safi(ctx), + vrf_id_to_name(dplane_ctx_get_vrf(ctx)), + dplane_ctx_get_vrf(ctx)); } goto done; } @@ -1515,6 +1535,7 @@ done: static void rib_process_result(struct zebra_dplane_ctx *ctx) { struct zebra_vrf *zvrf = NULL; + struct vrf *vrf; struct route_node *rn = NULL; struct route_entry *re = NULL, *old_re = NULL, *rib; bool is_update = false; @@ -1526,6 +1547,7 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) bool fib_changed = false; zvrf = vrf_info_lookup(dplane_ctx_get_vrf(ctx)); + vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); dest_pfx = dplane_ctx_get_dest(ctx); /* Note well: only capturing the prefix string if debug is enabled here; @@ -1538,8 +1560,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) rn = rib_find_rn_from_ctx(ctx); if (rn == NULL) { if (IS_ZEBRA_DEBUG_DPLANE) { - zlog_debug("Failed to process dplane results: no route for %u:%s", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "Failed to process dplane results: no route for %s(%u):%s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); } goto done; } @@ -1550,9 +1574,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) status = dplane_ctx_get_status(ctx); if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s Processing dplane ctx %p, op %s result %s", - dplane_ctx_get_vrf(ctx), dest_str, ctx, - dplane_op2str(op), dplane_res2str(status)); + zlog_debug( + "%s(%u):%s Processing dplane ctx %p, op %s result %s", + 
VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), dest_str, + ctx, dplane_op2str(op), dplane_res2str(status)); /* * Update is a bit of a special case, where we may have both old and new @@ -1590,9 +1615,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) if (re) { if (re->dplane_sequence != seq) { if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s Stale dplane result for re %p", - dplane_ctx_get_vrf(ctx), - dest_str, re); + zlog_debug( + "%s(%u):%s Stale dplane result for re %p", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, re); } else UNSET_FLAG(re->status, ROUTE_ENTRY_QUEUED); } @@ -1600,9 +1626,11 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) if (old_re) { if (old_re->dplane_sequence != dplane_ctx_get_old_seq(ctx)) { if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s Stale dplane result for old_re %p", - dplane_ctx_get_vrf(ctx), - dest_str, old_re); + zlog_debug( + "%s(%u):%s Stale dplane result for old_re %p", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, + old_re); } else UNSET_FLAG(old_re->status, ROUTE_ENTRY_QUEUED); } @@ -1639,10 +1667,11 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) if (!fib_changed) { if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s no fib change for re", - dplane_ctx_get_vrf( - ctx), - dest_str); + zlog_debug( + "%s(%u):%s no fib change for re", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), + dest_str); } /* Redistribute */ @@ -1677,10 +1706,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) zsend_route_notify_owner(re, dest_pfx, ZAPI_ROUTE_FAIL_INSTALL); - zlog_warn("%u:%s: Route install failed", - dplane_ctx_get_vrf(ctx), - prefix2str(dest_pfx, - dest_str, sizeof(dest_str))); + zlog_warn("%s(%u):%s: Route install failed", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + prefix2str(dest_pfx, dest_str, + sizeof(dest_str))); } break; case DPLANE_OP_ROUTE_DELETE: @@ -1706,10 +1735,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) 
zsend_route_notify_owner_ctx(ctx, ZAPI_ROUTE_REMOVE_FAIL); - zlog_warn("%u:%s: Route Deletion failure", - dplane_ctx_get_vrf(ctx), - prefix2str(dest_pfx, - dest_str, sizeof(dest_str))); + zlog_warn("%s(%u):%s: Route Deletion failure", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + prefix2str(dest_pfx, dest_str, + sizeof(dest_str))); } /* @@ -1747,6 +1776,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) { struct route_node *rn = NULL; struct route_entry *re = NULL; + struct vrf *vrf; struct nexthop *nexthop; char dest_str[PREFIX_STRLEN] = ""; const struct prefix *dest_pfx, *src_pfx; @@ -1755,6 +1785,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) bool debug_p = IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_RIB; int start_count, end_count; dest_pfx = dplane_ctx_get_dest(ctx); + vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); /* Note well: only capturing the prefix string if debug is enabled here; * unconditional log messages will have to generate the string. @@ -1766,8 +1797,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) rn = rib_find_rn_from_ctx(ctx); if (rn == NULL) { if (debug_p) { - zlog_debug("Failed to process dplane notification: no routes for %u:%s", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "Failed to process dplane notification: no routes for %s(%u):%s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); } goto done; } @@ -1776,8 +1809,9 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) srcdest_rnode_prefixes(rn, &dest_pfx, &src_pfx); if (debug_p) - zlog_debug("%u:%s Processing dplane notif ctx %p", - dplane_ctx_get_vrf(ctx), dest_str, ctx); + zlog_debug("%s(%u):%s Processing dplane notif ctx %p", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), dest_str, + ctx); /* * Take a pass through the routes, look for matches with the context @@ -1791,10 +1825,11 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) /* No match? 
Nothing we can do */ if (re == NULL) { if (debug_p) - zlog_debug("%u:%s Unable to process dplane notification: no entry for type %s", - dplane_ctx_get_vrf(ctx), dest_str, - zebra_route_string( - dplane_ctx_get_type(ctx))); + zlog_debug( + "%s(%u):%s Unable to process dplane notification: no entry for type %s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str, + zebra_route_string(dplane_ctx_get_type(ctx))); goto done; } @@ -1824,17 +1859,21 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED); if (debug_p) - zlog_debug("%u:%s dplane notif, uninstalled type %s route", - dplane_ctx_get_vrf(ctx), dest_str, - zebra_route_string( - dplane_ctx_get_type(ctx))); + zlog_debug( + "%s(%u):%s dplane notif, uninstalled type %s route", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, + zebra_route_string( + dplane_ctx_get_type(ctx))); } else { /* At least report on the event. 
*/ if (debug_p) - zlog_debug("%u:%s dplane notif, but type %s not selected_fib", - dplane_ctx_get_vrf(ctx), dest_str, - zebra_route_string( - dplane_ctx_get_type(ctx))); + zlog_debug( + "%s(%u):%s dplane notif, but type %s not selected_fib", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, + zebra_route_string( + dplane_ctx_get_type(ctx))); } goto done; } @@ -1859,8 +1898,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) if (!fib_changed) { if (debug_p) - zlog_debug("%u:%s dplane notification: rib_update returns FALSE", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s dplane notification: rib_update returns FALSE", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); } /* @@ -1879,8 +1920,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) */ if (start_count > 0 && end_count > 0) { if (debug_p) - zlog_debug("%u:%s applied nexthop changes from dplane notification", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s applied nexthop changes from dplane notification", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); /* Changed nexthops - update kernel/others */ dplane_route_notif_update(rn, re, @@ -1888,8 +1931,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) } else if (start_count == 0 && end_count > 0) { if (debug_p) - zlog_debug("%u:%s installed transition from dplane notification", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s installed transition from dplane notification", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); /* We expect this to be the selected route, so we want * to tell others about this transition. 
@@ -1904,8 +1949,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) } else if (start_count > 0 && end_count == 0) { if (debug_p) - zlog_debug("%u:%s un-installed transition from dplane notification", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s un-installed transition from dplane notification", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); /* Transition from _something_ installed to _nothing_ * installed. @@ -1970,8 +2017,8 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex) char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rnode, buf, sizeof(buf)); - zlog_debug("%u:%s: rn %p dequeued from sub-queue %u", - zvrf ? zvrf_id(zvrf) : 0, buf, rnode, qindex); + zlog_debug("%s(%u):%s: rn %p dequeued from sub-queue %u", + zvrf_name(zvrf), zvrf_id(zvrf), buf, rnode, qindex); } if (rnode->info) @@ -2338,7 +2385,6 @@ static void rib_addnode(struct route_node *rn, void rib_unlink(struct route_node *rn, struct route_entry *re) { rib_dest_t *dest; - struct nhg_hash_entry *nhe = NULL; assert(rn && re); @@ -2353,11 +2399,10 @@ void rib_unlink(struct route_node *rn, struct route_entry *re) if (dest->selected_fib == re) dest->selected_fib = NULL; - if (re->nhe_id) { - nhe = zebra_nhg_lookup_id(re->nhe_id); - if (nhe) - zebra_nhg_decrement_ref(nhe); - } else if (re->nhe->nhg.nexthop) + if (re->nhe && re->nhe_id) { + assert(re->nhe->id == re->nhe_id); + zebra_nhg_decrement_ref(re->nhe); + } else if (re->nhe && re->nhe->nhg.nexthop) nexthops_free(re->nhe->nhg.nexthop); nexthops_free(re->fib_ng.nexthop); @@ -2385,9 +2430,9 @@ void rib_delnode(struct route_node *rn, struct route_entry *re) if (IS_ZEBRA_DEBUG_RIB) { char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rn, buf, sizeof(buf)); - zlog_debug("%u:%s: Freeing route rn %p, re %p (%s)", - re->vrf_id, buf, rn, re, - zebra_route_string(re->type)); + zlog_debug("%s(%u):%s: Freeing route rn %p, re %p (%s)", + vrf_id_to_name(re->vrf_id), re->vrf_id, buf, + rn, re, 
zebra_route_string(re->type)); } rib_unlink(rn, re); @@ -2396,11 +2441,75 @@ void rib_delnode(struct route_node *rn, struct route_entry *re) } } +/* + * Helper that debugs a single nexthop within a route-entry + */ +static void _route_entry_dump_nh(const struct route_entry *re, + const char *straddr, + const struct nexthop *nexthop) +{ + char nhname[PREFIX_STRLEN]; + char backup_str[50]; + char wgt_str[50]; + struct interface *ifp; + struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + + switch (nexthop->type) { + case NEXTHOP_TYPE_BLACKHOLE: + sprintf(nhname, "Blackhole"); + break; + case NEXTHOP_TYPE_IFINDEX: + ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id); + sprintf(nhname, "%s", ifp ? ifp->name : "Unknown"); + break; + case NEXTHOP_TYPE_IPV4: + /* fallthrough */ + case NEXTHOP_TYPE_IPV4_IFINDEX: + inet_ntop(AF_INET, &nexthop->gate, nhname, INET6_ADDRSTRLEN); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + inet_ntop(AF_INET6, &nexthop->gate, nhname, INET6_ADDRSTRLEN); + break; + } + + backup_str[0] = '\0'; + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + snprintf(backup_str, sizeof(backup_str), "backup %d,", + (int)nexthop->backup_idx); + } + + wgt_str[0] = '\0'; + if (nexthop->weight) + snprintf(wgt_str, sizeof(wgt_str), "wgt %d,", nexthop->weight); + + zlog_debug("%s: %s %s[%u] vrf %s(%u) %s%s with flags %s%s%s%s%s", + straddr, (nexthop->rparent ? " NH" : "NH"), nhname, + nexthop->ifindex, vrf ? vrf->name : "Unknown", + nexthop->vrf_id, + wgt_str, backup_str, + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) + ? "ACTIVE " + : ""), + (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) + ? "FIB " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE) + ? "RECURSIVE " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK) + ? "ONLINK " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE) + ? 
"DUPLICATE " + : "")); + +} + /* This function dumps the contents of a given RE entry into * standard debug log. Calling function name and IP prefix in * question are passed as 1st and 2nd arguments. */ - void _route_entry_dump(const char *func, union prefixconstptr pp, union prefixconstptr src_pp, const struct route_entry *re) @@ -2409,9 +2518,9 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, bool is_srcdst = src_p && src_p->prefixlen; char straddr[PREFIX_STRLEN]; char srcaddr[PREFIX_STRLEN]; - char nhname[PREFIX_STRLEN]; struct nexthop *nexthop; struct vrf *vrf = vrf_lookup_by_id(re->vrf_id); + struct nexthop_group *nhg; zlog_debug("%s: dumping RE entry %p for %s%s%s vrf %s(%u)", func, (const void *)re, prefix2str(pp, straddr, sizeof(straddr)), @@ -2422,78 +2531,48 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, zlog_debug("%s: uptime == %lu, type == %u, instance == %d, table == %d", straddr, (unsigned long)re->uptime, re->type, re->instance, re->table); - zlog_debug( - "%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u", - straddr, re->metric, re->mtu, re->distance, re->flags, re->status); + zlog_debug("%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u", + straddr, re->metric, re->mtu, re->distance, re->flags, + re->status); zlog_debug("%s: nexthop_num == %u, nexthop_active_num == %u", straddr, nexthop_group_nexthop_num(&(re->nhe->nhg)), nexthop_group_active_nexthop_num(&(re->nhe->nhg))); - for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) { - struct interface *ifp; - struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* Dump nexthops */ + for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) + _route_entry_dump_nh(re, straddr, nexthop); - switch (nexthop->type) { - case NEXTHOP_TYPE_BLACKHOLE: - sprintf(nhname, "Blackhole"); - break; - case NEXTHOP_TYPE_IFINDEX: - ifp = if_lookup_by_index(nexthop->ifindex, - nexthop->vrf_id); - sprintf(nhname, "%s", ifp ? 
ifp->name : "Unknown"); - break; - case NEXTHOP_TYPE_IPV4: - /* fallthrough */ - case NEXTHOP_TYPE_IPV4_IFINDEX: - inet_ntop(AF_INET, &nexthop->gate, nhname, - INET6_ADDRSTRLEN); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - inet_ntop(AF_INET6, &nexthop->gate, nhname, - INET6_ADDRSTRLEN); - break; - } - zlog_debug("%s: %s %s[%u] vrf %s(%u) with flags %s%s%s%s%s", - straddr, (nexthop->rparent ? " NH" : "NH"), nhname, - nexthop->ifindex, vrf ? vrf->name : "Unknown", - nexthop->vrf_id, - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) - ? "ACTIVE " - : ""), - (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) - ? "FIB " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE) - ? "RECURSIVE " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK) - ? "ONLINK " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE) - ? "DUPLICATE " - : "")); + if (zebra_nhg_get_backup_nhg(re->nhe)) { + zlog_debug("%s: backup nexthops:", straddr); + + nhg = zebra_nhg_get_backup_nhg(re->nhe); + for (ALL_NEXTHOPS_PTR(nhg, nexthop)) + _route_entry_dump_nh(re, straddr, nexthop); } + zlog_debug("%s: dump complete", straddr); } -/* This is an exported helper to rtm_read() to dump the strange +/* + * This is an exported helper to rtm_read() to dump the strange * RE entry found by rib_lookup_ipv4_route() */ - void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id) { struct route_table *table; struct route_node *rn; struct route_entry *re; + struct vrf *vrf; char prefix_buf[INET_ADDRSTRLEN]; + vrf = vrf_lookup_by_id(vrf_id); + /* Lookup table. */ table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id); if (!table) { flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED, - "%s:%u zebra_vrf_table() returned NULL", __func__, - vrf_id); + "%s:%s(%u) zebra_vrf_table() returned NULL", __func__, + VRF_LOGNAME(vrf), vrf_id); return; } @@ -2502,7 +2581,8 @@ void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id) /* No route for this prefix. 
*/ if (!rn) { - zlog_debug("%s:%u lookup failed for %s", __func__, vrf_id, + zlog_debug("%s:%s(%u) lookup failed for %s", __func__, + VRF_LOGNAME(vrf), vrf_id, prefix2str((struct prefix *)p, prefix_buf, sizeof(prefix_buf))); return; @@ -2513,9 +2593,8 @@ void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id) /* let's go */ RNODE_FOREACH_RE (rn, re) { - zlog_debug("%s:%u rn %p, re %p: %s, %s", - __func__, vrf_id, - (void *)rn, (void *)re, + zlog_debug("%s:%s(%u) rn %p, re %p: %s, %s", __func__, + VRF_LOGNAME(vrf), vrf_id, (void *)rn, (void *)re, (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED) ? "removed" : "NOT removed"), @@ -2538,9 +2617,11 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id) rib_dest_t *dest; if (NULL == (table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id))) { + struct vrf *vrf = vrf_lookup_by_id(vrf_id); + flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED, - "%s:%u zebra_vrf_table() returned NULL", __func__, - vrf_id); + "%s:%s(%u) zebra_vrf_table() returned NULL", __func__, + VRF_LOGNAME(vrf), vrf_id); return; } @@ -2563,10 +2644,13 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id) if (dest->selected_fib) { if (IS_ZEBRA_DEBUG_RIB) { char buf[PREFIX_STRLEN]; + struct vrf *vrf = + vrf_lookup_by_id(dest->selected_fib->vrf_id); - zlog_debug("%u:%s: freeing way for connected prefix", - dest->selected_fib->vrf_id, - prefix2str(&rn->p, buf, sizeof(buf))); + zlog_debug( + "%s(%u):%s: freeing way for connected prefix", + VRF_LOGNAME(vrf), dest->selected_fib->vrf_id, + prefix2str(&rn->p, buf, sizeof(buf))); route_entry_dump(&rn->p, NULL, dest->selected_fib); } rib_uninstall(rn, dest->selected_fib); @@ -2574,9 +2658,16 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id) } } -int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, - struct prefix_ipv6 *src_p, struct route_entry *re, - struct nexthop_group *ng) +/* + * Internal route-add implementation; there are a couple of different public + * 
signatures. Callers in this path are responsible for the memory they + * allocate: if they allocate a nexthop_group or backup nexthop info, they + * must free those objects. If this returns < 0, an error has occurred and the + * route_entry 're' has not been captured; the caller should free that also. + */ +int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, + struct prefix_ipv6 *src_p, struct route_entry *re, + struct nhg_hash_entry *re_nhe) { struct nhg_hash_entry *nhe = NULL; struct route_table *table; @@ -2584,41 +2675,31 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, struct route_entry *same = NULL; int ret = 0; - if (!re) - return 0; + if (!re || !re_nhe) + return -1; assert(!src_p || !src_p->prefixlen || afi == AFI_IP6); /* Lookup table. */ table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id, re->table); - if (!table) { - if (ng) - nexthop_group_delete(&ng); - XFREE(MTYPE_RE, re); - return 0; - } + if (!table) + return -1; - if (re->nhe_id) { - nhe = zebra_nhg_lookup_id(re->nhe_id); + if (re_nhe->id > 0) { + nhe = zebra_nhg_lookup_id(re_nhe->id); if (!nhe) { flog_err( EC_ZEBRA_TABLE_LOOKUP_FAILED, "Zebra failed to find the nexthop hash entry for id=%u in a route entry", - re->nhe_id); - XFREE(MTYPE_RE, re); + re_nhe->id); + return -1; } } else { - nhe = zebra_nhg_rib_find(0, ng, afi); - - /* - * The nexthops got copied over into an nhe, - * so free them now. - */ - nexthop_group_delete(&ng); - + /* Lookup nhe from route information */ + nhe = zebra_nhg_rib_find_nhe(re_nhe, afi); if (!nhe) { char buf[PREFIX_STRLEN] = ""; char buf2[PREFIX_STRLEN] = ""; @@ -2631,7 +2712,6 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, src_p ? 
prefix2str(src_p, buf2, sizeof(buf2)) : ""); - XFREE(MTYPE_RE, re); return -1; } } @@ -2709,15 +2789,51 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, ret = 1; /* Free implicit route.*/ - if (same) { + if (same) rib_delnode(rn, same); - ret = -1; - } route_unlock_node(rn); return ret; } +/* + * Add a single route. + */ +int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, + struct prefix_ipv6 *src_p, struct route_entry *re, + struct nexthop_group *ng) +{ + int ret; + struct nhg_hash_entry nhe; + + if (!re) + return -1; + + /* We either need nexthop(s) or an existing nexthop id */ + if (ng == NULL && re->nhe_id == 0) + return -1; + + /* + * Use a temporary nhe to convey info to the common/main api. + */ + zebra_nhe_init(&nhe, afi, (ng ? ng->nexthop : NULL)); + if (ng) + nhe.nhg.nexthop = ng->nexthop; + else if (re->nhe_id > 0) + nhe.id = re->nhe_id; + + ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, &nhe); + + /* In this path, the callers expect memory to be freed. */ + nexthop_group_delete(&ng); + + /* In error cases, free the route also */ + if (ret < 0) + XFREE(MTYPE_RE, re); + + return ret; +} + void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, unsigned short instance, int flags, struct prefix *p, struct prefix_ipv6 *src_p, const struct nexthop *nh, @@ -3030,8 +3146,7 @@ void rib_update_table(struct route_table *table, rib_update_event_t event) table->info ? afi2str( ((rib_table_info_t *)table->info)->afi) : "Unknown", - vrf ? vrf->name : "Unknown", - zvrf ? zvrf->table_id : 0, + VRF_LOGNAME(vrf), zvrf ? 
zvrf->table_id : 0, rib_update_event2str(event)); } @@ -3188,6 +3303,9 @@ void rib_sweep_table(struct route_table *table) if (!table) return; + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug("%s: starting", __func__); + for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) { RNODE_FOREACH_RE_SAFE (rn, re, next) { @@ -3234,6 +3352,9 @@ void rib_sweep_table(struct route_table *table) rib_delnode(rn, re); } } + + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug("%s: ends", __func__); } /* Sweep all RIB tables. */ diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c index 500c2c84a1..2b3b3afbb5 100644 --- a/zebra/zebra_routemap.c +++ b/zebra/zebra_routemap.c @@ -30,6 +30,8 @@ #include "filter.h" #include "plist.h" #include "nexthop.h" +#include "northbound_cli.h" +#include "route_types.h" #include "vrf.h" #include "frrstr.h" @@ -58,82 +60,6 @@ struct nh_rmap_obj { static void zebra_route_map_set_delay_timer(uint32_t value); - -/* Add zebra route map rule */ -static int zebra_route_match_add(struct vty *vty, const char *command, - const char *arg, route_map_event_t type) -{ - VTY_DECLVAR_CONTEXT(route_map_index, index); - enum rmap_compile_rets ret; - int retval = CMD_SUCCESS; - - ret = route_map_add_match(index, command, arg, type); - switch (ret) { - case RMAP_RULE_MISSING: - vty_out(vty, "%% Zebra Can't find rule.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_ERROR: - vty_out(vty, "%% Zebra Argument is malformed.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_SUCCESS: - /* - * Nothing to do here - */ - break; - } - - return retval; -} - -/* Delete zebra route map rule. 
*/ -static int zebra_route_match_delete(struct vty *vty, const char *command, - const char *arg, route_map_event_t type) -{ - VTY_DECLVAR_CONTEXT(route_map_index, index); - enum rmap_compile_rets ret; - int retval = CMD_SUCCESS; - char *dep_name = NULL; - const char *tmpstr; - char *rmap_name = NULL; - - if (type != RMAP_EVENT_MATCH_DELETED) { - /* ignore the mundane, the types without any dependency */ - if (arg == NULL) { - if ((tmpstr = route_map_get_match_arg(index, command)) - != NULL) - dep_name = - XSTRDUP(MTYPE_ROUTE_MAP_RULE, tmpstr); - } else { - dep_name = XSTRDUP(MTYPE_ROUTE_MAP_RULE, arg); - } - rmap_name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, index->map->name); - } - - ret = route_map_delete_match(index, command, arg, type); - switch (ret) { - case RMAP_RULE_MISSING: - vty_out(vty, "%% Zebra Can't find rule.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_ERROR: - vty_out(vty, "%% Zebra Argument is malformed.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_SUCCESS: - /* - * Nothing to do here - */ - break; - } - - XFREE(MTYPE_ROUTE_MAP_RULE, dep_name); - XFREE(MTYPE_ROUTE_MAP_NAME, rmap_name); - - return retval; -} - /* 'match tag TAG' * Match function return 1 if match is success else return 0 */ @@ -425,246 +351,227 @@ static int ip_nht_rm_del(struct zebra_vrf *zvrf, const char *rmap, int rtype, return CMD_SUCCESS; } -DEFUN (match_ip_address_prefix_len, - match_ip_address_prefix_len_cmd, - "match ip address prefix-len (0-32)", - MATCH_STR - IP_STR - "Match prefix length of ip address\n" - "Match prefix length of ip address\n" - "Prefix length\n") +DEFPY( + match_ip_address_prefix_len, match_ip_address_prefix_len_cmd, + "match ip address prefix-len (0-32)$length", + MATCH_STR + IP_STR + "Match prefix length of IP address\n" + "Match prefix length of IP address\n" + "Prefix length\n") { - return zebra_route_match_add(vty, "ip address prefix-len", argv[4]->arg, - RMAP_EVENT_MATCH_ADDED); + const char *xpath = 
"./match-condition[condition='ipv4-prefix-length']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:ipv4-prefix-length", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, length_str); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_ip_address_prefix_len, - no_match_ip_address_prefix_len_cmd, - "no match ip address prefix-len [(0-32)]", - NO_STR - MATCH_STR - IP_STR - "Match prefix length of ip address\n" - "Match prefix length of ip address\n" - "Prefix length\n") +DEFPY( + no_match_ip_address_prefix_len, no_match_ip_address_prefix_len_cmd, + "no match ip address prefix-len [(0-32)]", + NO_STR + MATCH_STR + IP_STR + "Match prefix length of IP address\n" + "Match prefix length of IP address\n" + "Prefix length\n") { - char *plen = (argc == 6) ? argv[5]->arg : NULL; - return zebra_route_match_delete(vty, "ip address prefix-len", plen, - RMAP_EVENT_MATCH_DELETED); + const char *xpath = "./match-condition[condition='ipv4-prefix-length']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (match_ipv6_address_prefix_len, - match_ipv6_address_prefix_len_cmd, - "match ipv6 address prefix-len (0-128)", - MATCH_STR - IPV6_STR - "Match prefix length of ipv6 address\n" - "Match prefix length of ipv6 address\n" - "Prefix length\n") +DEFPY( + match_ipv6_address_prefix_len, match_ipv6_address_prefix_len_cmd, + "match ipv6 address prefix-len (0-128)$length", + MATCH_STR + IPV6_STR + "Match prefix length of IPv6 address\n" + "Match prefix length of IPv6 address\n" + "Prefix length\n") { - return zebra_route_match_add(vty, "ipv6 address prefix-len", - argv[4]->arg, RMAP_EVENT_MATCH_ADDED); + const char *xpath = "./match-condition[condition='ipv6-prefix-length']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, 
sizeof(xpath_value), + "%s/frr-zebra:ipv6-prefix-length", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, length_str); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_ipv6_address_prefix_len, - no_match_ipv6_address_prefix_len_cmd, - "no match ipv6 address prefix-len [(0-128)]", - NO_STR - MATCH_STR - IPV6_STR - "Match prefix length of ip address\n" - "Match prefix length of ip address\n" - "Prefix length\n") +DEFPY( + no_match_ipv6_address_prefix_len, no_match_ipv6_address_prefix_len_cmd, + "no match ipv6 address prefix-len [(0-128)]", + NO_STR + MATCH_STR + IPV6_STR + "Match prefix length of IPv6 address\n" + "Match prefix length of IPv6 address\n" + "Prefix length\n") { - char *plen = (argc == 6) ? argv[5]->arg : NULL; - return zebra_route_match_delete(vty, "ipv6 address prefix-len", plen, - RMAP_EVENT_MATCH_DELETED); + const char *xpath = "./match-condition[condition='ipv6-prefix-length']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (match_ip_nexthop_prefix_len, - match_ip_nexthop_prefix_len_cmd, - "match ip next-hop prefix-len (0-32)", - MATCH_STR - IP_STR - "Match prefixlen of nexthop ip address\n" - "Match prefixlen of given nexthop\n" - "Prefix length\n") +DEFPY( + match_ip_nexthop_prefix_len, match_ip_nexthop_prefix_len_cmd, + "match ip next-hop prefix-len (0-32)$length", + MATCH_STR + IP_STR + "Match prefixlen of nexthop IP address\n" + "Match prefixlen of given nexthop\n" + "Prefix length\n") { - return zebra_route_match_add(vty, "ip next-hop prefix-len", - argv[4]->arg, RMAP_EVENT_MATCH_ADDED); + const char *xpath = + "./match-condition[condition='ipv4-next-hop-prefix-length']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:ipv4-prefix-length", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, length_str); + + return 
nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_ip_nexthop_prefix_len, - no_match_ip_nexthop_prefix_len_cmd, - "no match ip next-hop prefix-len [(0-32)]", - NO_STR - MATCH_STR - IP_STR - "Match prefixlen of nexthop ip address\n" - "Match prefix length of nexthop\n" - "Prefix length\n") -{ - char *plen = (argc == 6) ? argv[5]->arg : NULL; - return zebra_route_match_delete(vty, "ip next-hop prefix-len", plen, - RMAP_EVENT_MATCH_DELETED); -} - -DEFUN (match_source_protocol, - match_source_protocol_cmd, - "match source-protocol <bgp|ospf|rip|ripng|isis|ospf6|pim|nhrp|eigrp|babel|connected|system|kernel|static|sharp>", - MATCH_STR - "Match protocol via which the route was learnt\n" - "BGP protocol\n" - "OSPF protocol\n" - "RIP protocol\n" - "RIPNG protocol\n" - "ISIS protocol\n" - "OSPF6 protocol\n" - "PIM protocol\n" - "NHRP protocol\n" - "EIGRP protocol\n" - "BABEL protocol\n" - "Routes from directly connected peer\n" - "Routes from system configuration\n" - "Routes from kernel\n" - "Statically configured routes\n" - "SHARP process\n") -{ - char *proto = argv[2]->text; - int i; +DEFPY( + no_match_ip_nexthop_prefix_len, no_match_ip_nexthop_prefix_len_cmd, + "no match ip next-hop prefix-len [(0-32)]", + NO_STR + MATCH_STR + IP_STR + "Match prefixlen of nexthop IP address\n" + "Match prefix length of nexthop\n" + "Prefix length\n") +{ + const char *xpath = + "./match-condition[condition='ipv4-next-hop-prefix-length']"; - i = proto_name2num(proto); - if (i < 0) { - vty_out(vty, "invalid protocol name \"%s\"\n", proto); - return CMD_WARNING_CONFIG_FAILED; - } - return zebra_route_match_add(vty, "source-protocol", proto, - RMAP_EVENT_MATCH_ADDED); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_source_protocol, - no_match_source_protocol_cmd, - "no match source-protocol [<bgp|ospf|rip|ripng|isis|ospf6|pim|nhrp|eigrp|babel|connected|system|kernel|static|sharp>]", - NO_STR - MATCH_STR - "No match 
protocol via which the route was learnt\n" - "BGP protocol\n" - "OSPF protocol\n" - "RIP protocol\n" - "RIPNG protocol\n" - "ISIS protocol\n" - "OSPF6 protocol\n" - "PIM protocol\n" - "NHRP protocol\n" - "EIGRP protocol\n" - "BABEL protocol\n" - "Routes from directly connected peer\n" - "Routes from system configuration\n" - "Routes from kernel\n" - "Statically configured routes\n" - "SHARP process\n") -{ - char *proto = (argc == 4) ? argv[3]->text : NULL; - return zebra_route_match_delete(vty, "source-protocol", proto, - RMAP_EVENT_MATCH_DELETED); -} - -DEFUN (match_source_instance, - match_source_instance_cmd, - "match source-instance (0-255)", - MATCH_STR - "Match the protocol's instance number\n" - "The instance number\n") -{ - char *instance = argv[2]->arg; - - return zebra_route_match_add(vty, "source-instance", instance, - RMAP_EVENT_MATCH_ADDED); -} - -DEFUN (no_match_source_instance, - no_match_source_instance_cmd, - "no match source-instance [(0-255)]", - NO_STR MATCH_STR - "Match the protocol's instance number\n" - "The instance number\n") -{ - char *instance = (argc == 4) ? 
argv[3]->arg : NULL; - - return zebra_route_match_delete(vty, "source-instance", instance, - RMAP_EVENT_MATCH_ADDED); +DEFPY( + match_source_protocol, match_source_protocol_cmd, + "match source-protocol " FRR_REDIST_STR_ZEBRA "$proto", + MATCH_STR + "Match protocol via which the route was learnt\n" + FRR_REDIST_HELP_STR_ZEBRA) +{ + const char *xpath = "./match-condition[condition='source-protocol']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-protocol", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, proto); + + return nb_cli_apply_changes(vty, NULL); } -/* set functions */ +DEFPY( + no_match_source_protocol, no_match_source_protocol_cmd, + "no match source-protocol [" FRR_REDIST_STR_ZEBRA "]", + NO_STR + MATCH_STR + "Match protocol via which the route was learnt\n" + FRR_REDIST_HELP_STR_ZEBRA) +{ + const char *xpath = "./match-condition[condition='source-protocol']"; -DEFUN (set_src, - set_src_cmd, - "set src <A.B.C.D|X:X::X:X>", - SET_STR - "src address for route\n" - "IPv4 src address\n" - "IPv6 src address\n") -{ - int idx_ip = 2; - union g_addr src; - struct interface *pif = NULL; - int family; - struct prefix p; - struct vrf *vrf; - - if (inet_pton(AF_INET, argv[idx_ip]->arg, &src.ipv4) != 1) { - if (inet_pton(AF_INET6, argv[idx_ip]->arg, &src.ipv6) != 1) { - vty_out(vty, "%% not a valid IPv4/v6 address\n"); - return CMD_WARNING_CONFIG_FAILED; - } + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - p.family = family = AF_INET6; - p.u.prefix6 = src.ipv6; - p.prefixlen = IPV6_MAX_BITLEN; - } else { - p.family = family = AF_INET; - p.u.prefix4 = src.ipv4; - p.prefixlen = IPV4_MAX_BITLEN; - } + return nb_cli_apply_changes(vty, NULL); +} - if (!zebra_check_addr(&p)) { - vty_out(vty, "%% not a valid source IPv4/v6 address\n"); - return CMD_WARNING_CONFIG_FAILED; - } +DEFPY( + match_source_instance, match_source_instance_cmd, 
+ "match source-instance (0-255)$instance", + MATCH_STR + "Match the protocol's instance number\n" + "The instance number\n") +{ + const char *xpath = "./match-condition[condition='source-instance']"; + char xpath_value[XPATH_MAXLEN]; - RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) { - if (family == AF_INET) - pif = if_lookup_exact_address((void *)&src.ipv4, - AF_INET, vrf->vrf_id); - else if (family == AF_INET6) - pif = if_lookup_exact_address((void *)&src.ipv6, - AF_INET6, vrf->vrf_id); + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-instance", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, instance_str); - if (pif != NULL) - break; - } + return nb_cli_apply_changes(vty, NULL); +} - if (!pif) { - vty_out(vty, "%% not a local address\n"); - return CMD_WARNING_CONFIG_FAILED; +DEFPY( + no_match_source_instance, no_match_source_instance_cmd, + "no match source-instance [(0-255)]", + NO_STR MATCH_STR + "Match the protocol's instance number\n" + "The instance number\n") +{ + const char *xpath = "./match-condition[condition='source-instance']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +/* set functions */ + +DEFPY( + set_src, set_src_cmd, + "set src <A.B.C.D$addrv4|X:X::X:X$addrv6>", + SET_STR + "src address for route\n" + "IPv4 src address\n" + "IPv6 src address\n") +{ + const char *xpath = "./set-action[action='source']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + if (addrv4_str) { + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-v4", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, + addrv4_str); + } else { + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-v6", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, + addrv6_str); } - VTY_DECLVAR_CONTEXT(route_map_index, index); - return 
generic_set_add(vty, index, "src", argv[idx_ip]->arg); + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_set_src, - no_set_src_cmd, - "no set src [<A.B.C.D|X:X::X:X>]", - NO_STR - SET_STR - "Source address for route\n" - "IPv4 address\n" - "IPv6 address\n") -{ - char *ip = (argc == 4) ? argv[3]->arg : NULL; - VTY_DECLVAR_CONTEXT(route_map_index, index); - return generic_set_delete(vty, index, "src", ip); +DEFPY( + no_set_src, no_set_src_cmd, + "no set src [<A.B.C.D|X:X::X:X>]", + NO_STR + SET_STR + "Source address for route\n" + "IPv4 address\n" + "IPv6 address\n") +{ + const char *xpath = "./set-action[action='source']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } DEFUN (zebra_route_map_timer, diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c index a891ffb76a..ea2b6752b3 100644 --- a/zebra/zebra_router.c +++ b/zebra/zebra_router.c @@ -223,10 +223,11 @@ void zebra_router_terminate(void) zebra_vxlan_disable(); zebra_mlag_terminate(); - hash_clean(zrouter.nhgs, zebra_nhg_hash_free); - hash_free(zrouter.nhgs); - hash_clean(zrouter.nhgs_id, NULL); + /* Free NHE in ID table only since it has unhashable entries as well */ + hash_clean(zrouter.nhgs_id, zebra_nhg_hash_free); hash_free(zrouter.nhgs_id); + hash_clean(zrouter.nhgs, NULL); + hash_free(zrouter.nhgs); hash_clean(zrouter.rules_hash, zebra_pbr_rules_free); hash_free(zrouter.rules_hash); diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h index 59bd0e55f0..773e5a6415 100644 --- a/zebra/zebra_router.h +++ b/zebra/zebra_router.h @@ -218,6 +218,9 @@ extern void multicast_mode_ipv4_set(enum multicast_mode mode); extern enum multicast_mode multicast_mode_ipv4_get(void); +/* zebra_northbound.c */ +extern const struct frr_yang_module_info frr_zebra_info; + #ifdef __cplusplus } #endif diff --git a/zebra/zebra_snmp.c b/zebra/zebra_snmp.c index 5de45c0294..89b8238c29 100644 --- a/zebra/zebra_snmp.c +++ b/zebra/zebra_snmp.c @@ -266,9 +266,9 @@ 
static void check_replace(struct route_node *np2, struct route_entry *re2, return; } - if (in_addr_cmp(&(*np)->p.u.prefix, &np2->p.u.prefix) < 0) + if (prefix_cmp(&(*np)->p, &np2->p) < 0) return; - if (in_addr_cmp(&(*np)->p.u.prefix, &np2->p.u.prefix) > 0) { + if (prefix_cmp(&(*np)->p, &np2->p) > 0) { *np = np2; *re = re2; return; diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c index dfa7d5ae92..ee1e251a69 100644 --- a/zebra/zebra_vrf.c +++ b/zebra/zebra_vrf.c @@ -178,7 +178,7 @@ static int zebra_vrf_disable(struct vrf *vrf) zebra_vxlan_vrf_disable(zvrf); #if defined(HAVE_RTADV) - rtadv_terminate(zvrf); + rtadv_vrf_terminate(zvrf); #endif /* Inform clients that the VRF is now inactive. This is a diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h index 5448e17073..268ee12a65 100644 --- a/zebra/zebra_vrf.h +++ b/zebra/zebra_vrf.h @@ -193,7 +193,7 @@ struct zebra_vrf { static inline vrf_id_t zvrf_id(struct zebra_vrf *zvrf) { if (!zvrf || !zvrf->vrf) - return VRF_UNKNOWN; + return VRF_DEFAULT; return zvrf->vrf->vrf_id; } @@ -206,6 +206,8 @@ static inline const char *zvrf_ns_name(struct zebra_vrf *zvrf) static inline const char *zvrf_name(struct zebra_vrf *zvrf) { + if (!zvrf || !zvrf->vrf) + return "Unknown"; return zvrf->vrf->name; } diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index ccc6e9e46b..8024db4ca7 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -54,6 +54,7 @@ #include "zebra/zebra_pbr.h" #include "zebra/zebra_nhg.h" #include "zebra/interface.h" +#include "northbound_cli.h" extern int allow_delete; @@ -164,7 +165,8 @@ DEFUN (show_ip_rpf_addr, return CMD_SUCCESS; } -static char re_status_output_char(struct route_entry *re, struct nexthop *nhop) +static char re_status_output_char(const struct route_entry *re, + const struct nexthop *nhop) { if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) { if (!CHECK_FLAG(nhop->flags, NEXTHOP_FLAG_DUPLICATE) && @@ -187,6 +189,152 @@ static char re_status_output_char(struct route_entry *re, struct nexthop 
*nhop) return ' '; } +/* + * TODO -- Show backup nexthop info + */ +static void show_nh_backup_helper(struct vty *vty, + const struct nhg_hash_entry *nhe, + const struct nexthop *nexthop) +{ + /* Double-check that there _is_ a backup */ + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + return; + + /* Locate the backup nexthop */ + + /* Format the backup (indented) */ + +} + +/* + * Helper api to format output for a nexthop, used in the 'detailed' + * output path. + */ +static void show_nexthop_detail_helper(struct vty *vty, + const struct route_entry *re, + const struct nexthop *nexthop) +{ + char addrstr[32]; + char buf[MPLS_LABEL_STRLEN]; + + vty_out(vty, " %c%s", + re_status_output_char(re, nexthop), + nexthop->rparent ? " " : ""); + + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + vty_out(vty, " %s", + inet_ntoa(nexthop->gate.ipv4)); + if (nexthop->ifindex) + vty_out(vty, ", via %s", + ifindex2ifname( + nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + vty_out(vty, " %s", + inet_ntop(AF_INET6, &nexthop->gate.ipv6, + buf, sizeof(buf))); + if (nexthop->ifindex) + vty_out(vty, ", via %s", + ifindex2ifname( + nexthop->ifindex, + nexthop->vrf_id)); + break; + + case NEXTHOP_TYPE_IFINDEX: + vty_out(vty, " directly connected, %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_BLACKHOLE: + vty_out(vty, " unreachable"); + switch (nexthop->bh_type) { + case BLACKHOLE_REJECT: + vty_out(vty, " (ICMP unreachable)"); + break; + case BLACKHOLE_ADMINPROHIB: + vty_out(vty, + " (ICMP admin-prohibited)"); + break; + case BLACKHOLE_NULL: + vty_out(vty, " (blackhole)"); + break; + case BLACKHOLE_UNSPEC: + break; + } + break; + default: + break; + } + + if ((re->vrf_id != nexthop->vrf_id) + && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { + struct vrf *vrf = + vrf_lookup_by_id(nexthop->vrf_id); + + if (vrf) + vty_out(vty, "(vrf %s)", 
vrf->name); + else + vty_out(vty, "(vrf UNKNOWN)"); + } + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) + vty_out(vty, " (duplicate nexthop removed)"); + + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + vty_out(vty, " inactive"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) + vty_out(vty, " onlink"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + vty_out(vty, " (recursive)"); + + /* Source specified? */ + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (nexthop->src.ipv4.s_addr) { + if (inet_ntop(AF_INET, &nexthop->src.ipv4, + addrstr, sizeof(addrstr))) + vty_out(vty, ", src %s", + addrstr); + } + break; + + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, + &in6addr_any)) { + if (inet_ntop(AF_INET6, &nexthop->src.ipv6, + addrstr, sizeof(addrstr))) + vty_out(vty, ", src %s", + addrstr); + } + break; + + default: + break; + } + + if (re->nexthop_mtu) + vty_out(vty, ", mtu %u", re->nexthop_mtu); + + /* Label information */ + if (nexthop->nh_label && nexthop->nh_label->num_labels) { + vty_out(vty, ", label %s", + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, buf, + sizeof(buf), 1 /*pretty*/)); + } + + if (nexthop->weight) + vty_out(vty, ", weight %u", nexthop->weight); +} + /* New RIB. Detailed information for IPv4 route. */ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn, int mcast, bool use_fib, bool show_ng) @@ -253,129 +401,122 @@ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn, vty_out(vty, " Nexthop Group ID: %u\n", re->nhe_id); for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) { - char addrstr[32]; - - vty_out(vty, " %c%s", - re_status_output_char(re, nexthop), - nexthop->rparent ? 
" " : ""); - - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - vty_out(vty, " %s", - inet_ntoa(nexthop->gate.ipv4)); - if (nexthop->ifindex) - vty_out(vty, ", via %s", - ifindex2ifname( - nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - vty_out(vty, " %s", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, - buf, sizeof(buf))); - if (nexthop->ifindex) - vty_out(vty, ", via %s", - ifindex2ifname( - nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " directly connected, %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - vty_out(vty, " unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - vty_out(vty, " (ICMP unreachable)"); - break; - case BLACKHOLE_ADMINPROHIB: - vty_out(vty, - " (ICMP admin-prohibited)"); - break; - case BLACKHOLE_NULL: - vty_out(vty, " (blackhole)"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; - } - - if ((re->vrf_id != nexthop->vrf_id) - && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { - struct vrf *vrf = - vrf_lookup_by_id(nexthop->vrf_id); - - if (vrf) - vty_out(vty, "(vrf %s)", vrf->name); - else - vty_out(vty, "(vrf UNKNOWN)"); - } + /* Use helper to format each nexthop */ + show_nexthop_detail_helper(vty, re, nexthop); + vty_out(vty, "\n"); - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) - vty_out(vty, " (duplicate nexthop removed)"); + /* Include backup info, if present */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + show_nh_backup_helper(vty, re->nhe, nexthop); + } + vty_out(vty, "\n"); + } +} - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - vty_out(vty, " inactive"); +/* + * Helper for nexthop output, used in the 'show ip route' path + */ +static void show_route_nexthop_helper(struct vty *vty, + const struct route_entry *re, + const struct nexthop *nexthop) +{ + char 
buf[MPLS_LABEL_STRLEN]; + + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + vty_out(vty, " via %s", inet_ntoa(nexthop->gate.ipv4)); + if (nexthop->ifindex) + vty_out(vty, ", %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + vty_out(vty, " via %s", + inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, + sizeof(buf))); + if (nexthop->ifindex) + vty_out(vty, ", %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - vty_out(vty, " onlink"); + case NEXTHOP_TYPE_IFINDEX: + vty_out(vty, " is directly connected, %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_BLACKHOLE: + vty_out(vty, " unreachable"); + switch (nexthop->bh_type) { + case BLACKHOLE_REJECT: + vty_out(vty, " (ICMP unreachable)"); + break; + case BLACKHOLE_ADMINPROHIB: + vty_out(vty, " (ICMP admin-prohibited)"); + break; + case BLACKHOLE_NULL: + vty_out(vty, " (blackhole)"); + break; + case BLACKHOLE_UNSPEC: + break; + } + break; + default: + break; + } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - vty_out(vty, " (recursive)"); + if ((re == NULL || (nexthop->vrf_id != re->vrf_id)) && + (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { + struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, - &nexthop->src.ipv4, - addrstr, sizeof(addrstr))) - vty_out(vty, ", src %s", - addrstr); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, - &in6addr_any)) { - if (inet_ntop(AF_INET6, - &nexthop->src.ipv6, - addrstr, sizeof(addrstr))) - vty_out(vty, ", src %s", - addrstr); - } - break; - default: - break; - } + if (vrf) + vty_out(vty, " (vrf %s)", vrf->name); + else + vty_out(vty, " 
(vrf UNKNOWN)"); + } - if (re->nexthop_mtu) - vty_out(vty, ", mtu %u", re->nexthop_mtu); + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + vty_out(vty, " inactive"); - /* Label information */ - if (nexthop->nh_label - && nexthop->nh_label->num_labels) { - vty_out(vty, ", label %s", - mpls_label2str( - nexthop->nh_label->num_labels, - nexthop->nh_label->label, buf, - sizeof(buf), 1)); - } + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) + vty_out(vty, " onlink"); - if (nexthop->weight) - vty_out(vty, ", weight %u", nexthop->weight); + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + vty_out(vty, " (recursive)"); - vty_out(vty, "\n"); + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (nexthop->src.ipv4.s_addr) { + if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf, + sizeof(buf))) + vty_out(vty, ", src %s", buf); } - vty_out(vty, "\n"); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) { + if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf, + sizeof(buf))) + vty_out(vty, ", src %s", buf); + } + break; + default: + break; + } + + /* Label information */ + if (nexthop->nh_label && nexthop->nh_label->num_labels) { + vty_out(vty, ", label %s", + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, buf, + sizeof(buf), 1)); } + + if ((re == NULL) && nexthop->weight) + vty_out(vty, ", weight %u", nexthop->weight); } static void vty_show_ip_route(struct vty *vty, struct route_node *rn, @@ -625,6 +766,10 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, json_labels); } + if (nexthop->weight) + json_object_int_add(json_nexthop, "weight", + nexthop->weight); + json_object_array_add(json_nexthops, json_nexthop); } @@ -660,105 +805,46 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, len - 3 + (2 * nexthop_level(nexthop)), ' '); } - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: 
- case NEXTHOP_TYPE_IPV4_IFINDEX: - vty_out(vty, " via %s", inet_ntoa(nexthop->gate.ipv4)); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - vty_out(vty, " via %s", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, - sizeof(buf))); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; + show_route_nexthop_helper(vty, re, nexthop); - case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " is directly connected, %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - vty_out(vty, " unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - vty_out(vty, " (ICMP unreachable)"); - break; - case BLACKHOLE_ADMINPROHIB: - vty_out(vty, " (ICMP admin-prohibited)"); - break; - case BLACKHOLE_NULL: - vty_out(vty, " (blackhole)"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; - } - - if ((nexthop->vrf_id != re->vrf_id) - && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { - struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); - - if (vrf) - vty_out(vty, "(vrf %s)", vrf->name); - else - vty_out(vty, "(vrf UNKNOWN)"); - } + if (nexthop->weight) + vty_out(vty, ", weight %u", nexthop->weight); - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - vty_out(vty, " inactive"); + vty_out(vty, ", %s\n", up_str); - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - vty_out(vty, " onlink"); + /* Check for backup info */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + struct nexthop *backup; + int i; - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - vty_out(vty, " (recursive)"); + if (re->nhe->backup_info == NULL || + re->nhe->backup_info->nhe == NULL) + continue; - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, 
&nexthop->src.ipv4, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) { - if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); + i = 0; + for (ALL_NEXTHOPS(re->nhe->backup_info->nhe->nhg, + backup)) { + if (i == nexthop->backup_idx) + break; + i++; } - break; - default: - break; - } - /* Label information */ - if (nexthop->nh_label && nexthop->nh_label->num_labels) { - vty_out(vty, ", label %s", - mpls_label2str(nexthop->nh_label->num_labels, - nexthop->nh_label->label, buf, - sizeof(buf), 1)); + /* Print useful backup info */ + if (backup) { + /* TODO -- install state is not accurate */ + vty_out(vty, " %*c [backup %d]", + /*re_status_output_char(re, backup),*/ + len - 3 + (2 * nexthop_level(nexthop)), + ' ', nexthop->backup_idx); + show_route_nexthop_helper(vty, re, backup); + vty_out(vty, "\n"); + } } - - vty_out(vty, ", %s\n", up_str); } } static void vty_show_ip_route_detail_json(struct vty *vty, - struct route_node *rn, bool use_fib) + struct route_node *rn, bool use_fib) { json_object *json = NULL; json_object *json_prefix = NULL; @@ -1028,9 +1114,8 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) { struct nexthop *nexthop = NULL; struct nhg_connected *rb_node_dep = NULL; - char buf[SRCDEST2STR_BUFFER]; - struct vrf *nhe_vrf = vrf_lookup_by_id(nhe->vrf_id); + struct nexthop_group *backup_nhg; vty_out(vty, "ID: %u\n", nhe->id); vty_out(vty, " RefCnt: %d\n", nhe->refcnt); @@ -1062,6 +1147,7 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) vty_out(vty, "\n"); } + /* Output nexthops */ for (ALL_NEXTHOPS(nhe->nhg, nexthop)) { if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) vty_out(vty, " "); @@ -1069,100 +1155,56 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) /* Make recursive 
nexthops a bit more clear */ vty_out(vty, " "); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - vty_out(vty, " %s", inet_ntoa(nexthop->gate.ipv4)); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - vty_out(vty, " %s", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, - sizeof(buf))); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; + show_route_nexthop_helper(vty, NULL, nexthop); - case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " directly connected %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - vty_out(vty, " unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - vty_out(vty, " (ICMP unreachable)"); - break; - case BLACKHOLE_ADMINPROHIB: - vty_out(vty, " (ICMP admin-prohibited)"); - break; - case BLACKHOLE_NULL: - vty_out(vty, " (blackhole)"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; + if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL) { + if (CHECK_FLAG(nexthop->flags, + NEXTHOP_FLAG_HAS_BACKUP)) + vty_out(vty, " [backup %d]", + nexthop->backup_idx); + + vty_out(vty, "\n"); + continue; } - struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* TODO -- print more useful backup info */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + struct nexthop *backup; + int i; - if (vrf) - vty_out(vty, " (vrf %s)", vrf->name); - else - vty_out(vty, " (vrf UNKNOWN)"); + i = 0; + for (ALL_NEXTHOPS(nhe->backup_info->nhe->nhg, backup)) { + if (i == nexthop->backup_idx) + break; + i++; + } - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - vty_out(vty, " inactive"); + /* TODO */ + if (backup) + vty_out(vty, " [backup %d]", + nexthop->backup_idx); + else + vty_out(vty, " [backup INVALID]"); + } - if 
(CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - vty_out(vty, " onlink"); + vty_out(vty, "\n"); + } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - vty_out(vty, " (recursive)"); + /* Output backup nexthops (if any) */ + backup_nhg = zebra_nhg_get_backup_nhg(nhe); + if (backup_nhg) { + vty_out(vty, " Backups:\n"); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) { - if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); - } - break; - default: - break; - } + for (ALL_NEXTHOPS_PTR(backup_nhg, nexthop)) { + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + vty_out(vty, " "); + else + /* Make recursive nexthops a bit more clear */ + vty_out(vty, " "); - /* Label information */ - if (nexthop->nh_label && nexthop->nh_label->num_labels) { - vty_out(vty, ", label %s", - mpls_label2str(nexthop->nh_label->num_labels, - nexthop->nh_label->label, buf, - sizeof(buf), 1)); + show_route_nexthop_helper(vty, NULL, nexthop); + vty_out(vty, "\n"); } - - if (nexthop->weight) - vty_out(vty, ", weight %u", nexthop->weight); - - vty_out(vty, "\n"); } if (!zebra_nhg_dependents_is_empty(nhe)) { @@ -2893,17 +2935,29 @@ DEFPY (clear_evpn_dup_addr, "IPv4 address\n" "IPv6 address\n") { - struct zebra_vrf *zvrf; struct ipaddr host_ip = {.ipa_type = IPADDR_NONE }; int ret = CMD_SUCCESS; + struct list *input; + struct yang_data *yang_dup = NULL, *yang_dup_ip = NULL, + *yang_dup_mac = NULL; - zvrf = zebra_vrf_get_evpn(); - if (vni_str) { + input = list_new(); + + if (!vni_str) { + yang_dup = yang_data_new( + "/frr-zebra:clear-evpn-dup-addr/input/clear-dup-choice", + "all-case"); + } else { + yang_dup = yang_data_new_uint32( + 
"/frr-zebra:clear-evpn-dup-addr/input/clear-dup-choice/single-case/vni-id", + vni); if (!is_zero_mac(&mac->eth_addr)) { - ret = zebra_vxlan_clear_dup_detect_vni_mac(vty, zvrf, - vni, - &mac->eth_addr); - } else if (ip) { + yang_dup_mac = yang_data_new_mac( + "/frr-zebra:clear-evpn-dup-addr/input/clear-dup-choice/single-case/vni-id/mac-addr", + &mac->eth_addr); + if (yang_dup_mac) + listnode_add(input, yang_dup_mac); + } else if (ip) { if (sockunion_family(ip) == AF_INET) { host_ip.ipa_type = IPADDR_V4; host_ip.ipaddr_v4.s_addr = sockunion2ip(ip); @@ -2912,16 +2966,23 @@ DEFPY (clear_evpn_dup_addr, memcpy(&host_ip.ipaddr_v6, &ip->sin6.sin6_addr, sizeof(struct in6_addr)); } - ret = zebra_vxlan_clear_dup_detect_vni_ip(vty, zvrf, - vni, - &host_ip); - } else - ret = zebra_vxlan_clear_dup_detect_vni(vty, zvrf, vni); - } else { - ret = zebra_vxlan_clear_dup_detect_vni_all(vty, zvrf); + yang_dup_ip = yang_data_new_ip( + "/frr-zebra:clear-evpn-dup-addr/input/clear-dup-choice/single-case/vni-id/vni-ipaddr", + &host_ip); + + if (yang_dup_ip) + listnode_add(input, yang_dup_ip); + } } + if (yang_dup) { + listnode_add(input, yang_dup); + ret = nb_cli_rpc("/frr-zebra:clear-evpn-dup-addr", input, NULL); + } + + list_delete(&input); + return ret; } @@ -3437,22 +3498,42 @@ DEFUN_HIDDEN (show_frr, } /* IP node for static routes. */ -static struct cmd_node ip_node = {IP_NODE, "", 1}; -static struct cmd_node protocol_node = {PROTOCOL_NODE, "", 1}; +static int zebra_ip_config(struct vty *vty); +static struct cmd_node ip_node = { + .name = "static ip", + .node = IP_NODE, + .prompt = "", + .config_write = zebra_ip_config, +}; +static int config_write_protocol(struct vty *vty); +static struct cmd_node protocol_node = { + .name = "protocol", + .node = PROTOCOL_NODE, + .prompt = "", + .config_write = config_write_protocol, +}; /* table node for routing tables. */ -static struct cmd_node table_node = {TABLE_NODE, - "", /* This node has no interface. 
*/ - 1}; -static struct cmd_node forwarding_node = {FORWARDING_NODE, - "", /* This node has no interface. */ - 1}; +static int config_write_table(struct vty *vty); +static struct cmd_node table_node = { + .name = "table", + .node = TABLE_NODE, + .prompt = "", + .config_write = config_write_table, +}; +static int config_write_forwarding(struct vty *vty); +static struct cmd_node forwarding_node = { + .name = "forwarding", + .node = FORWARDING_NODE, + .prompt = "", + .config_write = config_write_forwarding, +}; /* Route VTY. */ void zebra_vty_init(void) { /* Install configuration write function. */ - install_node(&table_node, config_write_table); - install_node(&forwarding_node, config_write_forwarding); + install_node(&table_node); + install_node(&forwarding_node); install_element(VIEW_NODE, &show_ip_forwarding_cmd); install_element(CONFIG_NODE, &ip_forwarding_cmd); @@ -3466,8 +3547,8 @@ void zebra_vty_init(void) /* Route-map */ zebra_route_map_init(); - install_node(&ip_node, zebra_ip_config); - install_node(&protocol_node, config_write_protocol); + install_node(&ip_node); + install_node(&protocol_node); install_element(CONFIG_NODE, &allow_external_route_update_cmd); install_element(CONFIG_NODE, &no_allow_external_route_update_cmd); diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 2e1daa6fdf..d23cdfccd8 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -121,11 +121,11 @@ static struct interface *zvni_map_to_macvlan(struct interface *br_if, /* l3-vni next-hop neigh related APIs */ static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, - struct ipaddr *ip); + const struct ipaddr *ip); static void *zl3vni_nh_alloc(void *p); static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, - struct ipaddr *vtep_ip, - struct ethaddr *rmac); + const struct ipaddr *vtep_ip, + const struct ethaddr *rmac); static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); static 
int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); @@ -133,10 +133,10 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); /* l3-vni rmac related APIs */ static void zl3vni_print_rmac_hash(struct hash_bucket *, void *); static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni, - struct ethaddr *rmac); + const struct ethaddr *rmac); static void *zl3vni_rmac_alloc(void *p); static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, - struct ethaddr *rmac); + const struct ethaddr *rmac); static int zl3vni_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac); static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac); static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac); @@ -1515,8 +1515,8 @@ static void zvni_print_mac_hash_all_vni(struct hash_bucket *bucket, void *ctxt) struct mac_walk_ctx *wctx = ctxt; char vni_str[VNI_STR_LEN]; - vty = (struct vty *)wctx->vty; - json = (struct json_object *)wctx->json; + vty = wctx->vty; + json = wctx->json; zvni = (zebra_vni_t *)bucket->data; wctx->zvni = zvni; @@ -1586,8 +1586,8 @@ static void zvni_print_mac_hash_all_vni_detail(struct hash_bucket *bucket, struct mac_walk_ctx *wctx = ctxt; char vni_str[VNI_STR_LEN]; - vty = (struct vty *)wctx->vty; - json = (struct json_object *)wctx->json; + vty = wctx->vty; + json = wctx->json; zvni = (zebra_vni_t *)bucket->data; if (!zvni) { @@ -3691,7 +3691,7 @@ static struct interface *zvni_map_to_svi(vlanid_t vid, struct interface *br_if) if (!zif || zif->zif_type != ZEBRA_IF_VLAN || zif->link != br_if) continue; - vl = (struct zebra_l2info_vlan *)&zif->l2info.vl; + vl = &zif->l2info.vl; if (vl->vid == vid) { found = 1; @@ -4434,7 +4434,7 @@ static void zl3vni_cleanup_all(struct hash_bucket *bucket, void *args) } static void rb_find_or_add_host(struct host_rb_tree_entry *hrbe, - struct prefix *host) + const struct prefix *host) { struct host_rb_entry lookup; struct host_rb_entry *hle; @@ -4473,7 +4473,7 @@ static 
void rb_delete_host(struct host_rb_tree_entry *hrbe, struct prefix *host) * Look up MAC hash entry. */ static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni, - struct ethaddr *rmac) + const struct ethaddr *rmac) { zebra_mac_t tmp; zebra_mac_t *pmac; @@ -4502,7 +4502,8 @@ static void *zl3vni_rmac_alloc(void *p) /* * Add RMAC entry to l3-vni */ -static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, struct ethaddr *rmac) +static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, + const struct ethaddr *rmac) { zebra_mac_t tmp_rmac; zebra_mac_t *zrmac = NULL; @@ -4632,9 +4633,10 @@ static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac) } /* handle rmac add */ -static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni, struct ethaddr *rmac, - struct ipaddr *vtep_ip, - struct prefix *host_prefix) +static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni, + const struct ethaddr *rmac, + const struct ipaddr *vtep_ip, + const struct prefix *host_prefix) { char buf[ETHER_ADDR_STRLEN]; char buf1[INET6_ADDRSTRLEN]; @@ -4709,7 +4711,8 @@ static void zl3vni_remote_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac, /* * Look up nh hash entry on a l3-vni. */ -static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, struct ipaddr *ip) +static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, + const struct ipaddr *ip) { zebra_neigh_t tmp; zebra_neigh_t *n; @@ -4739,8 +4742,9 @@ static void *zl3vni_nh_alloc(void *p) /* * Add neighbor entry. 
*/ -static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, struct ipaddr *ip, - struct ethaddr *mac) +static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, + const struct ipaddr *ip, + const struct ethaddr *mac) { zebra_neigh_t tmp_n; zebra_neigh_t *n = NULL; @@ -4822,9 +4826,10 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n) } /* add remote vtep as a neigh entry */ -static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni, struct ipaddr *vtep_ip, - struct ethaddr *rmac, - struct prefix *host_prefix) +static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni, + const struct ipaddr *vtep_ip, + const struct ethaddr *rmac, + const struct prefix *host_prefix) { char buf[ETHER_ADDR_STRLEN]; char buf1[ETHER_ADDR_STRLEN]; @@ -5960,9 +5965,9 @@ int is_l3vni_for_prefix_routes_only(vni_t vni) } /* handle evpn route in vrf table */ -void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, struct ethaddr *rmac, - struct ipaddr *vtep_ip, - struct prefix *host_prefix) +void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, const struct ethaddr *rmac, + const struct ipaddr *vtep_ip, + const struct prefix *host_prefix) { zebra_l3vni_t *zl3vni = NULL; struct ipaddr ipv4_vtep; @@ -6848,9 +6853,8 @@ void zebra_vxlan_print_macs_vni_dad(struct vty *vty, } -int zebra_vxlan_clear_dup_detect_vni_mac(struct vty *vty, - struct zebra_vrf *zvrf, - vni_t vni, struct ethaddr *macaddr) +int zebra_vxlan_clear_dup_detect_vni_mac(struct zebra_vrf *zvrf, vni_t vni, + struct ethaddr *macaddr) { zebra_vni_t *zvni; zebra_mac_t *mac; @@ -6858,24 +6862,23 @@ int zebra_vxlan_clear_dup_detect_vni_mac(struct vty *vty, zebra_neigh_t *nbr = NULL; if (!is_evpn_enabled()) - return CMD_SUCCESS; + return 0; zvni = zvni_lookup(vni); if (!zvni) { - vty_out(vty, "%% VNI %u does not exist\n", vni); - return CMD_WARNING; + zlog_warn("VNI %u does not exist\n", vni); + return -1; } mac = zvni_mac_lookup(zvni, macaddr); if (!mac) { - vty_out(vty, "%% Requested MAC does not exist in VNI %u\n", - 
vni); - return CMD_WARNING; + zlog_warn("Requested MAC does not exist in VNI %u\n", vni); + return -1; } if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) { - vty_out(vty, "%% Requested MAC is not duplicate detected\n"); - return CMD_WARNING; + zlog_warn("Requested MAC is not duplicate detected\n"); + return -1; } /* Remove all IPs as duplicate associcated with this MAC */ @@ -6910,7 +6913,7 @@ int zebra_vxlan_clear_dup_detect_vni_mac(struct vty *vty, /* warn-only action return */ if (!zvrf->dad_freeze) - return CMD_SUCCESS; + return 0; /* Local: Notify Peer VTEPs, Remote: Install the entry */ if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { @@ -6919,7 +6922,7 @@ int zebra_vxlan_clear_dup_detect_vni_mac(struct vty *vty, &mac->macaddr, mac->flags, mac->loc_seq)) - return CMD_SUCCESS; + return 0; /* Process all neighbors associated with this MAC. */ zvni_process_neigh_on_local_mac_change(zvni, mac, 0); @@ -6931,12 +6934,11 @@ int zebra_vxlan_clear_dup_detect_vni_mac(struct vty *vty, zvni_mac_install(zvni, mac); } - return CMD_SUCCESS; + return 0; } -int zebra_vxlan_clear_dup_detect_vni_ip(struct vty *vty, - struct zebra_vrf *zvrf, - vni_t vni, struct ipaddr *ip) +int zebra_vxlan_clear_dup_detect_vni_ip(struct zebra_vrf *zvrf, vni_t vni, + struct ipaddr *ip) { zebra_vni_t *zvni; zebra_neigh_t *nbr; @@ -6945,38 +6947,35 @@ int zebra_vxlan_clear_dup_detect_vni_ip(struct vty *vty, char buf2[ETHER_ADDR_STRLEN]; if (!is_evpn_enabled()) - return CMD_SUCCESS; + return 0; zvni = zvni_lookup(vni); if (!zvni) { - vty_out(vty, "%% VNI %u does not exist\n", vni); - return CMD_WARNING; + zlog_debug("VNI %u does not exist\n", vni); + return -1; } nbr = zvni_neigh_lookup(zvni, ip); if (!nbr) { - vty_out(vty, - "%% Requested host IP does not exist in VNI %u\n", - vni); - return CMD_WARNING; + zlog_warn("Requested host IP does not exist in VNI %u\n", vni); + return -1; } ipaddr2str(&nbr->ip, buf, sizeof(buf)); if (!CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_DUPLICATE)) { - vty_out(vty, - "%% 
Requested host IP %s is not duplicate detected\n", - buf); - return CMD_WARNING; + zlog_warn("Requested host IP %s is not duplicate detected\n", + buf); + return -1; } mac = zvni_mac_lookup(zvni, &nbr->emac); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) { - vty_out(vty, - "%% Requested IP's associated MAC %s is still in duplicate state\n", + zlog_warn( + "Requested IP's associated MAC %s is still in duplicate state\n", prefix_mac2str(&nbr->emac, buf2, sizeof(buf2))); - return CMD_WARNING_CONFIG_FAILED; + return -1; } if (IS_ZEBRA_DEBUG_VXLAN) @@ -6998,7 +6997,7 @@ int zebra_vxlan_clear_dup_detect_vni_ip(struct vty *vty, zvni_neigh_install(zvni, nbr); } - return CMD_SUCCESS; + return 0; } static void zvni_clear_dup_mac_hash(struct hash_bucket *bucket, void *ctxt) @@ -7097,7 +7096,6 @@ static void zvni_clear_dup_neigh_hash(struct hash_bucket *bucket, void *ctxt) static void zvni_clear_dup_detect_hash_vni_all(struct hash_bucket *bucket, void **args) { - struct vty *vty; zebra_vni_t *zvni; struct zebra_vrf *zvrf; struct mac_walk_ctx m_wctx; @@ -7107,12 +7105,10 @@ static void zvni_clear_dup_detect_hash_vni_all(struct hash_bucket *bucket, if (!zvni) return; - vty = (struct vty *)args[0]; - zvrf = (struct zebra_vrf *)args[1]; + zvrf = (struct zebra_vrf *)args[0]; if (hashcount(zvni->neigh_table)) { memset(&n_wctx, 0, sizeof(struct neigh_walk_ctx)); - n_wctx.vty = vty; n_wctx.zvni = zvni; n_wctx.zvrf = zvrf; hash_iterate(zvni->neigh_table, zvni_clear_dup_neigh_hash, @@ -7122,51 +7118,45 @@ static void zvni_clear_dup_detect_hash_vni_all(struct hash_bucket *bucket, if (num_valid_macs(zvni)) { memset(&m_wctx, 0, sizeof(struct mac_walk_ctx)); m_wctx.zvni = zvni; - m_wctx.vty = vty; m_wctx.zvrf = zvrf; hash_iterate(zvni->mac_table, zvni_clear_dup_mac_hash, &m_wctx); } } -int zebra_vxlan_clear_dup_detect_vni_all(struct vty *vty, - struct zebra_vrf *zvrf) +int zebra_vxlan_clear_dup_detect_vni_all(struct zebra_vrf *zvrf) { - void *args[2]; + void *args[1]; if 
(!is_evpn_enabled()) - return CMD_SUCCESS; + return 0; - args[0] = vty; - args[1] = zvrf; + args[0] = zvrf; hash_iterate(zvrf->vni_table, (void (*)(struct hash_bucket *, void *)) zvni_clear_dup_detect_hash_vni_all, args); - return CMD_SUCCESS; + return 0; } -int zebra_vxlan_clear_dup_detect_vni(struct vty *vty, - struct zebra_vrf *zvrf, - vni_t vni) +int zebra_vxlan_clear_dup_detect_vni(struct zebra_vrf *zvrf, vni_t vni) { zebra_vni_t *zvni; struct mac_walk_ctx m_wctx; struct neigh_walk_ctx n_wctx; if (!is_evpn_enabled()) - return CMD_SUCCESS; + return 0; zvni = zvni_lookup(vni); if (!zvni) { - vty_out(vty, "%% VNI %u does not exist\n", vni); - return CMD_WARNING; + zlog_warn("VNI %u does not exist\n", vni); + return -1; } if (hashcount(zvni->neigh_table)) { memset(&n_wctx, 0, sizeof(struct neigh_walk_ctx)); - n_wctx.vty = vty; n_wctx.zvni = zvni; n_wctx.zvrf = zvrf; hash_iterate(zvni->neigh_table, zvni_clear_dup_neigh_hash, @@ -7176,12 +7166,11 @@ int zebra_vxlan_clear_dup_detect_vni(struct vty *vty, if (num_valid_macs(zvni)) { memset(&m_wctx, 0, sizeof(struct mac_walk_ctx)); m_wctx.zvni = zvni; - m_wctx.vty = vty; m_wctx.zvrf = zvrf; hash_iterate(zvni->mac_table, zvni_clear_dup_mac_hash, &m_wctx); } - return CMD_SUCCESS; + return 0; } /* @@ -7412,7 +7401,7 @@ void zebra_vxlan_dup_addr_detection(ZAPI_HANDLER_ARGS) * clear all duplicate detected addresses. 
*/ if (zvrf->dup_addr_detect && !dup_addr_detect) - zebra_vxlan_clear_dup_detect_vni_all(NULL, zvrf); + zebra_vxlan_clear_dup_detect_vni_all(zvrf); zvrf->dup_addr_detect = dup_addr_detect; zvrf->dad_time = time; @@ -10254,7 +10243,7 @@ static int zebra_evpn_pim_cfg_clean_up(struct zserv *client) { struct zebra_vrf *zvrf = zebra_vrf_get_evpn(); - if (CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) { + if (zvrf && CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug("VxLAN SG updates to PIM, stop"); UNSET_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG); diff --git a/zebra/zebra_vxlan.h b/zebra/zebra_vxlan.h index 6ca93f6cb6..064dda6cd0 100644 --- a/zebra/zebra_vxlan.h +++ b/zebra/zebra_vxlan.h @@ -199,24 +199,19 @@ extern void zebra_vxlan_cleanup_tables(struct zebra_vrf *); extern void zebra_vxlan_init(void); extern void zebra_vxlan_disable(void); extern void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, - struct ethaddr *rmac, - struct ipaddr *ip, - struct prefix *host_prefix); + const struct ethaddr *rmac, + const struct ipaddr *ip, + const struct prefix *host_prefix); extern void zebra_vxlan_evpn_vrf_route_del(vrf_id_t vrf_id, struct ipaddr *vtep_ip, struct prefix *host_prefix); -extern int zebra_vxlan_clear_dup_detect_vni_mac(struct vty *vty, - struct zebra_vrf *zvrf, +extern int zebra_vxlan_clear_dup_detect_vni_mac(struct zebra_vrf *zvrf, vni_t vni, struct ethaddr *macaddr); -extern int zebra_vxlan_clear_dup_detect_vni_ip(struct vty *vty, - struct zebra_vrf *zvrf, +extern int zebra_vxlan_clear_dup_detect_vni_ip(struct zebra_vrf *zvrf, vni_t vni, struct ipaddr *ip); -extern int zebra_vxlan_clear_dup_detect_vni_all(struct vty *vty, - struct zebra_vrf *zvrf); -extern int zebra_vxlan_clear_dup_detect_vni(struct vty *vty, - struct zebra_vrf *zvrf, - vni_t vni); +extern int zebra_vxlan_clear_dup_detect_vni_all(struct zebra_vrf *zvrf); +extern int zebra_vxlan_clear_dup_detect_vni(struct zebra_vrf *zvrf, vni_t vni); extern void 
zebra_vxlan_handle_result(struct zebra_dplane_ctx *ctx); extern void zebra_evpn_init(void); diff --git a/zebra/zebra_vxlan_private.h b/zebra/zebra_vxlan_private.h index 100bb0e093..0a46fb2075 100644 --- a/zebra/zebra_vxlan_private.h +++ b/zebra/zebra_vxlan_private.h @@ -301,6 +301,7 @@ struct zebra_mac_t_ { /* remote VTEP advertised MAC as default GW */ #define ZEBRA_MAC_REMOTE_DEF_GW 0x40 #define ZEBRA_MAC_DUPLICATE 0x80 +#define ZEBRA_MAC_FPM_SENT 0x100 /* whether or not this entry was sent. */ /* back pointer to zvni */ zebra_vni_t *zvni; diff --git a/zebra/zserv.c b/zebra/zserv.c index 7f806d82c3..8a1ed115a7 100644 --- a/zebra/zserv.c +++ b/zebra/zserv.c @@ -888,7 +888,9 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client) vty_out(vty, "Client: %s", zebra_route_string(client->proto)); if (client->instance) - vty_out(vty, " Instance: %d", client->instance); + vty_out(vty, " Instance: %u", client->instance); + if (client->session_id) + vty_out(vty, " [%u]", client->session_id); vty_out(vty, "\n"); vty_out(vty, "------------------------ \n"); @@ -936,32 +938,32 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client) vty_out(vty, "Type Add Update Del \n"); vty_out(vty, "================================================== \n"); - vty_out(vty, "IPv4 %-12d%-12d%-12d\n", client->v4_route_add_cnt, + vty_out(vty, "IPv4 %-12u%-12u%-12u\n", client->v4_route_add_cnt, client->v4_route_upd8_cnt, client->v4_route_del_cnt); - vty_out(vty, "IPv6 %-12d%-12d%-12d\n", client->v6_route_add_cnt, + vty_out(vty, "IPv6 %-12u%-12u%-12u\n", client->v6_route_add_cnt, client->v6_route_upd8_cnt, client->v6_route_del_cnt); - vty_out(vty, "Redist:v4 %-12d%-12d%-12d\n", client->redist_v4_add_cnt, + vty_out(vty, "Redist:v4 %-12u%-12u%-12u\n", client->redist_v4_add_cnt, 0, client->redist_v4_del_cnt); - vty_out(vty, "Redist:v6 %-12d%-12d%-12d\n", client->redist_v6_add_cnt, + vty_out(vty, "Redist:v6 %-12u%-12u%-12u\n", client->redist_v6_add_cnt, 0, 
client->redist_v6_del_cnt); - vty_out(vty, "Connected %-12d%-12d%-12d\n", client->ifadd_cnt, 0, + vty_out(vty, "Connected %-12u%-12u%-12u\n", client->ifadd_cnt, 0, client->ifdel_cnt); - vty_out(vty, "BFD peer %-12d%-12d%-12d\n", client->bfd_peer_add_cnt, + vty_out(vty, "BFD peer %-12u%-12u%-12u\n", client->bfd_peer_add_cnt, client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt); - vty_out(vty, "NHT v4 %-12d%-12d%-12d\n", + vty_out(vty, "NHT v4 %-12u%-12u%-12u\n", client->v4_nh_watch_add_cnt, 0, client->v4_nh_watch_rem_cnt); - vty_out(vty, "NHT v6 %-12d%-12d%-12d\n", + vty_out(vty, "NHT v6 %-12u%-12u%-12u\n", client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt); - vty_out(vty, "VxLAN SG %-12d%-12d%-12d\n", client->vxlan_sg_add_cnt, + vty_out(vty, "VxLAN SG %-12u%-12u%-12u\n", client->vxlan_sg_add_cnt, 0, client->vxlan_sg_del_cnt); - vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt); - vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt); - vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt); - vty_out(vty, "VNI delete notifications: %d\n", client->vnidel_cnt); - vty_out(vty, "L3-VNI add notifications: %d\n", client->l3vniadd_cnt); - vty_out(vty, "L3-VNI delete notifications: %d\n", client->l3vnidel_cnt); - vty_out(vty, "MAC-IP add notifications: %d\n", client->macipadd_cnt); - vty_out(vty, "MAC-IP delete notifications: %d\n", client->macipdel_cnt); + vty_out(vty, "Interface Up Notifications: %u\n", client->ifup_cnt); + vty_out(vty, "Interface Down Notifications: %u\n", client->ifdown_cnt); + vty_out(vty, "VNI add notifications: %u\n", client->vniadd_cnt); + vty_out(vty, "VNI delete notifications: %u\n", client->vnidel_cnt); + vty_out(vty, "L3-VNI add notifications: %u\n", client->l3vniadd_cnt); + vty_out(vty, "L3-VNI delete notifications: %u\n", client->l3vnidel_cnt); + vty_out(vty, "MAC-IP add notifications: %u\n", client->macipadd_cnt); + vty_out(vty, "MAC-IP delete notifications: %u\n", client->macipdel_cnt); 
TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) { vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id)); @@ -995,11 +997,16 @@ static void zebra_show_stale_client_detail(struct vty *vty, time_t uptime; struct client_gr_info *info = NULL; struct zserv *s = NULL; - - if (client->instance) - vty_out(vty, " Instance: %d", client->instance); + bool first_p = true; TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) { + if (first_p) { + if (client->instance) + vty_out(vty, " Instance: %u", client->instance); + if (client->session_id) + vty_out(vty, " [%u]", client->session_id); + first_p = false; + } vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id)); vty_out(vty, "Capabilities : "); switch (info->capabilities) { @@ -1070,19 +1077,26 @@ static void zebra_show_client_brief(struct vty *vty, struct zserv *client) client->v6_route_del_cnt); } -struct zserv *zserv_find_client(uint8_t proto, unsigned short instance) +struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance, + uint32_t session_id) { struct listnode *node, *nnode; struct zserv *client; for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { - if (client->proto == proto && client->instance == instance) + if (client->proto == proto && client->instance == instance && + client->session_id == session_id) return client; } return NULL; } +struct zserv *zserv_find_client(uint8_t proto, unsigned short instance) +{ + return zserv_find_client_session(proto, instance, 0); +} + /* This command is for debugging purpose. */ DEFUN (show_zebra_client, show_zebra_client_cmd, diff --git a/zebra/zserv.h b/zebra/zserv.h index 08df664d56..5506c4299d 100644 --- a/zebra/zserv.h +++ b/zebra/zserv.h @@ -134,9 +134,10 @@ struct zserv { /* Indicates if client is synchronous. 
*/ bool synchronous; - /* client's protocol */ + /* client's protocol and session info */ uint8_t proto; uint16_t instance; + uint32_t session_id; /* * Interested for MLAG Updates, and also stores the client @@ -287,6 +288,24 @@ extern int zserv_send_message(struct zserv *client, struct stream *msg); extern struct zserv *zserv_find_client(uint8_t proto, unsigned short instance); /* + * Retrieve a client by its protocol, instance number, and session id. + * + * proto + * protocol number + * + * instance + * instance number + * + * session_id + * session id + * + * Returns: + * The Zebra API client. + */ +struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance, + uint32_t session_id); + +/* * Close a client. * * Kills a client's thread, removes the client from the client list and cleans |
