741 files changed, 40199 insertions, 16152 deletions
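Most of the bfdd changes in the diff below are mechanical: the daemon-local BFD_SET_FLAG/BFD_UNSET_FLAG/BFD_CHECK_FLAG and log_* wrappers are dropped in favour of the shared FRR helpers (SET_FLAG/UNSET_FLAG/CHECK_FLAG and zlog_*), and bs_final_handler() now selects the transmit interval per RFC 5880 section 6.8.7 instead of section 6.8.3. A minimal sketch of both points follows; the flag-macro definitions are paraphrased on the assumption that they match the usual ones in lib/zebra.h, and the helper function name is illustrative only, not code from this diff.

#include <stdint.h>

/*
 * Generic FRR flag helpers (assumed to match lib/zebra.h; not part of
 * this diff) that replace the bfdd-local BFD_*_FLAG macros:
 */
#define CHECK_FLAG(V, F)  ((V) & (F))
#define SET_FLAG(V, F)    (V) |= (F)
#define UNSET_FLAG(V, F)  (V) &= ~(F)

/*
 * Transmit-interval rule applied by the bs_final_handler() hunk below:
 * per RFC 5880 section 6.8.7 the system with the slowest rate wins,
 * i.e. the larger of the local Desired Min TX Interval and the remote
 * Required Min RX Interval is used.
 */
static inline uint64_t bfd_effective_xmt_interval(uint64_t desired_min_tx,
						  uint64_t remote_required_min_rx)
{
	return desired_min_tx > remote_required_min_rx
		       ? desired_min_tx
		       : remote_required_min_rx;
}

In the hunk itself, bs->xmt_TO receives this value and the transmit timer is then restarted immediately via ptm_bfd_start_xmt_timer(bs, false).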
diff --git a/alpine/APKBUILD.in b/alpine/APKBUILD.in index f740a34583..d4657dfe55 100644 --- a/alpine/APKBUILD.in +++ b/alpine/APKBUILD.in @@ -2,7 +2,7 @@ pkgname=frr pkgver=@VERSION@ pkgrel=0 -pkgdesc="Free Range Routing is a fork of quagga" +pkgdesc="FRRouting is a fork of quagga" url="https://frrouting.org/" arch="x86_64" license="GPL-2.0" diff --git a/babeld/net.c b/babeld/net.c index d1f6a44142..40716a701d 100644 --- a/babeld/net.c +++ b/babeld/net.c @@ -144,7 +144,7 @@ babel_send(int s, iovec[1].iov_base = buf2; iovec[1].iov_len = buflen2; memset(&msg, 0, sizeof(msg)); - msg.msg_name = (struct sockaddr*)sin; + msg.msg_name = sin; msg.msg_namelen = slen; msg.msg_iov = iovec; msg.msg_iovlen = 2; diff --git a/bfdd/bfd.c b/bfdd/bfd.c index 222bf32c94..e1c662941b 100644 --- a/bfdd/bfd.c +++ b/bfdd/bfd.c @@ -131,7 +131,7 @@ int bfd_session_enable(struct bfd_session *bs) if (bs->key.vrfname[0]) { vrf = vrf_lookup_by_name(bs->key.vrfname); if (vrf == NULL) { - log_error( + zlog_err( "session-enable: specified VRF doesn't exists."); return 0; } @@ -143,15 +143,15 @@ int bfd_session_enable(struct bfd_session *bs) else ifp = if_lookup_by_name_all_vrf(bs->key.ifname); if (ifp == NULL) { - log_error( - "session-enable: specified interface doesn't exists."); + zlog_err( + "session-enable: specified interface doesn't exists."); return 0; } if (bs->key.ifname[0] && !vrf) { vrf = vrf_lookup_by_id(ifp->vrf_id); if (vrf == NULL) { - log_error( - "session-enable: specified VRF doesn't exists."); + zlog_err( + "session-enable: specified VRF doesn't exists."); return 0; } } @@ -164,12 +164,12 @@ int bfd_session_enable(struct bfd_session *bs) assert(bs->vrf); if (bs->key.ifname[0] - && BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) + && CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) bs->ifp = ifp; /* Sanity check: don't leak open sockets. */ if (bs->sock != -1) { - log_debug("session-enable: previous socket open"); + zlog_debug("session-enable: previous socket open"); close(bs->sock); bs->sock = -1; } @@ -179,7 +179,7 @@ int bfd_session_enable(struct bfd_session *bs) * could use the destination port (3784) for the source * port we wouldn't need a socket per session. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) { psock = bp_peer_socket(bs); if (psock == -1) return 0; @@ -287,7 +287,7 @@ void ptm_bfd_echo_stop(struct bfd_session *bfd) { bfd->echo_xmt_TO = 0; bfd->echo_detect_TO = 0; - BFD_UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); + UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); bfd_echo_xmttimer_delete(bfd); bfd_echo_recvtimer_delete(bfd); @@ -318,9 +318,9 @@ void ptm_bfd_sess_up(struct bfd_session *bfd) if (old_state != bfd->ses_state) { bfd->stats.session_up++; - log_info("state-change: [%s] %s -> %s", bs_to_string(bfd), - state_list[old_state].str, - state_list[bfd->ses_state].str); + zlog_debug("state-change: [%s] %s -> %s", bs_to_string(bfd), + state_list[old_state].str, + state_list[bfd->ses_state].str); } } @@ -352,15 +352,15 @@ void ptm_bfd_sess_dn(struct bfd_session *bfd, uint8_t diag) control_notify(bfd, PTM_BFD_DOWN); /* Stop echo packet transmission if they are active */ - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) ptm_bfd_echo_stop(bfd); if (old_state != bfd->ses_state) { bfd->stats.session_down++; - log_info("state-change: [%s] %s -> %s reason:%s", - bs_to_string(bfd), state_list[old_state].str, - state_list[bfd->ses_state].str, - get_diag_str(bfd->local_diag)); + zlog_debug("state-change: [%s] %s -> %s reason:%s", + bs_to_string(bfd), state_list[old_state].str, + state_list[bfd->ses_state].str, + get_diag_str(bfd->local_diag)); } } @@ -548,19 +548,19 @@ static void _bfd_session_update(struct bfd_session *bs, { if (bpc->bpc_echo) { /* Check if echo mode is already active. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) goto skip_echo; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); /* Activate/update echo receive timeout timer. */ bs_echo_timer_handler(bs); } else { /* Check if echo mode is already disabled. */ - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) goto skip_echo; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); ptm_bfd_echo_stop(bs); } @@ -582,10 +582,10 @@ skip_echo: if (bpc->bpc_shutdown) { /* Check if already shutdown. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Disable all events. */ bfd_recvtimer_delete(bs); @@ -602,10 +602,10 @@ skip_echo: ptm_bfd_snd(bs, 0); } else { /* Check if already working. */ - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Change and notify state change. 
*/ bs->ses_state = PTM_BFD_DOWN; @@ -616,15 +616,15 @@ skip_echo: bfd_xmttimer_update(bs, bs->xmt_TO); } if (bpc->bpc_cbit) { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) return; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); + SET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); } else { - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CBIT)) return; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT); } } @@ -682,7 +682,7 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc) /* Get BFD session storage with its defaults. */ bfd = bfd_session_new(); if (bfd == NULL) { - log_error("session-new: allocation failed"); + zlog_err("session-new: allocation failed"); return NULL; } @@ -703,7 +703,7 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc) /* Copy remaining data. */ if (bpc->bpc_ipv4 == false) - BFD_SET_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6); + SET_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6); bfd->key.family = (bpc->bpc_ipv4) ? AF_INET : AF_INET6; switch (bfd->key.family) { @@ -727,7 +727,7 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc) } if (bpc->bpc_mhop) - BFD_SET_FLAG(bfd->flags, BFD_SESS_FLAG_MH); + SET_FLAG(bfd->flags, BFD_SESS_FLAG_MH); bfd->key.mhop = bpc->bpc_mhop; @@ -758,7 +758,7 @@ struct bfd_session *bs_registrate(struct bfd_session *bfd) if (bfd->key.ifname[0] || bfd->key.vrfname[0] || bfd->sock == -1) bs_observer_add(bfd); - log_info("session-new: %s", bs_to_string(bfd)); + zlog_debug("session-new: %s", bs_to_string(bfd)); control_notify_config(BCM_NOTIFY_CONFIG_ADD, bfd); @@ -776,13 +776,13 @@ int ptm_bfd_sess_del(struct bfd_peer_cfg *bpc) /* This pointer is being referenced, don't let it be deleted. 
*/ if (bs->refcount > 0) { - log_error("session-delete: refcount failure: %" PRIu64 - " references", - bs->refcount); + zlog_err("session-delete: refcount failure: %" PRIu64 + " references", + bs->refcount); return -1; } - log_info("session-delete: %s", bs_to_string(bs)); + zlog_debug("session-delete: %s", bs_to_string(bs)); control_notify_config(BCM_NOTIFY_CONFIG_DELETE, bs); @@ -849,7 +849,8 @@ static void bs_down_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: unhandled neighbor state: %d", nstate); + zlog_debug("state-change: unhandled neighbor state: %d", + nstate); break; } } @@ -876,7 +877,8 @@ static void bs_init_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: unhandled neighbor state: %d", nstate); + zlog_debug("state-change: unhandled neighbor state: %d", + nstate); break; } } @@ -901,16 +903,16 @@ static void bs_neighbour_admin_down_handler(struct bfd_session *bfd, control_notify(bfd, PTM_BFD_ADM_DOWN); /* Stop echo packet transmission if they are active */ - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) ptm_bfd_echo_stop(bfd); if (old_state != bfd->ses_state) { bfd->stats.session_down++; - log_info("state-change: [%s] %s -> %s reason:%s", - bs_to_string(bfd), state_list[old_state].str, - state_list[bfd->ses_state].str, - get_diag_str(bfd->local_diag)); + zlog_debug("state-change: [%s] %s -> %s reason:%s", + bs_to_string(bfd), state_list[old_state].str, + state_list[bfd->ses_state].str, + get_diag_str(bfd->local_diag)); } } @@ -932,7 +934,8 @@ static void bs_up_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: unhandled neighbor state: %d", nstate); + zlog_debug("state-change: unhandled neighbor state: %d", + nstate); break; } } @@ -954,8 +957,8 @@ void bs_state_handler(struct bfd_session *bs, int nstate) break; default: - log_debug("state-change: [%s] is in invalid state: %d", - bs_to_string(bs), nstate); + zlog_debug("state-change: [%s] is in invalid state: %d", + bs_to_string(bs), nstate); break; } } @@ -976,14 +979,14 @@ void bs_echo_timer_handler(struct bfd_session *bs) * Section 3). * - Check that we are already at the up state. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) == 0 - || BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) == 0 + || CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) || bs->ses_state != PTM_BFD_UP) return; /* Remote peer asked to stop echo. */ if (bs->remote_timers.required_min_echo == 0) { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) ptm_bfd_echo_stop(bs); return; @@ -1002,7 +1005,7 @@ void bs_echo_timer_handler(struct bfd_session *bs) else bs->echo_xmt_TO = bs->timers.required_min_echo; - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE) == 0 + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE) == 0 || old_timer != bs->echo_xmt_TO) ptm_bfd_echo_start(bs); } @@ -1032,20 +1035,19 @@ void bs_final_handler(struct bfd_session *bs) } /* - * Calculate detection time based on new timers. + * Calculate transmission time based on new timers. * * Transmission calculation: - * We must respect the RequiredMinRxInterval from the remote - * system: if our desired transmission timer is more than the - * minimum receive rate, then we must lower it to at least the - * minimum receive interval. 
+ * Unless specified by exceptions at the end of Section 6.8.7, the + * transmission time will be determined by the system with the + * slowest rate. * - * RFC 5880, Section 6.8.3. + * RFC 5880, Section 6.8.7. */ if (bs->timers.desired_min_tx > bs->remote_timers.required_min_rx) - bs->xmt_TO = bs->remote_timers.required_min_rx; - else bs->xmt_TO = bs->timers.desired_min_tx; + else + bs->xmt_TO = bs->remote_timers.required_min_rx; /* Apply new transmission timer immediately. */ ptm_bfd_start_xmt_timer(bs, false); @@ -1241,7 +1243,7 @@ const char *bs_to_string(const struct bfd_session *bs) static char buf[256]; char addr_buf[INET6_ADDRSTRLEN]; int pos; - bool is_mhop = BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH); + bool is_mhop = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH); pos = snprintf(buf, sizeof(buf), "mhop:%s", is_mhop ? "yes" : "no"); pos += snprintf(buf + pos, sizeof(buf) - pos, " peer:%s", @@ -1435,8 +1437,8 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) inet_ntop(bs.key.family, &key.local, addr_buf, sizeof(addr_buf)); - log_debug(" peer %s found, but loc-addr %s ignored", - peer_buf, addr_buf); + zlog_debug(" peer %s found, but loc-addr %s ignored", + peer_buf, addr_buf); return bsp; } } @@ -1447,8 +1449,8 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) memset(bs.key.ifname, 0, sizeof(bs.key.ifname)); bsp = hash_lookup(bfd_key_hash, &bs); if (bsp) { - log_debug(" peer %s found, but ifp %s ignored", - peer_buf, key.ifname); + zlog_debug(" peer %s found, but ifp %s ignored", + peer_buf, key.ifname); return bsp; } } @@ -1462,10 +1464,10 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) inet_ntop(bs.key.family, &bs.key.local, addr_buf, sizeof(addr_buf)); - log_debug(" peer %s found, but ifp %s" - " and loc-addr %s ignored", - peer_buf, key.ifname, - addr_buf); + zlog_debug( + " peer %s found, but ifp %s" + " and loc-addr %s ignored", + peer_buf, key.ifname, addr_buf); return bsp; } } @@ -1483,8 +1485,10 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key) /* change key */ if (ctx.result) { bsp = ctx.result; - log_debug(" peer %s found, but ifp" - " and/or loc-addr params ignored", peer_buf); + zlog_debug( + " peer %s found, but ifp" + " and/or loc-addr params ignored", + peer_buf); } return bsp; } @@ -1644,11 +1648,11 @@ static void _bfd_session_remove_manual(struct hash_bucket *hb, struct bfd_session *bs = hb->data; /* Delete only manually configured sessions. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) return; bs->refcount--; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); /* Don't delete sessions still in use. 
*/ if (bs->refcount != 0) @@ -1672,13 +1676,13 @@ void bfd_sessions_remove_manual(void) */ static int bfd_vrf_new(struct vrf *vrf) { - log_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id); + zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id); return 0; } static int bfd_vrf_delete(struct vrf *vrf) { - log_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id); + zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id); return 0; } @@ -1686,7 +1690,7 @@ static int bfd_vrf_update(struct vrf *vrf) { if (!vrf_is_enabled(vrf)) return 0; - log_debug("VRF update: %s(%u)", vrf->name, vrf->vrf_id); + zlog_debug("VRF update: %s(%u)", vrf->name, vrf->vrf_id); /* a different name is given; update bfd list */ bfdd_sessions_enable_vrf(vrf); return 0; @@ -1703,7 +1707,7 @@ static int bfd_vrf_enable(struct vrf *vrf) vrf->info = (void *)bvrf; } else bvrf = vrf->info; - log_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id); + zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id); if (vrf->vrf_id == VRF_DEFAULT || vrf_get_backend() == VRF_BACKEND_NETNS) { if (!bvrf->bg_shop) @@ -1759,7 +1763,7 @@ static int bfd_vrf_disable(struct vrf *vrf) bfdd_zclient_unregister(vrf->vrf_id); } - log_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id); + zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id); /* Disable read/write poll triggering. */ THREAD_OFF(bvrf->bg_ev[0]); diff --git a/bfdd/bfd.h b/bfdd/bfd.h index 2ae74d7880..a786bb71bc 100644 --- a/bfdd/bfd.h +++ b/bfdd/bfd.h @@ -172,10 +172,6 @@ enum bfd_session_flags { BFD_SESS_FLAG_CBIT = 1 << 9, /* CBIT is set */ }; -#define BFD_SET_FLAG(field, flag) (field |= flag) -#define BFD_UNSET_FLAG(field, flag) (field &= ~flag) -#define BFD_CHECK_FLAG(field, flag) (field & flag) - /* BFD session hash keys */ struct bfd_key { uint16_t family; @@ -429,15 +425,9 @@ void pl_free(struct peer_label *pl); /* * logging - alias to zebra log */ - -#define log_debug zlog_debug -#define log_info zlog_info -#define log_warning zlog_warn -#define log_error zlog_err - -#define log_fatal(msg, ...) \ +#define zlog_fatal(msg, ...) \ do { \ - zlog_err(msg, ## __VA_ARGS__); \ + zlog_err(msg, ##__VA_ARGS__); \ assert(!msg); \ abort(); \ } while (0) diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c index 1ec761e3b8..79971fb3e2 100644 --- a/bfdd/bfd_packet.c +++ b/bfdd/bfd_packet.c @@ -76,7 +76,7 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, ssize_t rv; int sd = -1; - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) { memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; memcpy(&sin6.sin6_addr, &bs->key.peer, sizeof(sin6.sin6_addr)); @@ -85,7 +85,7 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, sin6.sin6_port = (port) ? *port - : (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) + : (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) ? htons(BFD_DEF_MHOP_DEST_PORT) : htons(BFD_DEFDESTPORT); @@ -98,7 +98,7 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, memcpy(&sin.sin_addr, &bs->key.peer, sizeof(sin.sin_addr)); sin.sin_port = (port) ? *port - : (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) + : (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) ? 
htons(BFD_DEF_MHOP_DEST_PORT) : htons(BFD_DEFDESTPORT); @@ -112,11 +112,11 @@ int _ptm_bfd_send(struct bfd_session *bs, uint16_t *port, const void *data, #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ rv = sendto(sd, data, datalen, 0, sa, slen); if (rv <= 0) { - log_debug("packet-send: send failure: %s", strerror(errno)); + zlog_debug("packet-send: send failure: %s", strerror(errno)); return -1; } if (rv < (ssize_t)datalen) - log_debug("packet-send: send partial: %s", strerror(errno)); + zlog_debug("packet-send: send partial: %s", strerror(errno)); return 0; } @@ -133,15 +133,15 @@ void ptm_bfd_echo_snd(struct bfd_session *bfd) if (!bvrf) return; - if (!BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) - BFD_SET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); + if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) + SET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE); memset(&bep, 0, sizeof(bep)); bep.ver = BFD_ECHO_VERSION; bep.len = BFD_ECHO_PKT_LEN; bep.my_discr = htonl(bfd->discrs.my_discr); - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6)) { + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6)) { sd = bvrf->bg_echov6; memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; @@ -190,13 +190,14 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s) /* Your discriminator not zero - use it to find session */ bfd = bfd_id_lookup(my_discr); if (bfd == NULL) { - log_debug("echo-packet: no matching session (id:%u)", my_discr); + zlog_debug("echo-packet: no matching session (id:%u)", + my_discr); return -1; } - if (!BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) { - log_debug("echo-packet: echo disabled [%s] (id:%u)", - bs_to_string(bfd), my_discr); + if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE)) { + zlog_debug("echo-packet: echo disabled [%s] (id:%u)", + bs_to_string(bfd), my_discr); return -1; } @@ -214,7 +215,7 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s) void ptm_bfd_snd(struct bfd_session *bfd, int fbit) { - struct bfd_pkt cp; + struct bfd_pkt cp = {}; /* Set fields according to section 6.5.7 */ cp.diag = bfd->local_diag; @@ -222,7 +223,7 @@ void ptm_bfd_snd(struct bfd_session *bfd, int fbit) cp.flags = 0; BFD_SETSTATE(cp.flags, bfd->ses_state); - if (BFD_CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_CBIT)) + if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_CBIT)) BFD_SETCBIT(cp.flags, BFD_CBIT); BFD_SETDEMANDBIT(cp.flags, BFD_DEF_DEMAND); @@ -291,8 +292,7 @@ ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, mlen = recvmsg(sd, &msghdr, MSG_DONTWAIT); if (mlen == -1) { if (errno != EAGAIN) - log_error("ipv4-recv: recv failed: %s", - strerror(errno)); + zlog_err("ipv4-recv: recv failed: %s", strerror(errno)); return -1; } @@ -313,7 +313,8 @@ ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval)); if (ttlval > 255) { - log_debug("ipv4-recv: invalid TTL: %u", ttlval); + zlog_debug("ipv4-recv: invalid TTL: %u", + ttlval); return -1; } *ttl = ttlval; @@ -402,8 +403,7 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, mlen = recvmsg(sd, &msghdr6, MSG_DONTWAIT); if (mlen == -1) { if (errno != EAGAIN) - log_error("ipv6-recv: recv failed: %s", - strerror(errno)); + zlog_err("ipv6-recv: recv failed: %s", strerror(errno)); return -1; } @@ -420,7 +420,8 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, if (cm->cmsg_type == IPV6_HOPLIMIT) { memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval)); if 
(ttlval > 255) { - log_debug("ipv6-recv: invalid TTL: %u", ttlval); + zlog_debug("ipv6-recv: invalid TTL: %u", + ttlval); return -1; } @@ -511,8 +512,8 @@ static void cp_debug(bool mhop, struct sockaddr_any *peer, vsnprintf(buf, sizeof(buf), fmt, vl); va_end(vl); - log_debug("control-packet: %s [mhop:%s%s%s%s%s]", buf, - mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr); + zlog_debug("control-packet: %s [mhop:%s%s%s%s%s]", buf, + mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr); } int bfd_recv_cb(struct thread *t) @@ -779,7 +780,7 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen, cmsg->cmsg_level = IPPROTO_IPV6; cmsg->cmsg_type = IPV6_HOPLIMIT; } else { -#if BFD_LINUX +#ifdef BFD_LINUX cmsg->cmsg_level = IPPROTO_IP; cmsg->cmsg_type = IP_TTL; #else @@ -796,11 +797,12 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen, /* Send echo back. */ wlen = sendmsg(sd, &msg, 0); if (wlen <= 0) { - log_debug("udp-send: loopback failure: (%d) %s", errno, strerror(errno)); + zlog_debug("udp-send: loopback failure: (%d) %s", errno, + strerror(errno)); return -1; } else if (wlen < (ssize_t)datalen) { - log_debug("udp-send: partial send: %zd expected %zu", wlen, - datalen); + zlog_debug("udp-send: partial send: %zd expected %zu", wlen, + datalen); return -1; } @@ -821,8 +823,8 @@ int bp_set_ttl(int sd, uint8_t value) int ttl = value; if (setsockopt(sd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) == -1) { - log_warning("set-ttl: setsockopt(IP_TTL, %d): %s", value, - strerror(errno)); + zlog_warn("set-ttl: setsockopt(IP_TTL, %d): %s", value, + strerror(errno)); return -1; } @@ -834,8 +836,8 @@ int bp_set_tos(int sd, uint8_t value) int tos = value; if (setsockopt(sd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) == -1) { - log_warning("set-tos: setsockopt(IP_TOS, %d): %s", value, - strerror(errno)); + zlog_warn("set-tos: setsockopt(IP_TOS, %d): %s", value, + strerror(errno)); return -1; } @@ -847,12 +849,12 @@ static void bp_set_ipopts(int sd) int rcvttl = BFD_RCV_TTL_VAL; if (bp_set_ttl(sd, BFD_TTL_VAL) != 0) - log_fatal("set-ipopts: TTL configuration failed"); + zlog_fatal("set-ipopts: TTL configuration failed"); if (setsockopt(sd, IPPROTO_IP, IP_RECVTTL, &rcvttl, sizeof(rcvttl)) == -1) - log_fatal("set-ipopts: setsockopt(IP_RECVTTL, %d): %s", rcvttl, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt(IP_RECVTTL, %d): %s", rcvttl, + strerror(errno)); #ifdef BFD_LINUX int pktinfo = BFD_PKT_INFO_VAL; @@ -860,21 +862,21 @@ static void bp_set_ipopts(int sd) /* Figure out address and interface to do the peer matching. */ if (setsockopt(sd, IPPROTO_IP, IP_PKTINFO, &pktinfo, sizeof(pktinfo)) == -1) - log_fatal("set-ipopts: setsockopt(IP_PKTINFO, %d): %s", pktinfo, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt(IP_PKTINFO, %d): %s", + pktinfo, strerror(errno)); #endif /* BFD_LINUX */ #ifdef BFD_BSD int yes = 1; /* Find out our address for peer matching. */ if (setsockopt(sd, IPPROTO_IP, IP_RECVDSTADDR, &yes, sizeof(yes)) == -1) - log_fatal("set-ipopts: setsockopt(IP_RECVDSTADDR, %d): %s", yes, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt(IP_RECVDSTADDR, %d): %s", + yes, strerror(errno)); /* Find out interface where the packet came in. 
*/ if (setsockopt_ifindex(AF_INET, sd, yes) == -1) - log_fatal("set-ipopts: setsockopt_ipv4_ifindex(%d): %s", yes, - strerror(errno)); + zlog_fatal("set-ipopts: setsockopt_ipv4_ifindex(%d): %s", yes, + strerror(errno)); #endif /* BFD_BSD */ } @@ -887,7 +889,7 @@ static void bp_bind_ip(int sd, uint16_t port) sin.sin_addr.s_addr = htonl(INADDR_ANY); sin.sin_port = htons(port); if (bind(sd, (struct sockaddr *)&sin, sizeof(sin)) == -1) - log_fatal("bind-ip: bind: %s", strerror(errno)); + zlog_fatal("bind-ip: bind: %s", strerror(errno)); } int bp_udp_shop(const struct vrf *vrf) @@ -899,7 +901,7 @@ int bp_udp_shop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp-shop: socket: %s", strerror(errno)); + zlog_fatal("udp-shop: socket: %s", strerror(errno)); bp_set_ipopts(sd); bp_bind_ip(sd, BFD_DEFDESTPORT); @@ -915,7 +917,7 @@ int bp_udp_mhop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp-mhop: socket: %s", strerror(errno)); + zlog_fatal("udp-mhop: socket: %s", strerror(errno)); bp_set_ipopts(sd); bp_bind_ip(sd, BFD_DEF_MHOP_DEST_PORT); @@ -932,7 +934,7 @@ int bp_peer_socket(const struct bfd_session *bs) if (bs->key.ifname[0]) device_to_bind = (const char *)bs->key.ifname; - else if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) && bs->key.vrfname[0]) device_to_bind = (const char *)bs->key.vrfname; @@ -941,8 +943,8 @@ int bp_peer_socket(const struct bfd_session *bs) bs->vrf->vrf_id, device_to_bind); } if (sd == -1) { - log_error("ipv4-new: failed to create socket: %s", - strerror(errno)); + zlog_err("ipv4-new: failed to create socket: %s", + strerror(errno)); return -1; } @@ -965,15 +967,15 @@ int bp_peer_socket(const struct bfd_session *bs) sin.sin_len = sizeof(sin); #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ memcpy(&sin.sin_addr, &bs->key.local, sizeof(sin.sin_addr)); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) == 0) sin.sin_addr.s_addr = INADDR_ANY; pcount = 0; do { if ((++pcount) > (BFD_SRCPORTMAX - BFD_SRCPORTINIT)) { /* Searched all ports, none available */ - log_error("ipv4-new: failed to bind port: %s", - strerror(errno)); + zlog_err("ipv4-new: failed to bind port: %s", + strerror(errno)); close(sd); return -1; } @@ -999,7 +1001,7 @@ int bp_peer_socketv6(const struct bfd_session *bs) if (bs->key.ifname[0]) device_to_bind = (const char *)bs->key.ifname; - else if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) && bs->key.vrfname[0]) device_to_bind = (const char *)bs->key.vrfname; @@ -1008,8 +1010,8 @@ int bp_peer_socketv6(const struct bfd_session *bs) bs->vrf->vrf_id, device_to_bind); } if (sd == -1) { - log_error("ipv6-new: failed to create socket: %s", - strerror(errno)); + zlog_err("ipv6-new: failed to create socket: %s", + strerror(errno)); return -1; } @@ -1039,8 +1041,8 @@ int bp_peer_socketv6(const struct bfd_session *bs) do { if ((++pcount) > (BFD_SRCPORTMAX - BFD_SRCPORTINIT)) { /* Searched all ports, none available */ - log_error("ipv6-new: failed to bind port: %s", - strerror(errno)); + zlog_err("ipv6-new: failed to bind port: %s", + strerror(errno)); close(sd); return -1; } @@ -1058,8 +1060,8 @@ int bp_set_ttlv6(int sd, uint8_t value) if (setsockopt(sd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &ttl, sizeof(ttl)) == -1) { - log_warning("set-ttlv6: setsockopt(IPV6_UNICAST_HOPS, %d): %s", - value, strerror(errno)); + zlog_warn("set-ttlv6: setsockopt(IPV6_UNICAST_HOPS, %d): %s", + value, 
strerror(errno)); return -1; } @@ -1072,8 +1074,8 @@ int bp_set_tosv6(int sd, uint8_t value) if (setsockopt(sd, IPPROTO_IPV6, IPV6_TCLASS, &tos, sizeof(tos)) == -1) { - log_warning("set-tosv6: setsockopt(IPV6_TCLASS, %d): %s", value, - strerror(errno)); + zlog_warn("set-tosv6: setsockopt(IPV6_TCLASS, %d): %s", value, + strerror(errno)); return -1; } @@ -1086,22 +1088,23 @@ static void bp_set_ipv6opts(int sd) int ipv6_only = BFD_IPV6_ONLY_VAL; if (bp_set_ttlv6(sd, BFD_TTL_VAL) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_UNICAST_HOPS, %d): %s", - BFD_TTL_VAL, strerror(errno)); + zlog_fatal( + "set-ipv6opts: setsockopt(IPV6_UNICAST_HOPS, %d): %s", + BFD_TTL_VAL, strerror(errno)); if (setsockopt_ipv6_hoplimit(sd, BFD_RCV_TTL_VAL) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_HOPLIMIT, %d): %s", - BFD_RCV_TTL_VAL, strerror(errno)); + zlog_fatal("set-ipv6opts: setsockopt(IPV6_HOPLIMIT, %d): %s", + BFD_RCV_TTL_VAL, strerror(errno)); if (setsockopt_ipv6_pktinfo(sd, ipv6_pktinfo) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_PKTINFO, %d): %s", - ipv6_pktinfo, strerror(errno)); + zlog_fatal("set-ipv6opts: setsockopt(IPV6_PKTINFO, %d): %s", + ipv6_pktinfo, strerror(errno)); if (setsockopt(sd, IPPROTO_IPV6, IPV6_V6ONLY, &ipv6_only, sizeof(ipv6_only)) == -1) - log_fatal("set-ipv6opts: setsockopt(IPV6_V6ONLY, %d): %s", - ipv6_only, strerror(errno)); + zlog_fatal("set-ipv6opts: setsockopt(IPV6_V6ONLY, %d): %s", + ipv6_only, strerror(errno)); } static void bp_bind_ipv6(int sd, uint16_t port) @@ -1116,7 +1119,7 @@ static void bp_bind_ipv6(int sd, uint16_t port) sin6.sin6_len = sizeof(sin6); #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */ if (bind(sd, (struct sockaddr *)&sin6, sizeof(sin6)) == -1) - log_fatal("bind-ipv6: bind: %s", strerror(errno)); + zlog_fatal("bind-ipv6: bind: %s", strerror(errno)); } int bp_udp6_shop(const struct vrf *vrf) @@ -1128,7 +1131,7 @@ int bp_udp6_shop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp6-shop: socket: %s", strerror(errno)); + zlog_fatal("udp6-shop: socket: %s", strerror(errno)); bp_set_ipv6opts(sd); bp_bind_ipv6(sd, BFD_DEFDESTPORT); @@ -1145,7 +1148,7 @@ int bp_udp6_mhop(const struct vrf *vrf) vrf->name); } if (sd == -1) - log_fatal("udp6-mhop: socket: %s", strerror(errno)); + zlog_fatal("udp6-mhop: socket: %s", strerror(errno)); bp_set_ipv6opts(sd); bp_bind_ipv6(sd, BFD_DEF_MHOP_DEST_PORT); @@ -1161,7 +1164,7 @@ int bp_echo_socket(const struct vrf *vrf) s = vrf_socket(AF_INET, SOCK_DGRAM, 0, vrf->vrf_id, vrf->name); } if (s == -1) - log_fatal("echo-socket: socket: %s", strerror(errno)); + zlog_fatal("echo-socket: socket: %s", strerror(errno)); bp_set_ipopts(s); bp_bind_ip(s, BFD_DEF_ECHO_PORT); @@ -1177,7 +1180,7 @@ int bp_echov6_socket(const struct vrf *vrf) s = vrf_socket(AF_INET6, SOCK_DGRAM, 0, vrf->vrf_id, vrf->name); } if (s == -1) - log_fatal("echov6-socket: socket: %s", strerror(errno)); + zlog_fatal("echov6-socket: socket: %s", strerror(errno)); bp_set_ipv6opts(s); bp_bind_ipv6(s, BFD_DEF_ECHO_PORT); diff --git a/bfdd/bfdd.c b/bfdd/bfdd.c index 69f268ab01..bed6ccd142 100644 --- a/bfdd/bfdd.c +++ b/bfdd/bfdd.c @@ -49,8 +49,8 @@ void socket_close(int *s) return; if (close(*s) != 0) - log_error("%s: close(%d): (%d) %s", __func__, *s, errno, - strerror(errno)); + zlog_err("%s: close(%d): (%d) %s", __func__, *s, errno, + strerror(errno)); *s = -1; } diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c index 48dcce3ddf..c1123c4c33 100644 --- a/bfdd/bfdd_nb_config.c +++ b/bfdd/bfdd_nb_config.c @@ -99,7 +99,7 @@ static int 
bfd_session_create(enum nb_event event, const struct lyd_node *dnode, /* This session was already configured by another daemon. */ if (bs != NULL) { /* Now it is configured also by CLI. */ - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); bs->refcount++; resource->ptr = bs; @@ -115,11 +115,11 @@ static int bfd_session_create(enum nb_event event, const struct lyd_node *dnode, /* Set configuration flags. */ bs->refcount = 1; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); if (mhop) - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_MH); + SET_FLAG(bs->flags, BFD_SESS_FLAG_MH); if (bs->key.family == AF_INET6) - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6); + SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6); resource->ptr = bs; break; @@ -164,10 +164,10 @@ static int bfd_session_destroy(enum nb_event event, case NB_EV_APPLY: bs = nb_running_unset_entry(dnode); /* CLI is not using this session anymore. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0) break; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG); bs->refcount--; /* There are still daemons using it. */ if (bs->refcount > 0) @@ -384,10 +384,10 @@ int bfdd_bfd_sessions_single_hop_administrative_down_modify( bs = nb_running_get_entry(dnode, NULL, true); if (!shutdown) { - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return NB_OK; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Change and notify state change. */ bs->ses_state = PTM_BFD_DOWN; @@ -396,15 +396,15 @@ int bfdd_bfd_sessions_single_hop_administrative_down_modify( /* Enable all timers. */ bfd_recvtimer_update(bs); bfd_xmttimer_update(bs, bs->xmt_TO); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) { bfd_echo_recvtimer_update(bs); bfd_echo_xmttimer_update(bs, bs->echo_xmt_TO); } } else { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) return NB_OK; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); /* Disable all events. */ bfd_recvtimer_delete(bs); @@ -448,18 +448,18 @@ int bfdd_bfd_sessions_single_hop_echo_mode_modify(enum nb_event event, bs = nb_running_get_entry(dnode, NULL, true); if (!echo) { - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) return NB_OK; - BFD_UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); ptm_bfd_echo_stop(bs); } else { - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) return NB_OK; - BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); /* Apply setting immediately. */ - if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) bs_echo_timer_handler(bs); } diff --git a/bfdd/bfdd_nb_state.c b/bfdd/bfdd_nb_state.c index dfca3d1417..2a44d46c41 100644 --- a/bfdd/bfdd_nb_state.c +++ b/bfdd/bfdd_nb_state.c @@ -211,7 +211,7 @@ struct yang_data *bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem( * * TODO: support demand mode. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) detection_mode = 1; else detection_mode = 2; diff --git a/bfdd/bfdd_vty.c b/bfdd/bfdd_vty.c index 2a98b0fb02..74ffd6d625 100644 --- a/bfdd/bfdd_vty.c +++ b/bfdd/bfdd_vty.c @@ -84,7 +84,7 @@ static void _display_peer_header(struct vty *vty, struct bfd_session *bs) inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf))); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) vty_out(vty, " multihop"); if (memcmp(&bs->key.local, &zero_addr, sizeof(bs->key.local))) @@ -143,7 +143,7 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs) vty_out(vty, "\t\tDiagnostics: %s\n", diag2str(bs->local_diag)); vty_out(vty, "\t\tRemote diagnostics: %s\n", diag2str(bs->remote_diag)); vty_out(vty, "\t\tPeer Type: %s\n", - BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? "configured" : "dynamic"); + CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? "configured" : "dynamic"); vty_out(vty, "\t\tLocal timers:\n"); vty_out(vty, "\t\t\tDetect-multiplier: %" PRIu32 "\n", @@ -235,7 +235,7 @@ static struct json_object *__display_peer_json(struct bfd_session *bs) bs->timers.required_min_rx / 1000); json_object_int_add(jo, "transmit-interval", bs->timers.desired_min_tx / 1000); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) json_object_int_add(jo, "echo-interval", bs->timers.required_min_echo / 1000); else @@ -305,7 +305,7 @@ static void _display_peer_json_iter(struct hash_bucket *hb, void *arg) jon = __display_peer_json(bs); if (jon == NULL) { - log_warning("%s: not enough memory", __func__); + zlog_warn("%s: not enough memory", __func__); return; } @@ -415,7 +415,7 @@ static void _display_peer_counter_json_iter(struct hash_bucket *hb, void *arg) jon = __display_peer_counters_json(bs); if (jon == NULL) { - log_warning("%s: not enough memory", __func__); + zlog_warn("%s: not enough memory", __func__); return; } @@ -457,7 +457,7 @@ static void _display_peer_brief(struct vty *vty, struct bfd_session *bs) { char addr_buf[INET6_ADDRSTRLEN]; - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { vty_out(vty, "%-10u", bs->discrs.my_discr); inet_ntop(bs->key.family, &bs->key.local, addr_buf, sizeof(addr_buf)); vty_out(vty, " %-40s", addr_buf); diff --git a/bfdd/config.c b/bfdd/config.c index dd4a192694..b71670f012 100644 --- a/bfdd/config.c +++ b/bfdd/config.c @@ -92,8 +92,8 @@ static int parse_config_json(struct json_object *jo, bpc_handle h, void *arg) error += parse_list(jo_val, PLT_LABEL, h, arg); } else { sval = json_object_get_string(jo_val); - log_warning("%s:%d invalid configuration: %s", __func__, - __LINE__, sval); + zlog_warn("%s:%d invalid configuration: %s", __func__, + __LINE__, sval); error++; } } @@ -139,15 +139,15 @@ static int parse_list(struct json_object *jo, enum peer_list_type plt, switch (plt) { case PLT_IPV4: - log_debug("ipv4 peers %d:", allen); + zlog_debug("ipv4 peers %d:", allen); bpc.bpc_ipv4 = true; break; case PLT_IPV6: - log_debug("ipv6 peers %d:", allen); + zlog_debug("ipv6 peers %d:", allen); bpc.bpc_ipv4 = false; break; case PLT_LABEL: - log_debug("label peers %d:", allen); + zlog_debug("label peers %d:", allen); if (parse_peer_label_config(jo_val, &bpc) != 0) { error++; continue; @@ -156,8 +156,8 @@ static int parse_list(struct json_object *jo, enum peer_list_type plt, default: error++; - log_error("%s:%d: unsupported 
peer type", __func__, - __LINE__); + zlog_err("%s:%d: unsupported peer type", __func__, + __LINE__); break; } @@ -178,7 +178,7 @@ static int parse_peer_config(struct json_object *jo, struct bfd_peer_cfg *bpc) int family_type = (bpc->bpc_ipv4) ? AF_INET : AF_INET6; int error = 0; - log_debug("\tpeer: %s", bpc->bpc_ipv4 ? "ipv4" : "ipv6"); + zlog_debug(" peer: %s", bpc->bpc_ipv4 ? "ipv4" : "ipv6"); JSON_FOREACH (jo, joi, join) { key = json_object_iter_peek_name(&joi); @@ -186,40 +186,41 @@ static int parse_peer_config(struct json_object *jo, struct bfd_peer_cfg *bpc) if (strcmp(key, "multihop") == 0) { bpc->bpc_mhop = json_object_get_boolean(jo_val); - log_debug("\tmultihop: %s", - bpc->bpc_mhop ? "true" : "false"); + zlog_debug(" multihop: %s", + bpc->bpc_mhop ? "true" : "false"); } else if (strcmp(key, "peer-address") == 0) { sval = json_object_get_string(jo_val); if (strtosa(sval, &bpc->bpc_peer) != 0 || bpc->bpc_peer.sa_sin.sin_family != family_type) { - log_info( + zlog_debug( "%s:%d failed to parse peer-address '%s'", __func__, __LINE__, sval); error++; } - log_debug("\tpeer-address: %s", sval); + zlog_debug(" peer-address: %s", sval); } else if (strcmp(key, "local-address") == 0) { sval = json_object_get_string(jo_val); if (strtosa(sval, &bpc->bpc_local) != 0 || bpc->bpc_local.sa_sin.sin_family != family_type) { - log_info( + zlog_debug( "%s:%d failed to parse local-address '%s'", __func__, __LINE__, sval); error++; } - log_debug("\tlocal-address: %s", sval); + zlog_debug(" local-address: %s", sval); } else if (strcmp(key, "local-interface") == 0) { bpc->bpc_has_localif = true; sval = json_object_get_string(jo_val); if (strlcpy(bpc->bpc_localif, sval, sizeof(bpc->bpc_localif)) > sizeof(bpc->bpc_localif)) { - log_debug("\tlocal-interface: %s (truncated)", - sval); + zlog_debug( + " local-interface: %s (truncated)", + sval); error++; } else { - log_debug("\tlocal-interface: %s", sval); + zlog_debug(" local-interface: %s", sval); } } else if (strcmp(key, "vrf-name") == 0) { bpc->bpc_has_vrfname = true; @@ -227,65 +228,68 @@ static int parse_peer_config(struct json_object *jo, struct bfd_peer_cfg *bpc) if (strlcpy(bpc->bpc_vrfname, sval, sizeof(bpc->bpc_vrfname)) > sizeof(bpc->bpc_vrfname)) { - log_debug("\tvrf-name: %s (truncated)", sval); + zlog_debug(" vrf-name: %s (truncated)", + sval); error++; } else { - log_debug("\tvrf-name: %s", sval); + zlog_debug(" vrf-name: %s", sval); } } else if (strcmp(key, "detect-multiplier") == 0) { bpc->bpc_detectmultiplier = json_object_get_int64(jo_val); bpc->bpc_has_detectmultiplier = true; - log_debug("\tdetect-multiplier: %u", - bpc->bpc_detectmultiplier); + zlog_debug(" detect-multiplier: %u", + bpc->bpc_detectmultiplier); } else if (strcmp(key, "receive-interval") == 0) { bpc->bpc_recvinterval = json_object_get_int64(jo_val); bpc->bpc_has_recvinterval = true; - log_debug("\treceive-interval: %llu", - bpc->bpc_recvinterval); + zlog_debug(" receive-interval: %" PRIu64, + bpc->bpc_recvinterval); } else if (strcmp(key, "transmit-interval") == 0) { bpc->bpc_txinterval = json_object_get_int64(jo_val); bpc->bpc_has_txinterval = true; - log_debug("\ttransmit-interval: %llu", - bpc->bpc_txinterval); + zlog_debug(" transmit-interval: %" PRIu64, + bpc->bpc_txinterval); } else if (strcmp(key, "echo-interval") == 0) { bpc->bpc_echointerval = json_object_get_int64(jo_val); bpc->bpc_has_echointerval = true; - log_debug("\techo-interval: %llu", - bpc->bpc_echointerval); + zlog_debug(" echo-interval: %" PRIu64, + bpc->bpc_echointerval); } else if (strcmp(key, 
"create-only") == 0) { bpc->bpc_createonly = json_object_get_boolean(jo_val); - log_debug("\tcreate-only: %s", - bpc->bpc_createonly ? "true" : "false"); + zlog_debug(" create-only: %s", + bpc->bpc_createonly ? "true" : "false"); } else if (strcmp(key, "shutdown") == 0) { bpc->bpc_shutdown = json_object_get_boolean(jo_val); - log_debug("\tshutdown: %s", - bpc->bpc_shutdown ? "true" : "false"); + zlog_debug(" shutdown: %s", + bpc->bpc_shutdown ? "true" : "false"); } else if (strcmp(key, "echo-mode") == 0) { bpc->bpc_echo = json_object_get_boolean(jo_val); - log_debug("\techo-mode: %s", - bpc->bpc_echo ? "true" : "false"); + zlog_debug(" echo-mode: %s", + bpc->bpc_echo ? "true" : "false"); } else if (strcmp(key, "label") == 0) { bpc->bpc_has_label = true; sval = json_object_get_string(jo_val); if (strlcpy(bpc->bpc_label, sval, sizeof(bpc->bpc_label)) > sizeof(bpc->bpc_label)) { - log_debug("\tlabel: %s (truncated)", sval); + zlog_debug(" label: %s (truncated)", + sval); error++; } else { - log_debug("\tlabel: %s", sval); + zlog_debug(" label: %s", sval); } } else { sval = json_object_get_string(jo_val); - log_warning("%s:%d invalid configuration: '%s: %s'", - __func__, __LINE__, key, sval); + zlog_warn("%s:%d invalid configuration: '%s: %s'", + __func__, __LINE__, key, sval); error++; } } if (bpc->bpc_peer.sa_sin.sin_family == 0) { - log_debug("%s:%d no peer address provided", __func__, __LINE__); + zlog_debug("%s:%d no peer address provided", __func__, + __LINE__); error++; } @@ -309,7 +313,7 @@ static int parse_peer_label_config(struct json_object *jo, if (pl == NULL) return 1; - log_debug("\tpeer-label: %s", sval); + zlog_debug(" peer-label: %s", sval); /* Translate the label into BFD address keys. */ bs_to_bpc(pl->pl_bs, bpc); @@ -471,12 +475,12 @@ char *config_notify_config(const char *op, struct bfd_session *bs) json_object_int_add(resp, "remote-echo-interval", bs->remote_timers.required_min_echo / 1000); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO)) json_object_boolean_true_add(resp, "echo-mode"); else json_object_boolean_false_add(resp, "echo-mode"); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN)) json_object_boolean_true_add(resp, "shutdown"); else json_object_boolean_false_add(resp, "shutdown"); @@ -508,12 +512,12 @@ static int json_object_add_peer(struct json_object *jo, struct bfd_session *bs) char addr_buf[INET6_ADDRSTRLEN]; /* Add peer 'key' information. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6)) json_object_boolean_true_add(jo, "ipv6"); else json_object_boolean_false_add(jo, "ipv6"); - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) { json_object_boolean_true_add(jo, "multihop"); json_object_string_add(jo, "peer-address", inet_ntop(bs->key.family, &bs->key.peer, @@ -570,7 +574,7 @@ struct peer_label *pl_new(const char *label, struct bfd_session *bs) if (strlcpy(pl->pl_label, label, sizeof(pl->pl_label)) > sizeof(pl->pl_label)) - log_warning("%s:%d: label was truncated", __func__, __LINE__); + zlog_warn("%s:%d: label was truncated", __func__, __LINE__); pl->pl_bs = bs; bs->pl = pl; diff --git a/bfdd/control.c b/bfdd/control.c index ae6f5a3e79..4adc54a64a 100644 --- a/bfdd/control.c +++ b/bfdd/control.c @@ -86,13 +86,13 @@ static int sock_set_nonblock(int fd) flags = fcntl(fd, F_GETFL, 0); if (flags == -1) { - log_warning("%s: fcntl F_GETFL: %s", __func__, strerror(errno)); + zlog_warn("%s: fcntl F_GETFL: %s", __func__, strerror(errno)); return -1; } flags |= O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) == -1) { - log_warning("%s: fcntl F_SETFL: %s", __func__, strerror(errno)); + zlog_warn("%s: fcntl F_SETFL: %s", __func__, strerror(errno)); return -1; } @@ -116,20 +116,20 @@ int control_init(const char *path) sd = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC); if (sd == -1) { - log_error("%s: socket: %s", __func__, strerror(errno)); + zlog_err("%s: socket: %s", __func__, strerror(errno)); return -1; } umval = umask(0); if (bind(sd, (struct sockaddr *)&sun_, sizeof(sun_)) == -1) { - log_error("%s: bind: %s", __func__, strerror(errno)); + zlog_err("%s: bind: %s", __func__, strerror(errno)); close(sd); return -1; } umask(umval); if (listen(sd, SOMAXCONN) == -1) { - log_error("%s: listen: %s", __func__, strerror(errno)); + zlog_err("%s: listen: %s", __func__, strerror(errno)); close(sd); return -1; } @@ -164,7 +164,7 @@ int control_accept(struct thread *t) csock = accept(sd, NULL, 0); if (csock == -1) { - log_warning("%s: accept: %s", __func__, strerror(errno)); + zlog_warn("%s: accept: %s", __func__, strerror(errno)); return 0; } @@ -440,7 +440,7 @@ static int control_read(struct thread *t) if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) goto schedule_next_read; - log_warning("%s: read: %s", __func__, strerror(errno)); + zlog_warn("%s: read: %s", __func__, strerror(errno)); control_free(bcs); return 0; } @@ -448,15 +448,15 @@ static int control_read(struct thread *t) /* Validate header fields. 
*/ plen = ntohl(bcm.bcm_length); if (plen < 2) { - log_debug("%s: client closed due small message length: %d", - __func__, bcm.bcm_length); + zlog_debug("%s: client closed due small message length: %d", + __func__, bcm.bcm_length); control_free(bcs); return 0; } if (bcm.bcm_ver != BMV_VERSION_1) { - log_debug("%s: client closed due bad version: %d", __func__, - bcm.bcm_ver); + zlog_debug("%s: client closed due bad version: %d", __func__, + bcm.bcm_ver); control_free(bcs); return 0; } @@ -470,8 +470,8 @@ static int control_read(struct thread *t) bcb->bcb_buf = XMALLOC(MTYPE_BFDD_NOTIFICATION, sizeof(bcm) + bcb->bcb_left + 1); if (bcb->bcb_buf == NULL) { - log_warning("%s: not enough memory for message size: %zu", - __func__, bcb->bcb_left); + zlog_warn("%s: not enough memory for message size: %zu", + __func__, bcb->bcb_left); control_free(bcs); return 0; } @@ -492,7 +492,7 @@ skip_header: if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) goto schedule_next_read; - log_warning("%s: read: %s", __func__, strerror(errno)); + zlog_warn("%s: read: %s", __func__, strerror(errno)); control_free(bcs); return 0; } @@ -521,8 +521,8 @@ skip_header: break; default: - log_debug("%s: unhandled message type: %d", __func__, - bcb->bcb_bcm->bcm_type); + zlog_debug("%s: unhandled message type: %d", __func__, + bcb->bcb_bcm->bcm_type); control_response(bcs, bcb->bcb_bcm->bcm_id, BCM_RESPONSE_ERROR, "invalid message type"); break; @@ -559,7 +559,7 @@ static int control_write(struct thread *t) return 0; } - log_warning("%s: write: %s", __func__, strerror(errno)); + zlog_warn("%s: write: %s", __func__, strerror(errno)); control_free(bcs); return 0; } @@ -723,8 +723,8 @@ static void control_response(struct bfd_control_socket *bcs, uint16_t id, /* Generate JSON response. */ jsonstr = config_response(status, error); if (jsonstr == NULL) { - log_warning("%s: config_response: failed to get JSON str", - __func__); + zlog_warn("%s: config_response: failed to get JSON str", + __func__); return; } @@ -753,8 +753,8 @@ static void _control_notify(struct bfd_control_socket *bcs, /* Generate JSON response. */ jsonstr = config_notify(bs); if (jsonstr == NULL) { - log_warning("%s: config_notify: failed to get JSON str", - __func__); + zlog_warn("%s: config_notify: failed to get JSON str", + __func__); return; } @@ -816,8 +816,8 @@ static void _control_notify_config(struct bfd_control_socket *bcs, /* Generate JSON response. */ jsonstr = config_notify_config(op, bs); if (jsonstr == NULL) { - log_warning("%s: config_notify_config: failed to get JSON str", - __func__); + zlog_warn("%s: config_notify_config: failed to get JSON str", + __func__); return; } diff --git a/bfdd/event.c b/bfdd/event.c index 5ba54c2b0b..686f39cc0a 100644 --- a/bfdd/event.c +++ b/bfdd/event.c @@ -43,13 +43,14 @@ void bfd_recvtimer_update(struct bfd_session *bs) bfd_recvtimer_delete(bs); /* Don't add event if peer is deactivated. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); #ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); + zlog_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, + tv.tv_usec); #endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv, @@ -64,13 +65,14 @@ void bfd_echo_recvtimer_update(struct bfd_session *bs) bfd_echo_recvtimer_delete(bs); /* Don't add event if peer is deactivated. 
*/ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); #ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); + zlog_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, + tv.tv_usec); #endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv, @@ -85,13 +87,14 @@ void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter) bfd_xmttimer_delete(bs); /* Don't add event if peer is deactivated. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); #ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); + zlog_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, + tv.tv_usec); #endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev); @@ -105,13 +108,14 @@ void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter) bfd_echo_xmttimer_delete(bs); /* Don't add event if peer is deactivated. */ - if (BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || + if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1) return; tv_normalize(&tv); #ifdef BFD_EVENT_DEBUG - log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec); + zlog_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, + tv.tv_usec); #endif /* BFD_EVENT_DEBUG */ thread_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv, diff --git a/bfdd/ptm_adapter.c b/bfdd/ptm_adapter.c index dcca70b796..eae2158acb 100644 --- a/bfdd/ptm_adapter.c +++ b/bfdd/ptm_adapter.c @@ -120,9 +120,9 @@ static void debug_printbpc(const char *func, unsigned int line, sprintf(cbit_str, "CB %x", bpc->bpc_cbit); - log_debug("%s:%d: %s %s%s%s%s%s%s %s", func, line, - bpc->bpc_mhop ? "multi-hop" : "single-hop", addr[0], addr[1], - addr[2], timers[0], timers[1], timers[2], cbit_str); + zlog_debug("%s:%d: %s %s%s%s%s%s%s %s", func, line, + bpc->bpc_mhop ? 
"multi-hop" : "single-hop", addr[0], addr[1], + addr[2], timers[0], timers[1], timers[2], cbit_str); } #define DEBUG_PRINTBPC(bpc) debug_printbpc(__FILE__, __LINE__, (bpc)) @@ -260,7 +260,7 @@ static void _ptm_msg_read_address(struct stream *msg, struct sockaddr_any *sa) return; default: - log_warning("ptm-read-address: invalid family: %d", family); + zlog_warn("ptm-read-address: invalid family: %d", family); break; } @@ -316,7 +316,7 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, *pc = pc_new(pid); if (*pc == NULL) { - log_debug("ptm-read: failed to allocate memory"); + zlog_debug("ptm-read: failed to allocate memory"); return -1; } @@ -358,7 +358,7 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, */ STREAM_GETC(msg, ifnamelen); if (ifnamelen >= sizeof(bpc->bpc_localif)) { - log_error("ptm-read: interface name is too big"); + zlog_err("ptm-read: interface name is too big"); return -1; } @@ -376,7 +376,8 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, bpc->bpc_has_vrfname = true; strlcpy(bpc->bpc_vrfname, vrf->name, sizeof(bpc->bpc_vrfname)); } else { - log_error("ptm-read: vrf id %u could not be identified", vrf_id); + zlog_err("ptm-read: vrf id %u could not be identified", + vrf_id); return -1; } } else { @@ -390,7 +391,7 @@ static int _ptm_msg_read(struct stream *msg, int command, vrf_id_t vrf_id, if (bpc->bpc_local.sa_sin.sin_family != 0 && (bpc->bpc_local.sa_sin.sin_family != bpc->bpc_peer.sa_sin.sin_family)) { - log_warning("ptm-read: peer family doesn't match local type"); + zlog_warn("ptm-read: peer family doesn't match local type"); return -1; } @@ -418,20 +419,21 @@ static void bfdd_dest_register(struct stream *msg, vrf_id_t vrf_id) if (bs == NULL) { bs = ptm_bfd_sess_new(&bpc); if (bs == NULL) { - log_debug("ptm-add-dest: failed to create BFD session"); + zlog_debug( + "ptm-add-dest: failed to create BFD session"); return; } } else { /* Don't try to change echo/shutdown state. */ - bpc.bpc_echo = BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); + bpc.bpc_echo = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO); bpc.bpc_shutdown = - BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); + CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); } /* Create client peer notification register. */ pcn = pcn_new(pc, bs); if (pcn == NULL) { - log_error("ptm-add-dest: failed to registrate notifications"); + zlog_err("ptm-add-dest: failed to registrate notifications"); return; } @@ -454,7 +456,7 @@ static void bfdd_dest_deregister(struct stream *msg, vrf_id_t vrf_id) /* Find or start new BFD session. 
*/ bs = bs_peer_find(&bpc); if (bs == NULL) { - log_debug("ptm-del-dest: failed to find BFD session"); + zlog_debug("ptm-del-dest: failed to find BFD session"); return; } @@ -462,7 +464,7 @@ static void bfdd_dest_deregister(struct stream *msg, vrf_id_t vrf_id) pcn = pcn_lookup(pc, bs); pcn_free(pcn); if (bs->refcount || - BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG)) + CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG)) return; bs->ses_state = PTM_BFD_ADM_DOWN; @@ -485,14 +487,14 @@ static void bfdd_client_register(struct stream *msg) pc = pc_new(pid); if (pc == NULL) { - log_error("ptm-add-client: failed to register client: %u", pid); + zlog_err("ptm-add-client: failed to register client: %u", pid); return; } return; stream_failure: - log_error("ptm-add-client: failed to register client"); + zlog_err("ptm-add-client: failed to register client"); } /* @@ -509,7 +511,7 @@ static void bfdd_client_deregister(struct stream *msg) pc = pc_lookup(pid); if (pc == NULL) { - log_debug("ptm-del-client: failed to find client: %u", pid); + zlog_debug("ptm-del-client: failed to find client: %u", pid); return; } @@ -518,7 +520,7 @@ static void bfdd_client_deregister(struct stream *msg) return; stream_failure: - log_error("ptm-del-client: failed to deregister client"); + zlog_err("ptm-del-client: failed to deregister client"); } static int bfdd_replay(ZAPI_CALLBACK_ARGS) @@ -544,14 +546,14 @@ static int bfdd_replay(ZAPI_CALLBACK_ARGS) break; default: - log_debug("ptm-replay: invalid message type %u", rcmd); + zlog_debug("ptm-replay: invalid message type %u", rcmd); return -1; } return 0; stream_failure: - log_error("ptm-replay: failed to find command"); + zlog_err("ptm-replay: failed to find command"); return -1; } diff --git a/bgpd/bgp_addpath.c b/bgpd/bgp_addpath.c index aaa77b04dc..75fdb0bb44 100644 --- a/bgpd/bgp_addpath.c +++ b/bgpd/bgp_addpath.c @@ -65,8 +65,8 @@ bgp_addpath_names(enum bgp_addpath_strat strat) /* * Returns if any peer is transmitting addpaths for a given afi/safi. */ -int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, - safi_t safi) +bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, + safi_t safi) { return d->total_peercount[afi][safi] > 0; } @@ -123,15 +123,15 @@ uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi, * Returns true if the path has an assigned addpath ID for any of the addpath * strategies. */ -int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d) +bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d) { int i; for (i = 0; i < BGP_ADDPATH_MAX; i++) if (d->addpath_tx_id[i] != 0) - return 1; + return true; - return 0; + return false; } /* @@ -152,7 +152,7 @@ void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd, /* * Check to see if the addpath strategy requires DMED to be configured to work. 
*/ -int bgp_addpath_dmed_required(int strategy) +bool bgp_addpath_dmed_required(int strategy) { return strategy == BGP_ADDPATH_BEST_PER_AS; } @@ -161,21 +161,20 @@ int bgp_addpath_dmed_required(int strategy) * Return true if this is a path we should advertise due to a * configured addpath-tx knob */ -int bgp_addpath_tx_path(enum bgp_addpath_strat strat, - struct bgp_path_info *pi) +bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, struct bgp_path_info *pi) { switch (strat) { case BGP_ADDPATH_NONE: - return 0; + return false; case BGP_ADDPATH_ALL: - return 1; + return true; case BGP_ADDPATH_BEST_PER_AS: if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED)) - return 1; + return true; else - return 0; + return false; default: - return 0; + return false; } } diff --git a/bgpd/bgp_addpath.h b/bgpd/bgp_addpath.h index 786873a004..f61d68e18f 100644 --- a/bgpd/bgp_addpath.h +++ b/bgpd/bgp_addpath.h @@ -32,8 +32,8 @@ void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d); -int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, - safi_t safi); +bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, + safi_t safi); void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd, struct bgp_addpath_node_data *nd, @@ -43,7 +43,7 @@ void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d, struct bgp_addpath_node_data *nd); -int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d); +bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d); uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi, struct bgp_addpath_info_data *d); @@ -51,14 +51,14 @@ uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi, const struct bgp_addpath_strategy_names * bgp_addpath_names(enum bgp_addpath_strat strat); -int bgp_addpath_dmed_required(int strategy); +bool bgp_addpath_dmed_required(int strategy); /* * Return true if this is a path we should advertise due to a configured * addpath-tx knob */ -int bgp_addpath_tx_path(enum bgp_addpath_strat strat, - struct bgp_path_info *pi); +bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, + struct bgp_path_info *pi); /* * Change the type of addpath used for a peer. */ diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c index 8d1c83cf5f..9ee6a24bc5 100644 --- a/bgpd/bgp_advertise.c +++ b/bgpd/bgp_advertise.c @@ -144,8 +144,8 @@ void bgp_advertise_unintern(struct hash *hash, struct bgp_advertise_attr *baa) } } -int bgp_adj_out_lookup(struct peer *peer, struct bgp_node *rn, - uint32_t addpath_tx_id) +bool bgp_adj_out_lookup(struct peer *peer, struct bgp_node *rn, + uint32_t addpath_tx_id) { struct bgp_adj_out *adj; struct peer_af *paf; @@ -169,11 +169,12 @@ int bgp_adj_out_lookup(struct peer *peer, struct bgp_node *rn, && adj->addpath_tx_id != addpath_tx_id) continue; - return (adj->adv ? (adj->adv->baa ? 1 : 0) - : (adj->attr ? 1 : 0)); + return (adj->adv + ? (adj->adv->baa ? true : false) + : (adj->attr ? 
true : false)); } - return 0; + return false; } @@ -208,8 +209,8 @@ void bgp_adj_in_remove(struct bgp_node *rn, struct bgp_adj_in *bai) XFREE(MTYPE_BGP_ADJ_IN, bai); } -int bgp_adj_in_unset(struct bgp_node *rn, struct peer *peer, - uint32_t addpath_id) +bool bgp_adj_in_unset(struct bgp_node *rn, struct peer *peer, + uint32_t addpath_id) { struct bgp_adj_in *adj; struct bgp_adj_in *adj_next; @@ -217,7 +218,7 @@ int bgp_adj_in_unset(struct bgp_node *rn, struct peer *peer, adj = rn->adj_in; if (!adj) - return 0; + return false; while (adj) { adj_next = adj->next; @@ -230,7 +231,7 @@ int bgp_adj_in_unset(struct bgp_node *rn, struct peer *peer, adj = adj_next; } - return 1; + return true; } void bgp_sync_init(struct peer *peer) diff --git a/bgpd/bgp_advertise.h b/bgpd/bgp_advertise.h index c983598756..6223dc94a3 100644 --- a/bgpd/bgp_advertise.h +++ b/bgpd/bgp_advertise.h @@ -139,10 +139,10 @@ struct bgp_synchronize { #define BGP_ADJ_IN_DEL(N, A) BGP_PATH_INFO_DEL(N, A, adj_in) /* Prototypes. */ -extern int bgp_adj_out_lookup(struct peer *, struct bgp_node *, uint32_t); +extern bool bgp_adj_out_lookup(struct peer *, struct bgp_node *, uint32_t); extern void bgp_adj_in_set(struct bgp_node *, struct peer *, struct attr *, uint32_t); -extern int bgp_adj_in_unset(struct bgp_node *, struct peer *, uint32_t); +extern bool bgp_adj_in_unset(struct bgp_node *, struct peer *, uint32_t); extern void bgp_adj_in_remove(struct bgp_node *, struct bgp_adj_in *); extern void bgp_sync_init(struct peer *); diff --git a/bgpd/bgp_aspath.c b/bgpd/bgp_aspath.c index be80675b5d..44962f5af3 100644 --- a/bgpd/bgp_aspath.c +++ b/bgpd/bgp_aspath.c @@ -428,6 +428,22 @@ bool aspath_check_as_sets(struct aspath *aspath) return false; } +/* Check if aspath has BGP_AS_ZERO */ +bool aspath_check_as_zero(struct aspath *aspath) +{ + struct assegment *seg = aspath->segments; + unsigned int i; + + while (seg) { + for (i = 0; i < seg->length; i++) + if (seg->as[i] == BGP_AS_ZERO) + return true; + seg = seg->next; + } + + return false; +} + /* Estimate size aspath /might/ take if encoded into an * ASPATH attribute. * diff --git a/bgpd/bgp_aspath.h b/bgpd/bgp_aspath.h index f327751f33..9df352fcd6 100644 --- a/bgpd/bgp_aspath.h +++ b/bgpd/bgp_aspath.h @@ -39,6 +39,7 @@ #define BGP_PRIVATE_AS4_MAX 4294967294U /* we leave BGP_AS_MAX as the 16bit AS MAX number. */ +#define BGP_AS_ZERO 0 #define BGP_AS_MAX 65535U #define BGP_AS4_MAX 4294967295U /* Transition 16Bit AS as defined by IANA */ @@ -121,6 +122,7 @@ extern bool aspath_left_confed_check(struct aspath *); extern unsigned long aspath_count(void); extern unsigned int aspath_count_hops(const struct aspath *); extern bool aspath_check_as_sets(struct aspath *aspath); +extern bool aspath_check_as_zero(struct aspath *aspath); extern unsigned int aspath_count_confeds(struct aspath *); extern unsigned int aspath_size(struct aspath *); extern as_t aspath_highest(struct aspath *); diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c index a1278874c4..b7e2f45195 100644 --- a/bgpd/bgp_attr.c +++ b/bgpd/bgp_attr.c @@ -119,25 +119,25 @@ static void *cluster_hash_alloc(void *p) /* Cluster list related functions. */ static struct cluster_list *cluster_parse(struct in_addr *pnt, int length) { - struct cluster_list tmp; + struct cluster_list tmp = {}; struct cluster_list *cluster; tmp.length = length; - tmp.list = pnt; + tmp.list = length == 0 ? 
NULL : pnt; cluster = hash_get(cluster_hash, &tmp, cluster_hash_alloc); cluster->refcnt++; return cluster; } -int cluster_loop_check(struct cluster_list *cluster, struct in_addr originator) +bool cluster_loop_check(struct cluster_list *cluster, struct in_addr originator) { int i; for (i = 0; i < cluster->length / 4; i++) if (cluster->list[i].s_addr == originator.s_addr) - return 1; - return 0; + return true; + return false; } static unsigned int cluster_hash_key_make(const void *p) @@ -152,10 +152,16 @@ static bool cluster_hash_cmp(const void *p1, const void *p2) const struct cluster_list *cluster1 = p1; const struct cluster_list *cluster2 = p2; - return (cluster1->length == cluster2->length - && (cluster1->list == cluster2->list - || memcmp(cluster1->list, cluster2->list, cluster1->length) - == 0)); + if (cluster1->list == cluster2->list) + return true; + + if (!cluster1->list || !cluster2->list) + return false; + + if (cluster1->length != cluster2->length) + return false; + + return (memcmp(cluster1->list, cluster2->list, cluster1->length) == 0); } static void cluster_free(struct cluster_list *cluster) @@ -174,14 +180,16 @@ static struct cluster_list *cluster_intern(struct cluster_list *cluster) return find; } -void cluster_unintern(struct cluster_list *cluster) +static void cluster_unintern(struct cluster_list **cluster) { - if (cluster->refcnt) - cluster->refcnt--; + if ((*cluster)->refcnt) + (*cluster)->refcnt--; - if (cluster->refcnt == 0) { - hash_release(cluster_hash, cluster); - cluster_free(cluster); + if ((*cluster)->refcnt == 0) { + void *p = hash_release(cluster_hash, *cluster); + assert(p == *cluster); + cluster_free(*cluster); + *cluster = NULL; } } @@ -263,16 +271,16 @@ void bgp_attr_flush_encap(struct attr *attr) * * This algorithm could be made faster if needed */ -static int encap_same(const struct bgp_attr_encap_subtlv *h1, - const struct bgp_attr_encap_subtlv *h2) +static bool encap_same(const struct bgp_attr_encap_subtlv *h1, + const struct bgp_attr_encap_subtlv *h2) { const struct bgp_attr_encap_subtlv *p; const struct bgp_attr_encap_subtlv *q; if (h1 == h2) - return 1; + return true; if (h1 == NULL || h2 == NULL) - return 0; + return false; for (p = h1; p; p = p->next) { for (q = h2; q; q = q->next) { @@ -283,7 +291,7 @@ static int encap_same(const struct bgp_attr_encap_subtlv *h1, } } if (!q) - return 0; + return false; } for (p = h2; p; p = p->next) { @@ -295,10 +303,10 @@ static int encap_same(const struct bgp_attr_encap_subtlv *h1, } } if (!q) - return 0; + return false; } - return 1; + return true; } static void *encap_hash_alloc(void *p) @@ -898,14 +906,11 @@ struct attr *bgp_attr_default_set(struct attr *attr, uint8_t origin) } /* Create the attributes for an aggregate */ -struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin, - struct aspath *aspath, - struct community *community, - struct ecommunity *ecommunity, - struct lcommunity *lcommunity, - struct bgp_aggregate *aggregate, - uint8_t atomic_aggregate, - struct prefix *p) +struct attr *bgp_attr_aggregate_intern( + struct bgp *bgp, uint8_t origin, struct aspath *aspath, + struct community *community, struct ecommunity *ecommunity, + struct lcommunity *lcommunity, struct bgp_aggregate *aggregate, + uint8_t atomic_aggregate, const struct prefix *p) { struct attr attr; struct attr *new; @@ -1032,7 +1037,7 @@ void bgp_attr_unintern_sub(struct attr *attr) UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)); if (attr->cluster) - cluster_unintern(attr->cluster); + 
cluster_unintern(&attr->cluster); UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST)); if (attr->transit) @@ -1274,7 +1279,7 @@ const uint8_t attr_flags_values[] = { }; static const size_t attr_flags_values_max = array_size(attr_flags_values) - 1; -static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) +static bool bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) { uint8_t mask = BGP_ATTR_FLAG_EXTLEN; const uint8_t flags = args->flags; @@ -1282,9 +1287,9 @@ static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) /* there may be attributes we don't know about */ if (attr_code > attr_flags_values_max) - return 0; + return false; if (attr_flags_values[attr_code] == 0) - return 0; + return false; /* RFC4271, "For well-known attributes, the Transitive bit MUST be set * to @@ -1296,7 +1301,7 @@ static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) EC_BGP_ATTR_FLAG, "%s well-known attributes must have transitive flag set (%x)", lookup_msg(attr_str, attr_code, NULL), flags); - return 1; + return true; } /* "For well-known attributes and for optional non-transitive @@ -1309,7 +1314,7 @@ static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) "%s well-known attribute " "must NOT have the partial flag set (%x)", lookup_msg(attr_str, attr_code, NULL), flags); - return 1; + return true; } if (CHECK_FLAG(flags, BGP_ATTR_FLAG_OPTIONAL) && !CHECK_FLAG(flags, BGP_ATTR_FLAG_TRANS)) { @@ -1317,7 +1322,7 @@ static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) "%s optional + transitive attribute " "must NOT have the partial flag set (%x)", lookup_msg(attr_str, attr_code, NULL), flags); - return 1; + return true; } } @@ -1329,10 +1334,10 @@ static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) SET_FLAG(mask, BGP_ATTR_FLAG_PARTIAL); if ((flags & ~mask) == attr_flags_values[attr_code]) - return 0; + return false; bgp_attr_flags_diagnose(args, attr_flags_values[attr_code]); - return 1; + return true; } /* Get origin attribute of the update message. */ @@ -1398,6 +1403,15 @@ static int bgp_attr_aspath(struct bgp_attr_parser_args *args) 0); } + /* Codification of AS 0 Processing */ + if (aspath_check_as_zero(attr->aspath)) { + flog_err(EC_BGP_ATTR_MAL_AS_PATH, + "Malformed AS path, contains BGP_AS_ZERO(0) from %s", + peer->host); + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH, + 0); + } + /* Set aspath attribute flag. */ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH); @@ -1469,6 +1483,15 @@ static int bgp_attr_as4_path(struct bgp_attr_parser_args *args, 0); } + /* Codification of AS 0 Processing */ + if (aspath_check_as_zero(*as4_path)) { + flog_err(EC_BGP_ATTR_MAL_AS_PATH, + "Malformed AS4 path, contains BGP_AS_ZERO(0) from %s", + peer->host); + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH, + 0); + } + /* Set aspath attribute flag. 
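
The two checks just added reject an UPDATE whose AS_PATH or AS4_PATH contains AS 0, in line with RFC 7607 (the "Codification of AS 0 Processing" the in-code comments refer to). The helper doing the work, aspath_check_as_zero() added earlier in bgp_aspath.c, is a plain walk over the path segments; the same walk over a simplified segment type looks like this:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for FRR's struct assegment. */
struct seg {
    struct seg *next;
    uint32_t *as;          /* array of ASNs in this segment */
    unsigned int length;   /* number of ASNs                */
};

/* Return true if any segment contains the reserved AS 0. */
static bool path_has_as_zero(const struct seg *s)
{
    for (; s; s = s->next)
        for (unsigned int i = 0; i < s->length; i++)
            if (s->as[i] == 0)
                return true;
    return false;
}
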
*/ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH); @@ -1615,6 +1638,7 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args) struct peer *const peer = args->peer; struct attr *const attr = args->attr; const bgp_size_t length = args->length; + as_t aggregator_as; int wantedlen = 6; @@ -1632,14 +1656,25 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args) } if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)) - attr->aggregator_as = stream_getl(peer->curr); + aggregator_as = stream_getl(peer->curr); else - attr->aggregator_as = stream_getw(peer->curr); + aggregator_as = stream_getw(peer->curr); + + attr->aggregator_as = aggregator_as; attr->aggregator_addr.s_addr = stream_get_ipv4(peer->curr); /* Set atomic aggregate flag. */ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR); + /* Codification of AS 0 Processing */ + if (aggregator_as == BGP_AS_ZERO) { + flog_err(EC_BGP_ATTR_LEN, + "AGGREGATOR AS number is 0 for aspath: %s", + aspath_print(attr->aspath)); + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH, + args->total); + } + return BGP_ATTR_PARSE_PROCEED; } @@ -1652,6 +1687,7 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args, struct peer *const peer = args->peer; struct attr *const attr = args->attr; const bgp_size_t length = args->length; + as_t aggregator_as; if (length != 8) { flog_err(EC_BGP_ATTR_LEN, "New Aggregator length is not 8 [%d]", @@ -1660,11 +1696,21 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args, 0); } - *as4_aggregator_as = stream_getl(peer->curr); + aggregator_as = stream_getl(peer->curr); + *as4_aggregator_as = aggregator_as; as4_aggregator_addr->s_addr = stream_get_ipv4(peer->curr); attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR); + /* Codification of AS 0 Processing */ + if (aggregator_as == BGP_AS_ZERO) { + flog_err(EC_BGP_ATTR_LEN, + "AS4_AGGREGATOR AS number is 0 for aspath: %s", + aspath_print(attr->aspath)); + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH, + 0); + } + return BGP_ATTR_PARSE_PROCEED; } @@ -1786,7 +1832,8 @@ bgp_attr_community(struct bgp_attr_parser_args *args) if (length == 0) { attr->community = NULL; - return BGP_ATTR_PARSE_PROCEED; + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, + args->total); } attr->community = @@ -1848,7 +1895,7 @@ bgp_attr_cluster_list(struct bgp_attr_parser_args *args) * malformed, the UPDATE message SHALL be handled using the approach * of "treat-as-withdraw". 
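
Two related cleanups show up in the cluster-list code above: cluster_parse() and cluster_hash_cmp() are made safe for zero-length lists (a NULL list pointer no longer reaches memcmp()), and cluster_unintern() now takes a double pointer so it can clear the caller's reference once the last refcount is released. A hedged sketch of that release-and-clear pattern, using a placeholder type instead of struct cluster_list and omitting the hash_release() bookkeeping:

#include <assert.h>
#include <stdlib.h>

struct refobj {
    unsigned int refcnt;
    /* ... payload ... */
};

/* Drop one reference; free and NULL the caller's pointer on last release. */
static void refobj_unintern(struct refobj **obj)
{
    assert(obj && *obj);

    if ((*obj)->refcnt)
        (*obj)->refcnt--;

    if ((*obj)->refcnt == 0) {
        free(*obj);
        *obj = NULL;    /* caller is left with no dangling pointer */
    }
}
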
*/ - if (length % 4) { + if (length == 0 || length % 4) { flog_err(EC_BGP_ATTR_LEN, "Bad cluster list length %d", length); return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR, @@ -2127,11 +2174,11 @@ bgp_attr_large_community(struct bgp_attr_parser_args *args) if (length == 0) { attr->lcommunity = NULL; /* Empty extcomm doesn't seem to be invalid per se */ - return BGP_ATTR_PARSE_PROCEED; + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, + args->total); } - attr->lcommunity = - lcommunity_parse((uint8_t *)stream_pnt(peer->curr), length); + attr->lcommunity = lcommunity_parse(stream_pnt(peer->curr), length); /* XXX: fix ecommunity_parse to use stream API */ stream_forward_getp(peer->curr, length); @@ -2156,11 +2203,12 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args) if (length == 0) { attr->ecommunity = NULL; /* Empty extcomm doesn't seem to be invalid per se */ - return BGP_ATTR_PARSE_PROCEED; + return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, + args->total); } attr->ecommunity = - ecommunity_parse((uint8_t *)stream_pnt(peer->curr), length); + ecommunity_parse(stream_pnt(peer->curr), length); /* XXX: fix ecommunity_parse to use stream API */ stream_forward_getp(peer->curr, length); @@ -2208,6 +2256,9 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args) bgp_attr_extcom_tunnel_type(attr, (bgp_encap_types *)&attr->encap_tunneltype); + /* Extract link bandwidth, if any. */ + (void)ecommunity_linkbw_present(attr->ecommunity, &attr->link_bw); + return BGP_ATTR_PARSE_PROCEED; } @@ -2343,8 +2394,7 @@ static int bgp_attr_encap(uint8_t type, struct peer *peer, /* IN */ * Returns 0 if there was an error that needs to be passed up the stack */ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length, - struct bgp_attr_parser_args *args, - struct bgp_nlri *mp_update) + struct bgp_attr_parser_args *args) { struct peer *const peer = args->peer; struct attr *const attr = args->attr; @@ -2382,15 +2432,6 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length, /* Store label index; subsequently, we'll check on * address-family */ attr->label_index = label_index; - - /* - * Ignore the Label index attribute unless received for - * labeled-unicast - * SAFI. 
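
Several parsers above stop accepting a zero-length COMMUNITIES, EXTENDED COMMUNITIES or LARGE COMMUNITIES attribute (and a zero-length CLUSTER_LIST) and instead route them through bgp_attr_malformed(), which applies the revised error-handling rules (RFC 7606) rather than silently treating the attribute as absent. The length rule itself is small enough to show standalone, with a local result enum standing in for bgp_attr_parse_ret_t:

#include <stdint.h>

enum parse_result { PARSE_OK, PARSE_TREAT_AS_WITHDRAW };

/* An attribute made of fixed-size elements (e.g. 4-byte communities)
 * must be non-empty and a whole multiple of the element size. */
static enum parse_result check_attr_len(uint16_t length, uint16_t elem_size)
{
    if (length == 0 || (length % elem_size) != 0)
        return PARSE_TREAT_AS_WITHDRAW;
    return PARSE_OK;
}
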
- */ - if (!mp_update->length - || mp_update->safi != SAFI_LABELED_UNICAST) - attr->label_index = BGP_INVALID_LABEL_INDEX; } /* Placeholder code for the IPv6 SID type */ @@ -2589,8 +2630,7 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length, /* Prefix SID attribute * draft-ietf-idr-bgp-prefix-sid-05 */ -bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args, - struct bgp_nlri *mp_update) +bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args) { struct peer *const peer = args->peer; struct attr *const attr = args->attr; @@ -2601,8 +2641,10 @@ bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args, uint8_t type; uint16_t length; size_t headersz = sizeof(type) + sizeof(length); + size_t psid_parsed_length = 0; - while (STREAM_READABLE(peer->curr) > 0) { + while (STREAM_READABLE(peer->curr) > 0 + && psid_parsed_length < args->length) { if (STREAM_READABLE(peer->curr) < headersz) { flog_err( @@ -2620,7 +2662,7 @@ bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args, if (STREAM_READABLE(peer->curr) < length) { flog_err( EC_BGP_ATTR_LEN, - "Malformed Prefix SID attribute - insufficient data (need %" PRIu8 + "Malformed Prefix SID attribute - insufficient data (need %" PRIu16 " for attribute body, have %zu remaining in UPDATE)", length, STREAM_READABLE(peer->curr)); return bgp_attr_malformed(args, @@ -2628,10 +2670,23 @@ bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args, args->total); } - ret = bgp_attr_psid_sub(type, length, args, mp_update); + ret = bgp_attr_psid_sub(type, length, args); if (ret != BGP_ATTR_PARSE_PROCEED) return ret; + + psid_parsed_length += length + headersz; + + if (psid_parsed_length > args->length) { + flog_err( + EC_BGP_ATTR_LEN, + "Malformed Prefix SID attribute - TLV overflow by attribute (need %zu" + " for TLV length, have %zu overflowed in UPDATE)", + length + headersz, psid_parsed_length - (length + headersz)); + return bgp_attr_malformed( + args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR, + args->total); + } } return BGP_ATTR_PARSE_PROCEED; @@ -3027,7 +3082,7 @@ bgp_attr_parse_ret_t bgp_attr_parse(struct peer *peer, struct attr *attr, startp); break; case BGP_ATTR_PREFIX_SID: - ret = bgp_attr_prefix_sid(&attr_args, mp_update); + ret = bgp_attr_prefix_sid(&attr_args); break; case BGP_ATTR_PMSI_TUNNEL: ret = bgp_attr_pmsi_tunnel(&attr_args); @@ -3074,6 +3129,17 @@ bgp_attr_parse_ret_t bgp_attr_parse(struct peer *peer, struct attr *attr, } } + /* + * draft-ietf-idr-bgp-prefix-sid-27#section-3: + * About Prefix-SID path attribute, + * Label-Index TLV(type1) and The Originator SRGB TLV(type-3) + * may only appear in a BGP Prefix-SID attribute attached to + * IPv4/IPv6 Labeled Unicast prefixes ([RFC8277]). + * It MUST be ignored when received for other BGP AFI/SAFI combinations. + */ + if (!attr->mp_nexthop_len || mp_update->safi != SAFI_LABELED_UNICAST) + attr->label_index = BGP_INVALID_LABEL_INDEX; + /* Check final read pointer is same as end pointer. 
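
The reworked Prefix-SID loop above bounds sub-TLV parsing by the attribute length itself (psid_parsed_length), not only by what is still readable from the stream, so a sub-TLV can no longer spill past the attribute into the rest of the UPDATE. The general shape of that accounting, sketched over a plain byte buffer with the same 1-byte type / 2-byte length sub-TLV header:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Walk type(1)/length(2)/value sub-TLVs inside an attribute body of
 * attr_len bytes.  Returns false if any TLV would overflow the attribute. */
static bool walk_tlvs(const uint8_t *buf, size_t attr_len)
{
    const size_t hdr = 3;    /* 1-byte type + 2-byte length */
    size_t parsed = 0;

    while (parsed < attr_len) {
        if (attr_len - parsed < hdr)
            return false;    /* truncated sub-TLV header    */

        uint16_t len = (uint16_t)((buf[parsed + 1] << 8) | buf[parsed + 2]);

        if (len > attr_len - parsed - hdr)
            return false;    /* value overflows the attribute */

        /* ... dispatch on buf[parsed] (the sub-TLV type) here ... */

        parsed += hdr + len;
    }
    return true;
}
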
*/ if (BGP_INPUT_PNT(peer) != endp) { flog_warn(EC_BGP_ATTRIBUTES_MISMATCH, @@ -3362,10 +3428,10 @@ size_t bgp_packet_mpattr_start(struct stream *s, struct peer *peer, afi_t afi, } void bgp_packet_mpattr_prefix(struct stream *s, afi_t afi, safi_t safi, - struct prefix *p, struct prefix_rd *prd, - mpls_label_t *label, uint32_t num_labels, - int addpath_encode, uint32_t addpath_tx_id, - struct attr *attr) + const struct prefix *p, + const struct prefix_rd *prd, mpls_label_t *label, + uint32_t num_labels, int addpath_encode, + uint32_t addpath_tx_id, struct attr *attr) { if (safi == SAFI_MPLS_VPN) { if (addpath_encode) @@ -3391,7 +3457,8 @@ void bgp_packet_mpattr_prefix(struct stream *s, afi_t afi, safi_t safi, stream_put_prefix_addpath(s, p, addpath_encode, addpath_tx_id); } -size_t bgp_packet_mpattr_prefix_size(afi_t afi, safi_t safi, struct prefix *p) +size_t bgp_packet_mpattr_prefix_size(afi_t afi, safi_t safi, + const struct prefix *p) { int size = PSIZE(p->prefixlen); if (safi == SAFI_MPLS_VPN) @@ -3508,7 +3575,7 @@ void bgp_packet_mpattr_end(struct stream *s, size_t sizep) stream_putw_at(s, sizep, (stream_get_endp(s) - sizep) - 2); } -static int bgp_append_local_as(struct peer *peer, afi_t afi, safi_t safi) +static bool bgp_append_local_as(struct peer *peer, afi_t afi, safi_t safi) { if (!BGP_AS_IS_PRIVATE(peer->local_as) || (BGP_AS_IS_PRIVATE(peer->local_as) @@ -3520,8 +3587,8 @@ static int bgp_append_local_as(struct peer *peer, afi_t afi, safi_t safi) PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE) && !CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE))) - return 1; - return 0; + return true; + return false; } /* Make attribute packet. */ @@ -4025,8 +4092,9 @@ size_t bgp_packet_mpunreach_start(struct stream *s, afi_t afi, safi_t safi) return attrlen_pnt; } -void bgp_packet_mpunreach_prefix(struct stream *s, struct prefix *p, afi_t afi, - safi_t safi, struct prefix_rd *prd, +void bgp_packet_mpunreach_prefix(struct stream *s, const struct prefix *p, + afi_t afi, safi_t safi, + const struct prefix_rd *prd, mpls_label_t *label, uint32_t num_labels, int addpath_encode, uint32_t addpath_tx_id, struct attr *attr) @@ -4076,7 +4144,7 @@ void bgp_attr_finish(void) /* Make attribute packet. */ void bgp_dump_routes_attr(struct stream *s, struct attr *attr, - struct prefix *prefix) + const struct prefix *prefix) { unsigned long cp; unsigned long len; diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index 2e91f56df5..94531313ae 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -87,7 +87,7 @@ struct bgp_attr_encap_subtlv { uint8_t value[0]; /* will be extended */ }; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * old rfp<->rfapi representation */ @@ -233,7 +233,7 @@ struct attr { uint16_t encap_tunneltype; /* grr */ struct bgp_attr_encap_subtlv *encap_subtlvs; /* rfc5512 */ -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC struct bgp_attr_encap_subtlv *vnc_subtlvs; /* VNC-specific */ #endif /* EVPN */ @@ -250,6 +250,9 @@ struct attr { /* rmap set table */ uint32_t rmap_table_id; + + /* Link bandwidth value, if any. */ + uint32_t link_bw; }; /* rmap_change_flags definition */ @@ -260,6 +263,7 @@ struct attr { #define BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED (1 << 4) #define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5) #define BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED (1 << 6) +#define BATTR_RMAP_LINK_BW_SET (1 << 7) /* Router Reflector related structure. 
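
struct attr gains a link_bw field above, filled in bgp_attr_ext_communities() via ecommunity_linkbw_present() when a link bandwidth extended community is attached. Purely as an illustration of the assumed wire encoding (draft-ietf-idr-link-bandwidth: a non-transitive 2-octet-AS-specific community whose last four octets carry the bandwidth as an IEEE-754 float in bytes per second), a decoder for a single 8-byte extended community might look like:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Assumed type/sub-type of the non-transitive link bandwidth
 * extended community (draft-ietf-idr-link-bandwidth). */
#define ECOM_TYPE_NON_TRANS_AS 0x40
#define ECOM_SUBTYPE_LINK_BW   0x04

/* Extract the bandwidth (bytes/sec) from one 8-byte extended community.
 * Returns false if this is not a link bandwidth community. */
static bool linkbw_decode(const uint8_t ecom[8], float *bw)
{
    uint32_t raw;

    if (ecom[0] != ECOM_TYPE_NON_TRANS_AS || ecom[1] != ECOM_SUBTYPE_LINK_BW)
        return false;

    /* octets 2-3: AS number; octets 4-7: bandwidth, network byte order */
    raw = ((uint32_t)ecom[4] << 24) | ((uint32_t)ecom[5] << 16)
          | ((uint32_t)ecom[6] << 8) | (uint32_t)ecom[7];
    memcpy(bw, &raw, sizeof(*bw));
    return true;
}
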
*/ struct cluster_list { @@ -310,22 +314,19 @@ extern void bgp_attr_unintern_sub(struct attr *); extern void bgp_attr_unintern(struct attr **); extern void bgp_attr_flush(struct attr *); extern struct attr *bgp_attr_default_set(struct attr *attr, uint8_t); -extern struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin, - struct aspath *aspath, - struct community *community, - struct ecommunity *ecommunity, - struct lcommunity *lcommunity, - struct bgp_aggregate *aggregate, - uint8_t atomic_aggregate, - struct prefix *p); +extern struct attr *bgp_attr_aggregate_intern( + struct bgp *bgp, uint8_t origin, struct aspath *aspath, + struct community *community, struct ecommunity *ecommunity, + struct lcommunity *lcommunity, struct bgp_aggregate *aggregate, + uint8_t atomic_aggregate, const struct prefix *p); extern bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *, struct stream *, struct attr *, struct bpacket_attr_vec_arr *vecarr, struct prefix *, afi_t, safi_t, struct peer *, struct prefix_rd *, mpls_label_t *, uint32_t, int, uint32_t); -extern void bgp_dump_routes_attr(struct stream *, struct attr *, - struct prefix *); +extern void bgp_dump_routes_attr(struct stream *s, struct attr *attr, + const struct prefix *p); extern bool attrhash_cmp(const void *arg1, const void *arg2); extern unsigned int attrhash_key_make(const void *); extern void attr_show_all(struct vty *); @@ -333,8 +334,7 @@ extern unsigned long int attr_count(void); extern unsigned long int attr_unknown_count(void); /* Cluster list prototypes. */ -extern int cluster_loop_check(struct cluster_list *, struct in_addr); -extern void cluster_unintern(struct cluster_list *); +extern bool cluster_loop_check(struct cluster_list *, struct in_addr); /* Below exported for unit-test purposes only */ struct bgp_attr_parser_args { @@ -351,8 +351,7 @@ extern int bgp_mp_reach_parse(struct bgp_attr_parser_args *args, extern int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args, struct bgp_nlri *); extern bgp_attr_parse_ret_t -bgp_attr_prefix_sid(struct bgp_attr_parser_args *args, - struct bgp_nlri *mp_update); +bgp_attr_prefix_sid(struct bgp_attr_parser_args *args); extern struct bgp_attr_encap_subtlv * encap_tlv_dup(struct bgp_attr_encap_subtlv *orig); @@ -373,20 +372,21 @@ extern size_t bgp_packet_mpattr_start(struct stream *s, struct peer *peer, struct bpacket_attr_vec_arr *vecarr, struct attr *attr); extern void bgp_packet_mpattr_prefix(struct stream *s, afi_t afi, safi_t safi, - struct prefix *p, struct prefix_rd *prd, + const struct prefix *p, + const struct prefix_rd *prd, mpls_label_t *label, uint32_t num_labels, int addpath_encode, uint32_t addpath_tx_id, struct attr *); extern size_t bgp_packet_mpattr_prefix_size(afi_t afi, safi_t safi, - struct prefix *p); + const struct prefix *p); extern void bgp_packet_mpattr_end(struct stream *s, size_t sizep); extern size_t bgp_packet_mpunreach_start(struct stream *s, afi_t afi, safi_t safi); -extern void bgp_packet_mpunreach_prefix(struct stream *s, struct prefix *p, - afi_t afi, safi_t safi, - struct prefix_rd *prd, mpls_label_t *, - uint32_t, int, uint32_t, struct attr *); +extern void bgp_packet_mpunreach_prefix( + struct stream *s, const struct prefix *p, afi_t afi, safi_t safi, + const struct prefix_rd *prd, mpls_label_t *label, uint32_t num_labels, + int addpath_encode, uint32_t addpath_tx_id, struct attr *attr); extern void bgp_packet_mpunreach_end(struct stream *s, size_t attrlen_pnt); extern bgp_attr_parse_ret_t bgp_attr_nexthop_valid(struct peer 
*peer, @@ -412,5 +412,4 @@ static inline uint32_t mac_mobility_seqnum(struct attr *attr) { return (attr) ? attr->mm_seqnum : 0; } - #endif /* _QUAGGA_BGP_ATTR_H */ diff --git a/bgpd/bgp_attr_evpn.c b/bgpd/bgp_attr_evpn.c index ec9656a98d..65072088ae 100644 --- a/bgpd/bgp_attr_evpn.c +++ b/bgpd/bgp_attr_evpn.c @@ -45,7 +45,7 @@ void bgp_add_routermac_ecom(struct attr *attr, struct ethaddr *routermac) memcpy(&routermac_ecom.val[2], routermac->octet, ETH_ALEN); if (!attr->ecommunity) attr->ecommunity = ecommunity_new(); - ecommunity_add_val(attr->ecommunity, &routermac_ecom); + ecommunity_add_val(attr->ecommunity, &routermac_ecom, false, false); ecommunity_str(attr->ecommunity); } @@ -54,25 +54,25 @@ void bgp_add_routermac_ecom(struct attr *attr, struct ethaddr *routermac) * format accepted: AA:BB:CC:DD:EE:FF:GG:HH:II:JJ * if id is null, check only is done */ -int str2esi(const char *str, struct eth_segment_id *id) +bool str2esi(const char *str, struct eth_segment_id *id) { unsigned int a[ESI_LEN]; int i; if (!str) - return 0; + return false; if (sscanf(str, "%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x", a + 0, a + 1, a + 2, a + 3, a + 4, a + 5, a + 6, a + 7, a + 8, a + 9) != ESI_LEN) { /* error in incoming str length */ - return 0; + return false; } /* valid mac address */ if (!id) - return 1; + return true; for (i = 0; i < ESI_LEN; ++i) id->val[i] = a[i] & 0xff; - return 1; + return true; } char *esi2str(struct eth_segment_id *id) @@ -186,7 +186,7 @@ uint32_t bgp_attr_mac_mobility_seqnum(struct attr *attr, uint8_t *sticky) * one. */ for (i = 0; i < ecom->size; i++) { - uint8_t *pnt; + const uint8_t *pnt; uint8_t type, sub_type; uint32_t seq_num; diff --git a/bgpd/bgp_attr_evpn.h b/bgpd/bgp_attr_evpn.h index 25654ba709..c1bfd83765 100644 --- a/bgpd/bgp_attr_evpn.h +++ b/bgpd/bgp_attr_evpn.h @@ -51,7 +51,7 @@ struct bgp_route_evpn { union gw_addr gw_ip; }; -extern int str2esi(const char *str, struct eth_segment_id *id); +extern bool str2esi(const char *str, struct eth_segment_id *id); extern char *esi2str(struct eth_segment_id *id); extern char *ecom_mac2str(char *ecom_mac); diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c index 1f650aaeb7..a200589bd3 100644 --- a/bgpd/bgp_bfd.c +++ b/bgpd/bgp_bfd.c @@ -72,21 +72,21 @@ void bgp_bfd_peer_group2peer_copy(struct peer *conf, struct peer *peer) * bgp_bfd_is_peer_multihop - returns whether BFD peer is multi-hop or single * hop. */ -int bgp_bfd_is_peer_multihop(struct peer *peer) +bool bgp_bfd_is_peer_multihop(struct peer *peer) { struct bfd_info *bfd_info; bfd_info = (struct bfd_info *)peer->bfd_info; if (!bfd_info) - return 0; + return false; if ((bfd_info->type == BFD_TYPE_MULTIHOP) || ((peer->sort == BGP_PEER_IBGP) && !peer->shared_network) || is_ebgp_multihop_configured(peer)) - return 1; + return true; else - return 0; + return false; } /* @@ -200,6 +200,25 @@ static void bgp_bfd_update_peer(struct peer *peer) bgp_bfd_peer_sendmsg(peer, ZEBRA_BFD_DEST_UPDATE); } +/** + * bgp_bfd_reset_peer - reinitialise bfd + * ensures that bfd state machine is restarted + * to be synced with remote bfd + */ +void bgp_bfd_reset_peer(struct peer *peer) +{ + struct bfd_info *bfd_info; + + if (!peer->bfd_info) + return; + bfd_info = (struct bfd_info *)peer->bfd_info; + + /* if status is not down, reset bfd */ + if (bfd_info->status != BFD_STATUS_DOWN) + bgp_bfd_peer_sendmsg(peer, ZEBRA_BFD_DEST_DEREGISTER); + bgp_bfd_peer_sendmsg(peer, ZEBRA_BFD_DEST_REGISTER); +} + /* * bgp_bfd_update_type - update session type with BFD through zebra. 
*/ diff --git a/bgpd/bgp_bfd.h b/bgpd/bgp_bfd.h index caa5651e3a..f2fa959b45 100644 --- a/bgpd/bgp_bfd.h +++ b/bgpd/bgp_bfd.h @@ -31,12 +31,14 @@ extern void bgp_bfd_register_peer(struct peer *peer); extern void bgp_bfd_deregister_peer(struct peer *peer); +extern void bgp_bfd_reset_peer(struct peer *peer); + extern void bgp_bfd_peer_config_write(struct vty *vty, struct peer *peer, char *addr); extern void bgp_bfd_show_info(struct vty *vty, struct peer *peer, bool use_json, json_object *json_neigh); -extern int bgp_bfd_is_peer_multihop(struct peer *peer); +extern bool bgp_bfd_is_peer_multihop(struct peer *peer); #endif /* _QUAGGA_BGP_BFD_H */ diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index 24a9cab5d1..a6fc4ebd03 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -385,15 +385,15 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down) stream_put(s, bbpeer->open_tx, bbpeer->open_tx_len); else { stream_put(s, dummy_open, sizeof(dummy_open)); - zlog_warn("bmp: missing TX OPEN message for peer %s\n", - peer->host); + zlog_warn("bmp: missing TX OPEN message for peer %s", + peer->host); } if (bbpeer && bbpeer->open_rx) stream_put(s, bbpeer->open_rx, bbpeer->open_rx_len); else { stream_put(s, dummy_open, sizeof(dummy_open)); - zlog_warn("bmp: missing RX OPEN message for peer %s\n", - peer->host); + zlog_warn("bmp: missing RX OPEN message for peer %s", + peer->host); } if (peer->desc) @@ -664,8 +664,7 @@ static int bmp_peer_established(struct peer *peer) return 0; /* Check if this peer just went to Established */ - if ((peer->last_major_event != OpenConfirm) || - !(peer_established(peer))) + if ((peer->ostatus != OpenConfirm) || !(peer_established(peer))) return 0; if (peer->doppelganger && (peer->doppelganger->status != Deleted)) { @@ -766,8 +765,8 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags) stream_free(s); } -static struct stream *bmp_update(struct prefix *p, struct peer *peer, - struct attr *attr, afi_t afi, safi_t safi) +static struct stream *bmp_update(const struct prefix *p, struct peer *peer, + struct attr *attr, afi_t afi, safi_t safi) { struct bpacket_attr_vec_arr vecarr; struct stream *s; @@ -814,7 +813,8 @@ static struct stream *bmp_update(struct prefix *p, struct peer *peer, return s; } -static struct stream *bmp_withdraw(struct prefix *p, afi_t afi, safi_t safi) +static struct stream *bmp_withdraw(const struct prefix *p, afi_t afi, + safi_t safi) { struct stream *s; size_t attrlen_pos = 0, mp_start, mplen_pos; @@ -854,7 +854,7 @@ static struct stream *bmp_withdraw(struct prefix *p, afi_t afi, safi_t safi) } static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags, - struct prefix *p, struct attr *attr, afi_t afi, + const struct prefix *p, struct attr *attr, afi_t afi, safi_t safi, time_t uptime) { struct stream *hdr, *msg; @@ -941,7 +941,7 @@ afibreak: return true; } bmp->syncpeerid = 0; - prefix_copy(&bmp->syncpos, &bn->p); + prefix_copy(&bmp->syncpos, bgp_node_get_prefix(bn)); } if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) { @@ -989,12 +989,14 @@ afibreak: bmp->syncpeerid = adjin->peer->qobj_node.nid; } + const struct prefix *bn_p = bgp_node_get_prefix(bn); + if (bpi) - bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, &bn->p, bpi->attr, + bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, bn_p, bpi->attr, afi, safi, bpi->uptime); if (adjin) - bmp_monitor(bmp, adjin->peer, 0, &bn->p, adjin->attr, - afi, safi, adjin->uptime); + bmp_monitor(bmp, adjin->peer, 0, bn_p, adjin->attr, afi, safi, + adjin->uptime); return 
true; } @@ -1131,16 +1133,13 @@ static void bmp_process_one(struct bmp_targets *bt, struct bgp *bgp, struct bmp *bmp; struct bmp_queue_entry *bqe, bqeref; size_t refcount; - char buf[256]; - - prefix2str(&bn->p, buf, sizeof(buf)); refcount = bmp_session_count(&bt->sessions); if (refcount == 0) return; memset(&bqeref, 0, sizeof(bqeref)); - prefix_copy(&bqeref.p, &bn->p); + prefix_copy(&bqeref.p, bgp_node_get_prefix(bn)); bqeref.peerid = peer->qobj_node.nid; bqeref.afi = afi; bqeref.safi = safi; @@ -1343,8 +1342,7 @@ static int bmp_accept(struct thread *thread) /* We can handle IPv4 or IPv6 socket. */ bmp_sock = sockunion_accept(bl->sock, &su); if (bmp_sock < 0) { - zlog_info("bmp: accept_sock failed: %s\n", - safe_strerror (errno)); + zlog_info("bmp: accept_sock failed: %s", safe_strerror(errno)); return -1; } bmp_open(bl->targets, bmp_sock); diff --git a/bgpd/bgp_btoa.c b/bgpd/bgp_btoa.c index cc37e352ef..cbe18e23cb 100644 --- a/bgpd/bgp_btoa.c +++ b/bgpd/bgp_btoa.c @@ -68,7 +68,7 @@ enum MRT_MSG_TYPES { MSG_TABLE_DUMP /* routing table dump */ }; -static int attr_parse(struct stream *s, uint16_t len) +static void attr_parse(struct stream *s, uint16_t len) { unsigned int flag; unsigned int type; @@ -115,8 +115,6 @@ static int attr_parse(struct stream *s, uint16_t len) break; } } - - return 0; } int main(int argc, char **argv) diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 5c461dbe77..cf4d44ea22 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -311,9 +311,9 @@ static void community_list_delete(struct community_list_master *cm, community_list_free(list); } -static int community_list_empty_p(struct community_list *list) +static bool community_list_empty_p(struct community_list *list) { - return (list->head == NULL && list->tail == NULL) ? 1 : 0; + return list->head == NULL && list->tail == NULL; } /* Delete community-list entry from the list. */ @@ -497,7 +497,7 @@ static char *community_str_get(struct community *com, int i) /* Internal function to perform regular expression match for * a single community. */ -static int community_regexp_include(regex_t *reg, struct community *com, int i) +static bool community_regexp_include(regex_t *reg, struct community *com, int i) { char *str; int rv; @@ -514,16 +514,12 @@ static int community_regexp_include(regex_t *reg, struct community *com, int i) XFREE(MTYPE_COMMUNITY_STR, str); - if (rv == 0) - return 1; - - /* No match. */ - return 0; + return rv == 0; } /* Internal function to perform regular expression match for community attribute. */ -static int community_regexp_match(struct community *com, regex_t *reg) +static bool community_regexp_match(struct community *com, regex_t *reg) { const char *str; @@ -536,10 +532,10 @@ static int community_regexp_match(struct community *com, regex_t *reg) /* Regular expression match. */ if (regexec(reg, str, 0, NULL, 0) == 0) - return 1; + return true; /* No match. */ - return 0; + return false; } static char *lcommunity_str_get(struct lcommunity *lcom, int i) @@ -549,7 +545,7 @@ static char *lcommunity_str_get(struct lcommunity *lcom, int i) uint32_t localdata1; uint32_t localdata2; char *str; - uint8_t *ptr; + const uint8_t *ptr; char *pnt; ptr = lcom->val + (i * LCOMMUNITY_SIZE); @@ -574,8 +570,8 @@ static char *lcommunity_str_get(struct lcommunity *lcom, int i) /* Internal function to perform regular expression match for * a single community. 
*/ -static int lcommunity_regexp_include(regex_t *reg, struct lcommunity *lcom, - int i) +static bool lcommunity_regexp_include(regex_t *reg, struct lcommunity *lcom, + int i) { char *str; @@ -589,15 +585,15 @@ static int lcommunity_regexp_include(regex_t *reg, struct lcommunity *lcom, /* Regular expression match. */ if (regexec(reg, str, 0, NULL, 0) == 0) { XFREE(MTYPE_LCOMMUNITY_STR, str); - return 1; + return true; } XFREE(MTYPE_LCOMMUNITY_STR, str); /* No match. */ - return 0; + return false; } -static int lcommunity_regexp_match(struct lcommunity *com, regex_t *reg) +static bool lcommunity_regexp_match(struct lcommunity *com, regex_t *reg) { const char *str; @@ -610,14 +606,14 @@ static int lcommunity_regexp_match(struct lcommunity *com, regex_t *reg) /* Regular expression match. */ if (regexec(reg, str, 0, NULL, 0) == 0) - return 1; + return true; /* No match. */ - return 0; + return false; } -static int ecommunity_regexp_match(struct ecommunity *ecom, regex_t *reg) +static bool ecommunity_regexp_match(struct ecommunity *ecom, regex_t *reg) { const char *str; @@ -630,10 +626,10 @@ static int ecommunity_regexp_match(struct ecommunity *ecom, regex_t *reg) /* Regular expression match. */ if (regexec(reg, str, 0, NULL, 0) == 0) - return 1; + return true; /* No match. */ - return 0; + return false; } #if 0 @@ -718,125 +714,113 @@ community_regexp_delete (struct community *com, regex_t * reg) /* When given community attribute matches to the community-list return 1 else return 0. */ -int community_list_match(struct community *com, struct community_list *list) +bool community_list_match(struct community *com, struct community_list *list) { struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { if (entry->any) - return entry->direct == COMMUNITY_PERMIT ? 1 : 0; + return entry->direct == COMMUNITY_PERMIT; if (entry->style == COMMUNITY_LIST_STANDARD) { if (community_include(entry->u.com, COMMUNITY_INTERNET)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; if (community_match(com, entry->u.com)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } else if (entry->style == COMMUNITY_LIST_EXPANDED) { if (community_regexp_match(com, entry->reg)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } } - return 0; + return false; } -int lcommunity_list_match(struct lcommunity *lcom, struct community_list *list) +bool lcommunity_list_match(struct lcommunity *lcom, struct community_list *list) { struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { if (entry->any) - return entry->direct == COMMUNITY_PERMIT ? 1 : 0; + return entry->direct == COMMUNITY_PERMIT; if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) { if (lcommunity_match(lcom, entry->u.lcom)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } else if (entry->style == LARGE_COMMUNITY_LIST_EXPANDED) { if (lcommunity_regexp_match(lcom, entry->reg)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } } - return 0; + return false; } /* Perform exact matching. In case of expanded large-community-list, do * same thing as lcommunity_list_match(). 
*/ -int lcommunity_list_exact_match(struct lcommunity *lcom, - struct community_list *list) +bool lcommunity_list_exact_match(struct lcommunity *lcom, + struct community_list *list) { struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { if (entry->any) - return entry->direct == COMMUNITY_PERMIT ? 1 : 0; + return entry->direct == COMMUNITY_PERMIT; if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) { if (lcommunity_cmp(lcom, entry->u.com)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } else if (entry->style == LARGE_COMMUNITY_LIST_EXPANDED) { if (lcommunity_regexp_match(lcom, entry->reg)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } } - return 0; + return false; } -int ecommunity_list_match(struct ecommunity *ecom, struct community_list *list) +bool ecommunity_list_match(struct ecommunity *ecom, struct community_list *list) { struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { if (entry->any) - return entry->direct == COMMUNITY_PERMIT ? 1 : 0; + return entry->direct == COMMUNITY_PERMIT; if (entry->style == EXTCOMMUNITY_LIST_STANDARD) { if (ecommunity_match(ecom, entry->u.ecom)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } else if (entry->style == EXTCOMMUNITY_LIST_EXPANDED) { if (ecommunity_regexp_match(ecom, entry->reg)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } } - return 0; + return false; } /* Perform exact matching. In case of expanded community-list, do same thing as community_list_match(). */ -int community_list_exact_match(struct community *com, - struct community_list *list) +bool community_list_exact_match(struct community *com, + struct community_list *list) { struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { if (entry->any) - return entry->direct == COMMUNITY_PERMIT ? 1 : 0; + return entry->direct == COMMUNITY_PERMIT; if (entry->style == COMMUNITY_LIST_STANDARD) { if (community_include(entry->u.com, COMMUNITY_INTERNET)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; if (community_cmp(com, entry->u.com)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } else if (entry->style == COMMUNITY_LIST_EXPANDED) { if (community_regexp_match(com, entry->reg)) - return entry->direct == COMMUNITY_PERMIT ? 1 - : 0; + return entry->direct == COMMUNITY_PERMIT; } } - return 0; + return false; } /* Delete all permitted communities in the list from com. */ @@ -900,8 +884,8 @@ struct community *community_list_match_delete(struct community *com, /* To avoid duplicated entry in the community-list, this function compares specified entry to existing entry. 
*/ -static int community_list_dup_check(struct community_list *list, - struct community_entry *new) +static bool community_list_dup_check(struct community_list *list, + struct community_entry *new) { struct community_entry *entry; @@ -916,32 +900,32 @@ static int community_list_dup_check(struct community_list *list, continue; if (entry->any) - return 1; + return true; switch (entry->style) { case COMMUNITY_LIST_STANDARD: if (community_cmp(entry->u.com, new->u.com)) - return 1; + return true; break; case LARGE_COMMUNITY_LIST_STANDARD: if (lcommunity_cmp(entry->u.lcom, new->u.lcom)) - return 1; + return true; break; case EXTCOMMUNITY_LIST_STANDARD: if (ecommunity_cmp(entry->u.ecom, new->u.ecom)) - return 1; + return true; break; case COMMUNITY_LIST_EXPANDED: case EXTCOMMUNITY_LIST_EXPANDED: case LARGE_COMMUNITY_LIST_EXPANDED: if (strcmp(entry->config, new->config) == 0) - return 1; + return true; break; default: break; } } - return 0; + return false; } /* Set community-list. */ @@ -1104,7 +1088,7 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom, } /* Helper to check if every octet do not exceed UINT_MAX */ -static int lcommunity_list_valid(const char *community) +static bool lcommunity_list_valid(const char *community) { int octets = 0; char **splits; @@ -1114,10 +1098,10 @@ static int lcommunity_list_valid(const char *community) for (int i = 0; i < num; i++) { if (strtoul(splits[i], NULL, 10) > UINT_MAX) - return 0; + return false; if (strlen(splits[i]) == 0) - return 0; + return false; octets++; XFREE(MTYPE_TMP, splits[i]); @@ -1125,9 +1109,9 @@ static int lcommunity_list_valid(const char *community) XFREE(MTYPE_TMP, splits); if (octets < 3) - return 0; + return false; - return 1; + return true; } /* Set lcommunity-list. */ diff --git a/bgpd/bgp_clist.h b/bgpd/bgp_clist.h index c5718aecac..4cb5d7c593 100644 --- a/bgpd/bgp_clist.h +++ b/bgpd/bgp_clist.h @@ -165,13 +165,13 @@ extern struct community_list * community_list_lookup(struct community_list_handler *c, const char *name, uint32_t name_hash, int master); -extern int community_list_match(struct community *, struct community_list *); -extern int ecommunity_list_match(struct ecommunity *, struct community_list *); -extern int lcommunity_list_match(struct lcommunity *, struct community_list *); -extern int community_list_exact_match(struct community *, - struct community_list *); -extern int lcommunity_list_exact_match(struct lcommunity *lcom, - struct community_list *list); +extern bool community_list_match(struct community *, struct community_list *); +extern bool ecommunity_list_match(struct ecommunity *, struct community_list *); +extern bool lcommunity_list_match(struct lcommunity *, struct community_list *); +extern bool community_list_exact_match(struct community *, + struct community_list *); +extern bool lcommunity_list_exact_match(struct lcommunity *lcom, + struct community_list *list); extern struct community *community_list_match_delete(struct community *, struct community_list *); extern struct lcommunity * diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c index c145c47d02..30de84c878 100644 --- a/bgpd/bgp_community.c +++ b/bgpd/bgp_community.c @@ -128,7 +128,7 @@ static int community_compare(const void *a1, const void *a2) return 0; } -int community_include(struct community *com, uint32_t val) +bool community_include(struct community *com, uint32_t val) { int i; @@ -136,9 +136,8 @@ int community_include(struct community *com, uint32_t val) for (i = 0; i < com->size; i++) if (memcmp(&val, 
com_nthval(com, i), sizeof(uint32_t)) == 0) - return 1; - - return 0; + return true; + return false; } uint32_t community_val_get(struct community *com, int i) @@ -147,7 +146,7 @@ uint32_t community_val_get(struct community *com, int i) uint32_t val; p = (uint8_t *)com->val; - p += (i * 4); + p += (i * COMMUNITY_SIZE); memcpy(&val, p, sizeof(uint32_t)); @@ -515,11 +514,11 @@ struct community *community_parse(uint32_t *pnt, unsigned short length) struct community *new; /* If length is malformed return NULL. */ - if (length % 4) + if (length % COMMUNITY_SIZE) return NULL; /* Make temporary community for hash look up. */ - tmp.size = length / 4; + tmp.size = length / COMMUNITY_SIZE; tmp.val = pnt; new = community_uniq_sort(&tmp); @@ -534,8 +533,9 @@ struct community *community_dup(struct community *com) new = XCALLOC(MTYPE_COMMUNITY, sizeof(struct community)); new->size = com->size; if (new->size) { - new->val = XMALLOC(MTYPE_COMMUNITY_VAL, com->size * 4); - memcpy(new->val, com->val, com->size * 4); + new->val = XMALLOC(MTYPE_COMMUNITY_VAL, + com->size * COMMUNITY_SIZE); + memcpy(new->val, com->val, com->size * COMMUNITY_SIZE); } else new->val = NULL; return new; @@ -559,24 +559,24 @@ char *community_str(struct community *com, bool make_json) hash package.*/ unsigned int community_hash_make(const struct community *com) { - uint32_t *pnt = (uint32_t *)com->val; + uint32_t *pnt = com->val; return jhash2(pnt, com->size, 0x43ea96c1); } -int community_match(const struct community *com1, const struct community *com2) +bool community_match(const struct community *com1, const struct community *com2) { int i = 0; int j = 0; if (com1 == NULL && com2 == NULL) - return 1; + return true; if (com1 == NULL || com2 == NULL) - return 0; + return false; if (com1->size < com2->size) - return 0; + return false; /* Every community on com2 needs to be on com1 for this to match */ while (i < com1->size && j < com2->size) { @@ -586,9 +586,9 @@ int community_match(const struct community *com1, const struct community *com2) } if (j == com2->size) - return 1; + return true; else - return 0; + return false; } /* If two aspath have same value then return 1 else return 0. This @@ -601,7 +601,8 @@ bool community_cmp(const struct community *com1, const struct community *com2) return false; if (com1->size == com2->size) - if (memcmp(com1->val, com2->val, com1->size * 4) == 0) + if (memcmp(com1->val, com2->val, com1->size * COMMUNITY_SIZE) + == 0) return true; return false; } @@ -611,13 +612,14 @@ struct community *community_merge(struct community *com1, struct community *com2) { if (com1->val) - com1->val = XREALLOC(MTYPE_COMMUNITY_VAL, com1->val, - (com1->size + com2->size) * 4); + com1->val = + XREALLOC(MTYPE_COMMUNITY_VAL, com1->val, + (com1->size + com2->size) * COMMUNITY_SIZE); else com1->val = XMALLOC(MTYPE_COMMUNITY_VAL, - (com1->size + com2->size) * 4); + (com1->size + com2->size) * COMMUNITY_SIZE); - memcpy(com1->val + com1->size, com2->val, com2->size * 4); + memcpy(com1->val + com1->size, com2->val, com2->size * COMMUNITY_SIZE); com1->size += com2->size; return com1; diff --git a/bgpd/bgp_community.h b/bgpd/bgp_community.h index 74a3a6b507..b99f38ab64 100644 --- a/bgpd/bgp_community.h +++ b/bgpd/bgp_community.h @@ -61,8 +61,10 @@ struct community { #define COMMUNITY_LOCAL_AS 0xFFFFFF03 #define COMMUNITY_NO_PEER 0xFFFFFF04 +#define COMMUNITY_SIZE 4 + /* Macros of community attribute. 
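
The bgp_community.c hunks above replace the bare constant 4 with the named COMMUNITY_SIZE when sizing, copying and validating the COMMUNITIES attribute. Since the attribute is just an array of 4-byte values in network order, a standalone parse (my own helper, not FRR's community_parse()) is short:

#include <arpa/inet.h>   /* ntohl() */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define COMMUNITY_SIZE 4

/* Parse a COMMUNITIES attribute body into a malloc'd array of host-order
 * values.  Returns NULL (and *count = 0) on a malformed length. */
static uint32_t *parse_communities(const uint8_t *buf, uint16_t len,
                                   size_t *count)
{
    uint32_t *vals;
    size_t n, i;

    *count = 0;
    if (len == 0 || (len % COMMUNITY_SIZE) != 0)
        return NULL;

    n = len / COMMUNITY_SIZE;
    vals = malloc(n * sizeof(uint32_t));
    if (!vals)
        return NULL;

    for (i = 0; i < n; i++) {
        uint32_t v;

        memcpy(&v, buf + i * COMMUNITY_SIZE, sizeof(v));
        vals[i] = ntohl(v);
    }

    *count = n;
    return vals;
}
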
*/ -#define com_length(X) ((X)->size * 4) +#define com_length(X) ((X)->size * COMMUNITY_SIZE) #define com_lastval(X) ((X)->val + (X)->size - 1) #define com_nthval(X,n) ((X)->val + (n)) @@ -77,7 +79,7 @@ extern void community_unintern(struct community **); extern char *community_str(struct community *, bool make_json); extern unsigned int community_hash_make(const struct community *); extern struct community *community_str2com(const char *); -extern int community_match(const struct community *, const struct community *); +extern bool community_match(const struct community *, const struct community *); extern bool community_cmp(const struct community *c1, const struct community *c2); extern struct community *community_merge(struct community *, @@ -85,7 +87,7 @@ extern struct community *community_merge(struct community *, extern struct community *community_delete(struct community *, struct community *); extern struct community *community_dup(struct community *); -extern int community_include(struct community *, uint32_t); +extern bool community_include(struct community *, uint32_t); extern void community_del_val(struct community *, uint32_t *); extern unsigned long community_count(void); extern struct hash *community_hash(void); diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c index 792f3cea70..538610f6d7 100644 --- a/bgpd/bgp_damp.c +++ b/bgpd/bgp_damp.c @@ -155,9 +155,9 @@ static int bgp_reuse_timer(struct thread *t) if (bdi->lastrecord == BGP_RECORD_UPDATE) { bgp_path_info_unset_flag(bdi->rn, bdi->path, BGP_PATH_HISTORY); - bgp_aggregate_increment(bgp, &bdi->rn->p, - bdi->path, bdi->afi, - bdi->safi); + bgp_aggregate_increment( + bgp, bgp_node_get_prefix(bdi->rn), + bdi->path, bdi->afi, bdi->safi); bgp_process(bgp, bdi->rn, bdi->afi, bdi->safi); } diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 466fecc581..5104e23515 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -170,12 +170,22 @@ static const struct message bgp_notify_capability_msg[] = { {BGP_NOTIFY_CAPABILITY_MALFORMED_CODE, "/Malformed Capability Value"}, {0}}; +static const struct message bgp_notify_fsm_msg[] = { + {BGP_NOTIFY_FSM_ERR_SUBCODE_UNSPECIFIC, "/Unspecific"}, + {BGP_NOTIFY_FSM_ERR_SUBCODE_OPENSENT, + "/Receive Unexpected Message in OpenSent State"}, + {BGP_NOTIFY_FSM_ERR_SUBCODE_OPENCONFIRM, + "/Receive Unexpected Message in OpenConfirm State"}, + {BGP_NOTIFY_FSM_ERR_SUBCODE_ESTABLISHED, + "/Receive Unexpected Message in Established State"}, + {0}}; + /* Origin strings. 
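
bgp_debug.c gains a message table for the FSM Error NOTIFICATION subcodes defined in RFC 6608, so bgp_notify_subcode_str() can name the state that received the unexpected message instead of falling through with no text for BGP_NOTIFY_FSM_ERR. The lookup is the usual sentinel-terminated table; a self-contained version of the pattern:

struct msg {
    int key;
    const char *str;
};

static const struct msg fsm_subcodes[] = {
    {0, "Unspecific"},
    {1, "Receive Unexpected Message in OpenSent State"},
    {2, "Receive Unexpected Message in OpenConfirm State"},
    {3, "Receive Unexpected Message in Established State"},
    {0, NULL},    /* sentinel */
};

/* Return the string for key, or def if the key is unknown. */
static const char *lookup(const struct msg *tbl, int key, const char *def)
{
    for (; tbl->str; tbl++)
        if (tbl->key == key)
            return tbl->str;
    return def;
}
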
*/ const char *const bgp_origin_str[] = {"i", "e", "?"}; const char *const bgp_origin_long_str[] = {"IGP", "EGP", "incomplete"}; -static int bgp_debug_print_evpn_prefix(struct vty *vty, const char *desc, - struct prefix *p); +static void bgp_debug_print_evpn_prefix(struct vty *vty, const char *desc, + struct prefix *p); /* Given a string return a pointer the corresponding peer structure */ static struct peer *bgp_find_peer(struct vty *vty, const char *peer_str) { @@ -305,8 +315,8 @@ static void bgp_debug_list_add_entry(struct list *list, const char *host, listnode_add(list, filter); } -static int bgp_debug_list_remove_entry(struct list *list, const char *host, - struct prefix *p) +static bool bgp_debug_list_remove_entry(struct list *list, const char *host, + struct prefix *p) { struct bgp_debug_filter *filter; struct listnode *node, *nnode; @@ -316,21 +326,21 @@ static int bgp_debug_list_remove_entry(struct list *list, const char *host, listnode_delete(list, filter); XFREE(MTYPE_BGP_DEBUG_STR, filter->host); XFREE(MTYPE_BGP_DEBUG_FILTER, filter); - return 1; + return true; } else if (p && filter->p->prefixlen == p->prefixlen && prefix_match(filter->p, p)) { listnode_delete(list, filter); prefix_free(&filter->p); XFREE(MTYPE_BGP_DEBUG_FILTER, filter); - return 1; + return true; } } - return 0; + return false; } -static int bgp_debug_list_has_entry(struct list *list, const char *host, - const struct prefix *p) +static bool bgp_debug_list_has_entry(struct list *list, const char *host, + const struct prefix *p) { struct bgp_debug_filter *filter; struct listnode *node, *nnode; @@ -338,32 +348,32 @@ static int bgp_debug_list_has_entry(struct list *list, const char *host, for (ALL_LIST_ELEMENTS(list, node, nnode, filter)) { if (host) { if (strcmp(filter->host, host) == 0) { - return 1; + return true; } } else if (p) { if (filter->p->prefixlen == p->prefixlen && prefix_match(filter->p, p)) { - return 1; + return true; } } } - return 0; + return false; } -int bgp_debug_peer_updout_enabled(char *host) +bool bgp_debug_peer_updout_enabled(char *host) { return (bgp_debug_list_has_entry(bgp_debug_update_out_peers, host, NULL)); } /* Dump attribute. 
*/ -int bgp_dump_attr(struct attr *attr, char *buf, size_t size) +bool bgp_dump_attr(struct attr *attr, char *buf, size_t size) { char addrbuf[BUFSIZ]; if (!attr) - return 0; + return false; buf[0] = '\0'; @@ -445,9 +455,9 @@ int bgp_dump_attr(struct attr *attr, char *buf, size_t size) } if (strlen(buf) > 1) - return 1; + return true; else - return 0; + return false; } const char *bgp_notify_code_str(char code) @@ -471,7 +481,8 @@ const char *bgp_notify_subcode_str(char code, char subcode) case BGP_NOTIFY_HOLD_ERR: break; case BGP_NOTIFY_FSM_ERR: - break; + return lookup_msg(bgp_notify_fsm_msg, subcode, + "Unrecognized Error Subcode"); case BGP_NOTIFY_CEASE: return lookup_msg(bgp_notify_cease_msg, subcode, "Unrecognized Error Subcode"); @@ -550,8 +561,8 @@ static void bgp_debug_clear_updgrp_update_dbg(struct bgp *bgp) update_group_walk(bgp, update_group_clear_update_dbg, NULL); } -static int bgp_debug_print_evpn_prefix(struct vty *vty, const char *desc, - struct prefix *p) +static void bgp_debug_print_evpn_prefix(struct vty *vty, const char *desc, + struct prefix *p) { char evpn_desc[PREFIX2STR_BUFFER + INET_ADDRSTRLEN]; char buf[PREFIX2STR_BUFFER]; @@ -590,8 +601,6 @@ static int bgp_debug_print_evpn_prefix(struct vty *vty, const char *desc, } vty_out(vty, "%s %s\n", desc, evpn_desc); - - return 0; } static int bgp_debug_parse_evpn_prefix(struct vty *vty, struct cmd_token **argv, @@ -2395,7 +2404,7 @@ void bgp_debug_init(void) /* Return true if this prefix is on the per_prefix_list of prefixes to debug * for BGP_DEBUG_TYPE */ -static int bgp_debug_per_prefix(struct prefix *p, +static int bgp_debug_per_prefix(const struct prefix *p, unsigned long term_bgp_debug_type, unsigned int BGP_DEBUG_TYPE, struct list *per_prefix_list) @@ -2480,8 +2489,8 @@ int bgp_debug_keepalive(struct peer *peer) bgp_debug_keepalive_peers); } -int bgp_debug_update(struct peer *peer, struct prefix *p, - struct update_group *updgrp, unsigned int inbound) +bool bgp_debug_update(struct peer *peer, const struct prefix *p, + struct update_group *updgrp, unsigned int inbound) { char *host = NULL; @@ -2492,7 +2501,7 @@ int bgp_debug_update(struct peer *peer, struct prefix *p, if (bgp_debug_per_peer(host, term_bgp_debug_update, BGP_DEBUG_UPDATE_IN, bgp_debug_update_in_peers)) - return 1; + return true; } /* outbound */ @@ -2500,12 +2509,12 @@ int bgp_debug_update(struct peer *peer, struct prefix *p, if (bgp_debug_per_peer(host, term_bgp_debug_update, BGP_DEBUG_UPDATE_OUT, bgp_debug_update_out_peers)) - return 1; + return true; /* Check if update debugging implicitly enabled for the group. 
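
A recurring theme in the debug, damp, dump and bmp hunks is to pass const struct prefix * and to read a node's prefix through bgp_node_get_prefix() rather than reaching into rn->p directly, which documents that callers must not modify the prefix and keeps the table node layout private. Sketched with placeholder types, the accessor amounts to no more than:

struct prefix_s {
    unsigned char family;
    unsigned char prefixlen;
    /* ... address bytes ... */
};

struct node_s {
    struct prefix_s p;
    /* ... table linkage ... */
};

/* Read-only view of the node's prefix; callers cannot modify it. */
static inline const struct prefix_s *node_get_prefix(const struct node_s *n)
{
    return &n->p;
}
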
*/ if (updgrp && UPDGRP_DBG_ON(updgrp)) - return 1; + return true; } @@ -2513,38 +2522,38 @@ int bgp_debug_update(struct peer *peer, struct prefix *p, if (bgp_debug_per_prefix(p, term_bgp_debug_update, BGP_DEBUG_UPDATE_PREFIX, bgp_debug_update_prefixes)) - return 1; + return true; } - return 0; + return false; } -int bgp_debug_bestpath(struct prefix *p) +bool bgp_debug_bestpath(struct bgp_node *rn) { if (BGP_DEBUG(bestpath, BESTPATH)) { - if (bgp_debug_per_prefix(p, term_bgp_debug_bestpath, - BGP_DEBUG_BESTPATH, - bgp_debug_bestpath_prefixes)) - return 1; + if (bgp_debug_per_prefix( + bgp_node_get_prefix(rn), term_bgp_debug_bestpath, + BGP_DEBUG_BESTPATH, bgp_debug_bestpath_prefixes)) + return true; } - return 0; + return false; } -int bgp_debug_zebra(struct prefix *p) +bool bgp_debug_zebra(const struct prefix *p) { if (BGP_DEBUG(zebra, ZEBRA)) { if (bgp_debug_per_prefix(p, term_bgp_debug_zebra, BGP_DEBUG_ZEBRA, bgp_debug_zebra_prefixes)) - return 1; + return true; } - return 0; + return false; } const char *bgp_debug_rdpfxpath2str(afi_t afi, safi_t safi, - struct prefix_rd *prd, + const struct prefix_rd *prd, union prefixconstptr pu, mpls_label_t *label, uint32_t num_labels, int addpath_valid, uint32_t addpath_id, diff --git a/bgpd/bgp_debug.h b/bgpd/bgp_debug.h index 1e6482e969..7352b7917a 100644 --- a/bgpd/bgp_debug.h +++ b/bgpd/bgp_debug.h @@ -157,8 +157,8 @@ struct bgp_debug_filter { extern const char *const bgp_type_str[]; -extern int bgp_dump_attr(struct attr *, char *, size_t); -extern int bgp_debug_peer_updout_enabled(char *host); +extern bool bgp_dump_attr(struct attr *, char *, size_t); +extern bool bgp_debug_peer_updout_enabled(char *host); extern const char *bgp_notify_code_str(char); extern const char *bgp_notify_subcode_str(char, char); extern void bgp_notify_print(struct peer *, struct bgp_notify *, const char *); @@ -166,15 +166,16 @@ extern void bgp_notify_print(struct peer *, struct bgp_notify *, const char *); extern const struct message bgp_status_msg[]; extern int bgp_debug_neighbor_events(struct peer *peer); extern int bgp_debug_keepalive(struct peer *peer); -extern int bgp_debug_update(struct peer *peer, struct prefix *p, - struct update_group *updgrp, unsigned int inbound); -extern int bgp_debug_bestpath(struct prefix *p); -extern int bgp_debug_zebra(struct prefix *p); - -extern const char *bgp_debug_rdpfxpath2str(afi_t, safi_t, struct prefix_rd *, - union prefixconstptr, mpls_label_t *, - uint32_t, int, uint32_t, char *, - int); +extern bool bgp_debug_update(struct peer *peer, const struct prefix *p, + struct update_group *updgrp, unsigned int inbound); +extern bool bgp_debug_bestpath(struct bgp_node *rn); +extern bool bgp_debug_zebra(const struct prefix *p); + +extern const char * +bgp_debug_rdpfxpath2str(afi_t afi, safi_t safi, const struct prefix_rd *prd, + union prefixconstptr pu, mpls_label_t *label, + uint32_t num_labels, int addpath_valid, + uint32_t addpath_id, char *str, int size); const char *bgp_notify_admin_message(char *buf, size_t bufsz, uint8_t *data, size_t datalen); diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c index c448b9894a..cd1722ccca 100644 --- a/bgpd/bgp_dump.c +++ b/bgpd/bgp_dump.c @@ -307,6 +307,7 @@ bgp_dump_route_node_record(int afi, struct bgp_node *rn, struct stream *obuf; size_t sizep; size_t endp; + const struct prefix *p = bgp_node_get_prefix(rn); obuf = bgp_dump_obuf; stream_reset(obuf); @@ -325,19 +326,19 @@ bgp_dump_route_node_record(int afi, struct bgp_node *rn, stream_putl(obuf, seq); /* Prefix length */ - stream_putc(obuf, 
rn->p.prefixlen); + stream_putc(obuf, p->prefixlen); /* Prefix */ if (afi == AFI_IP) { /* We'll dump only the useful bits (those not 0), but have to * align on 8 bits */ - stream_write(obuf, (uint8_t *)&rn->p.u.prefix4, - (rn->p.prefixlen + 7) / 8); + stream_write(obuf, (uint8_t *)&p->u.prefix4, + (p->prefixlen + 7) / 8); } else if (afi == AFI_IP6) { /* We'll dump only the useful bits (those not 0), but have to * align on 8 bits */ - stream_write(obuf, (uint8_t *)&rn->p.u.prefix6, - (rn->p.prefixlen + 7) / 8); + stream_write(obuf, (uint8_t *)&p->u.prefix6, + (p->prefixlen + 7) / 8); } /* Save where we are now, so we can overwride the entry count later */ @@ -361,7 +362,7 @@ bgp_dump_route_node_record(int afi, struct bgp_node *rn, /* Dump attribute. */ /* Skip prefix & AFI/SAFI for MP_NLRI */ - bgp_dump_routes_attr(obuf, path->attr, &rn->p); + bgp_dump_routes_attr(obuf, path->attr, p); cur_endp = stream_get_endp(obuf); if (cur_endp > BGP_MAX_PACKET_SIZE + BGP_DUMP_MSG_HEADER diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index 2711cf7a69..fe09aab956 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -74,41 +74,67 @@ static void ecommunity_hash_free(struct ecommunity *ecom) Attribute structure. When the value is already exists in the structure, we don't add the value. Newly added value is sorted by numerical order. When the value is added to the structure return 1 - else return 0. */ -int ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval) + else return 0. + The additional parameters 'unique' and 'overwrite' ensure a particular + extended community (based on type and sub-type) is present only + once and whether the new value should replace what is existing or + not. +*/ +bool ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval, + bool unique, bool overwrite) { - int c; + int c, ins_idx; /* When this is fist value, just add it. */ if (ecom->val == NULL) { ecom->size = 1; ecom->val = XCALLOC(MTYPE_ECOMMUNITY_VAL, ECOMMUNITY_SIZE); memcpy(ecom->val, eval->val, ECOMMUNITY_SIZE); - return 1; + return true; } /* If the value already exists in the structure return 0. */ + /* check also if the extended community itself exists. */ c = 0; + ins_idx = -1; for (uint8_t *p = ecom->val; c < ecom->size; p += ECOMMUNITY_SIZE, c++) { + if (unique) { + if (p[0] == eval->val[0] && + p[1] == eval->val[1]) { + if (overwrite) { + memcpy(p, eval->val, ECOMMUNITY_SIZE); + return 1; + } + return 0; + } + } int ret = memcmp(p, eval->val, ECOMMUNITY_SIZE); if (ret == 0) return 0; - else if (ret > 0) - break; + if (ret > 0) { + if (!unique) + break; + if (ins_idx == -1) + ins_idx = c; + } } + if (ins_idx == -1) + ins_idx = c; + /* Add the value to the structure with numerical sorting. 
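 * (Illustrative sketch, not from this patch, of the new 'unique'/'overwrite'
 * arguments; 'ecom' is assumed to be an existing struct ecommunity *. A
 * route target may legitimately appear several times, so it is inserted
 * with unique=false, while the link bandwidth community is kept
 * single-instance and refreshed in place with unique=true, overwrite=true;
 * 12500000 bytes/sec corresponds to 100 Mbps.)
 *
 *   struct ecommunity_val eval;
 *
 *   encode_route_target_as(65001, 100, &eval);
 *   ecommunity_add_val(ecom, &eval, false, false);
 *
 *   encode_lb_extcomm(65001, 12500000, false, &eval);
 *   ecommunity_add_val(ecom, &eval, true, true);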
*/ ecom->size++; ecom->val = XREALLOC(MTYPE_ECOMMUNITY_VAL, ecom->val, ecom->size * ECOMMUNITY_SIZE); - memmove(ecom->val + ((c + 1) * ECOMMUNITY_SIZE), - ecom->val + (c * ECOMMUNITY_SIZE), - (ecom->size - 1 - c) * ECOMMUNITY_SIZE); - memcpy(ecom->val + (c * ECOMMUNITY_SIZE), eval->val, ECOMMUNITY_SIZE); + memmove(ecom->val + ((ins_idx + 1) * ECOMMUNITY_SIZE), + ecom->val + (ins_idx * ECOMMUNITY_SIZE), + (ecom->size - 1 - ins_idx) * ECOMMUNITY_SIZE); + memcpy(ecom->val + (ins_idx * ECOMMUNITY_SIZE), + eval->val, ECOMMUNITY_SIZE); - return 1; + return true; } /* This function takes pointer to Extended Communites strucutre then @@ -128,7 +154,7 @@ struct ecommunity *ecommunity_uniq_sort(struct ecommunity *ecom) for (i = 0; i < ecom->size; i++) { eval = (struct ecommunity_val *)(ecom->val + (i * ECOMMUNITY_SIZE)); - ecommunity_add_val(new, eval); + ecommunity_add_val(new, eval, false, false); } return new; } @@ -543,7 +569,7 @@ struct ecommunity *ecommunity_str2com(const char *str, int type, if (ecom == NULL) ecom = ecommunity_new(); eval.val[1] = type; - ecommunity_add_val(ecom, &eval); + ecommunity_add_val(ecom, &eval, false, false); break; case ecommunity_token_unknown: default: @@ -555,7 +581,7 @@ struct ecommunity *ecommunity_str2com(const char *str, int type, return ecom; } -static int ecommunity_rt_soo_str(char *buf, size_t bufsz, uint8_t *pnt, +static int ecommunity_rt_soo_str(char *buf, size_t bufsz, const uint8_t *pnt, int type, int sub_type, int format) { int len = 0; @@ -611,6 +637,33 @@ static int ecommunity_rt_soo_str(char *buf, size_t bufsz, uint8_t *pnt, return len; } +static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt) +{ + int len = 0; + as_t as; + uint32_t bw; + char bps_buf[20] = {0}; + +#define ONE_GBPS_BYTES (1000 * 1000 * 1000 / 8) +#define ONE_MBPS_BYTES (1000 * 1000 / 8) +#define ONE_KBPS_BYTES (1000 / 8) + + as = (*pnt++ << 8); + as |= (*pnt++); + (void)ptr_get_be32(pnt, &bw); + if (bw >= ONE_GBPS_BYTES) + sprintf(bps_buf, "%.3f Gbps", (float)(bw/ONE_GBPS_BYTES)); + else if (bw >= ONE_MBPS_BYTES) + sprintf(bps_buf, "%.3f Mbps", (float)(bw/ONE_MBPS_BYTES)); + else if (bw >= ONE_KBPS_BYTES) + sprintf(bps_buf, "%.3f Kbps", (float)(bw/ONE_KBPS_BYTES)); + else + sprintf(bps_buf, "%u bps", bw * 8); + + len = snprintf(buf, bufsz, "LB:%u:%u (%s)", as, bw, bps_buf); + return len; +} + /* Convert extended community attribute to string. 
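 * (Worked example for ecommunity_lb_str() just above, not from this patch:
 * the bandwidth carried in the community is in bytes per second, so
 * ONE_MBPS_BYTES is 1000 * 1000 / 8 = 125000. For AS 65001 and
 * bw = 12500000 (100 Mbps) the helper prints
 * "LB:65001:12500000 (100.000 Mbps)"; the quotient is computed with
 * integer division before the float cast, so the fractional digits shown
 * are always .000.)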
Due to historical reason of industry standard implementation, there @@ -686,6 +739,11 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) INET_ADDRSTRLEN); snprintf(encbuf, sizeof(encbuf), "NH:%s:%d", ipv4str, pnt[5]); + } else if (sub_type == + ECOMMUNITY_LINK_BANDWIDTH && + type == ECOMMUNITY_ENCODE_AS) { + ecommunity_lb_str(encbuf, + sizeof(encbuf), pnt); } else unk_ecom = 1; } else { @@ -765,7 +823,7 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) if (sub_type == ECOMMUNITY_REDIRECT_VRF) { char buf[16] = {}; ecommunity_rt_soo_str( - buf, sizeof(buf), (uint8_t *)pnt, + buf, sizeof(buf), pnt, type & ~ECOMMUNITY_ENCODE_TRANS_EXP, ECOMMUNITY_ROUTE_TARGET, ECOMMUNITY_FORMAT_DISPLAY); @@ -821,6 +879,12 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) (uint8_t)mac.octet[5]); } else unk_ecom = 1; + } else if (type == ECOMMUNITY_ENCODE_AS_NON_TRANS) { + sub_type = *pnt++; + if (sub_type == ECOMMUNITY_LINK_BANDWIDTH) + ecommunity_lb_str(encbuf, sizeof(encbuf), pnt); + else + unk_ecom = 1; } else { sub_type = *pnt++; unk_ecom = 1; @@ -837,20 +901,20 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) return str_buf; } -int ecommunity_match(const struct ecommunity *ecom1, - const struct ecommunity *ecom2) +bool ecommunity_match(const struct ecommunity *ecom1, + const struct ecommunity *ecom2) { int i = 0; int j = 0; if (ecom1 == NULL && ecom2 == NULL) - return 1; + return true; if (ecom1 == NULL || ecom2 == NULL) - return 0; + return false; if (ecom1->size < ecom2->size) - return 0; + return false; /* Every community on com2 needs to be on com1 for this to match */ while (i < ecom1->size && j < ecom2->size) { @@ -862,9 +926,9 @@ int ecommunity_match(const struct ecommunity *ecom1, } if (j == ecom2->size) - return 1; + return true; else - return 0; + return false; } /* return first occurence of type */ @@ -889,54 +953,63 @@ extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *ecom, /* remove ext. community matching type and subtype * return 1 on success ( removed ), 0 otherwise (not present) */ -extern int ecommunity_strip(struct ecommunity *ecom, uint8_t type, - uint8_t subtype) +bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, + uint8_t subtype) { - uint8_t *p; + uint8_t *p, *q, *new; int c, found = 0; /* When this is fist value, just add it. */ - if (ecom == NULL || ecom->val == NULL) { - return 0; - } + if (ecom == NULL || ecom->val == NULL) + return false; - /* If the value already exists in the structure return 0. */ + /* Check if any existing ext community matches. */ + /* Certain extended communities like the Route Target can be present + * multiple times, handle that. + */ c = 0; for (p = ecom->val; c < ecom->size; p += ECOMMUNITY_SIZE, c++) { - if (p[0] == type && p[1] == subtype) { - found = 1; - break; - } + if (p[0] == type && p[1] == subtype) + found++; } + /* If no matching ext community exists, return. */ if (found == 0) - return 0; - /* Strip The selected value */ - ecom->size--; - /* size is reduced. 
no memmove to do */ - p = XMALLOC(MTYPE_ECOMMUNITY_VAL, ecom->size * ECOMMUNITY_SIZE); - if (c != 0) - memcpy(p, ecom->val, c * ECOMMUNITY_SIZE); - if ((ecom->size - c) != 0) - memcpy(p + (c)*ECOMMUNITY_SIZE, - ecom->val + (c + 1) * ECOMMUNITY_SIZE, - (ecom->size - c) * ECOMMUNITY_SIZE); - /* shift last ecommunities */ - XFREE(MTYPE_ECOMMUNITY, ecom->val); - ecom->val = p; - return 1; + return false; + + /* Handle the case where everything needs to be stripped. */ + if (found == ecom->size) { + XFREE(MTYPE_ECOMMUNITY_VAL, ecom->val); + ecom->size = 0; + return true; + } + + /* Strip matching ext community(ies). */ + new = XMALLOC(MTYPE_ECOMMUNITY_VAL, + (ecom->size - found) * ECOMMUNITY_SIZE); + q = new; + for (c = 0, p = ecom->val; c < ecom->size; c++, p += ECOMMUNITY_SIZE) { + if (!(p[0] == type && p[1] == subtype)) { + memcpy(q, p, ECOMMUNITY_SIZE); + q += ECOMMUNITY_SIZE; + } + } + XFREE(MTYPE_ECOMMUNITY_VAL, ecom->val); + ecom->val = new; + ecom->size -= found; + return true; } /* * Remove specified extended community value from extended community. * Returns 1 if value was present (and hence, removed), 0 otherwise. */ -int ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval) +bool ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval) { uint8_t *p; int c, found = 0; /* Make sure specified value exists. */ if (ecom == NULL || ecom->val == NULL) - return 0; + return false; c = 0; for (p = ecom->val; c < ecom->size; p += ECOMMUNITY_SIZE, c++) { if (!memcmp(p, eval->val, ECOMMUNITY_SIZE)) { @@ -945,7 +1018,7 @@ int ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval) } } if (found == 0) - return 0; + return false; /* Delete the selected value */ ecom->size--; @@ -958,7 +1031,7 @@ int ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval) (ecom->size - c) * ECOMMUNITY_SIZE); XFREE(MTYPE_ECOMMUNITY_VAL, ecom->val); ecom->val = p; - return 1; + return true; } int ecommunity_fill_pbr_action(struct ecommunity_val *ecom_eval, @@ -1156,3 +1229,83 @@ void bgp_remove_ecomm_from_aggregate_hash(struct bgp_aggregate *aggregate, } } } + +/* + * return the BGP link bandwidth extended community, if present; + * the actual bandwidth is returned via param + */ +const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, uint32_t *bw) +{ + const uint8_t *eval; + int i; + + if (bw) + *bw = 0; + + if (!ecom || !ecom->size) + return NULL; + + for (i = 0; i < ecom->size; i++) { + const uint8_t *pnt; + uint8_t type, sub_type; + uint32_t bwval; + + eval = pnt = (ecom->val + (i * ECOMMUNITY_SIZE)); + type = *pnt++; + sub_type = *pnt++; + + if ((type == ECOMMUNITY_ENCODE_AS || + type == ECOMMUNITY_ENCODE_AS_NON_TRANS) && + sub_type == ECOMMUNITY_LINK_BANDWIDTH) { + pnt += 2; /* bandwidth is encoded as AS:val */ + pnt = ptr_get_be32(pnt, &bwval); + (void)pnt; /* consume value */ + if (bw) + *bw = bwval; + return eval; + } + } + + return NULL; +} + + +struct ecommunity *ecommunity_replace_linkbw(as_t as, + struct ecommunity *ecom, + uint64_t cum_bw) +{ + struct ecommunity *new; + struct ecommunity_val lb_eval; + const uint8_t *eval; + uint8_t type; + uint32_t cur_bw; + + /* Nothing to replace if link-bandwidth doesn't exist or + * is non-transitive - just return existing extcommunity. 
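 * (Illustrative sketch of a possible caller, not from this patch - the real
 * call sites are outside this hunk. 'pi' is a hypothetical path whose
 * attribute carries a transitive link bandwidth; cum_bw is presumably the
 * 64-bit sum of the member links' bandwidths in bytes/sec, e.g.
 * 12500000 + 25000000 for links of 100 and 200 Mbps, which this helper
 * clamps to 32 bits. When a replacement happens the helper returns a
 * duplicate and leaves the original ecommunity untouched.)
 *
 *   uint64_t cum_bw = 12500000ULL + 25000000ULL;
 *   struct ecommunity *new;
 *
 *   new = ecommunity_replace_linkbw(bgp->as, pi->attr->ecommunity, cum_bw);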
+ */ + new = ecom; + if (!ecom || !ecom->size) + return new; + + eval = ecommunity_linkbw_present(ecom, &cur_bw); + if (!eval) + return new; + + type = *eval; + if (type & ECOMMUNITY_FLAG_NON_TRANSITIVE) + return new; + + /* Transitive link-bandwidth exists, replace with the passed + * (cumulative) bandwidth value. We need to create a new + * extcommunity for this - refer to AS-Path replace function + * for reference. + */ + if (cum_bw > 0xFFFFFFFF) + cum_bw = 0xFFFFFFFF; + encode_lb_extcomm(as > BGP_AS_MAX ? BGP_AS_TRANS : as, cum_bw, + false, &lb_eval); + new = ecommunity_dup(ecom); + ecommunity_add_val(new, &lb_eval, true, true); + + return new; +} diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h index ae64f41ca1..7deae8e746 100644 --- a/bgpd/bgp_ecommunity.h +++ b/bgpd/bgp_ecommunity.h @@ -24,21 +24,36 @@ #include "bgpd/bgp_route.h" #include "bgpd/bgpd.h" +/* Refer to rfc7153 for the IANA registry definitions. These are + * updated by other standards like rfc7674. + */ /* High-order octet of the Extended Communities type field. */ #define ECOMMUNITY_ENCODE_AS 0x00 #define ECOMMUNITY_ENCODE_IP 0x01 #define ECOMMUNITY_ENCODE_AS4 0x02 #define ECOMMUNITY_ENCODE_OPAQUE 0x03 #define ECOMMUNITY_ENCODE_EVPN 0x06 -#define ECOMMUNITY_ENCODE_TRANS_EXP 0x80 /* Flow Spec */ #define ECOMMUNITY_ENCODE_REDIRECT_IP_NH 0x08 /* Flow Spec */ +/* Generic Transitive Experimental */ +#define ECOMMUNITY_ENCODE_TRANS_EXP 0x80 + /* RFC7674 */ #define ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 0x81 #define ECOMMUNITY_EXTENDED_COMMUNITY_PART_3 0x82 +/* Non-transitive extended community types. */ +#define ECOMMUNITY_ENCODE_AS_NON_TRANS 0x40 +#define ECOMMUNITY_ENCODE_IP_NON_TRANS 0x41 +#define ECOMMUNITY_ENCODE_AS4_NON_TRANS 0x42 +#define ECOMMUNITY_ENCODE_OPAQUE_NON_TRANS 0x43 + /* Low-order octet of the Extended Communities type field. */ +/* Note: This really depends on the high-order octet. This means that + * multiple definitions for the same value are possible. 
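 * For example (illustration, not from this patch): sub-type 0x04 below is
 * the link bandwidth community and is only decoded here under the
 * AS-specific high-order octets 0x00 (transitive) and 0x40 (non-transitive).
 * encode_lb_extcomm() further below lays the 8 octets out as
 *
 *   <0x00 or 0x40> <0x04> <AS, 2 octets> <bandwidth in bytes/sec, 4 octets, big-endian>
 *
 * so AS 65001 (0xFDE9) with 12500000 bytes/sec (100 Mbps, 0x00BEBC20)
 * becomes 00 04 FD E9 00 BE BC 20 in the transitive form.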
+ */ #define ECOMMUNITY_ROUTE_TARGET 0x02 #define ECOMMUNITY_SITE_ORIGIN 0x03 +#define ECOMMUNITY_LINK_BANDWIDTH 0x04 #define ECOMMUNITY_TRAFFIC_RATE 0x06 /* Flow Spec */ #define ECOMMUNITY_TRAFFIC_ACTION 0x07 #define ECOMMUNITY_REDIRECT_VRF 0x08 @@ -150,6 +165,26 @@ static inline void encode_route_target_as4(as_t as, uint16_t val, eval->val[7] = val & 0xff; } +/* + * Encode BGP Link Bandwidth extended community + * bandwidth (bw) is in bytes-per-sec + */ +static inline void encode_lb_extcomm(as_t as, uint32_t bw, bool non_trans, + struct ecommunity_val *eval) +{ + memset(eval, 0, sizeof(*eval)); + eval->val[0] = ECOMMUNITY_ENCODE_AS; + if (non_trans) + eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE; + eval->val[1] = ECOMMUNITY_LINK_BANDWIDTH; + eval->val[2] = (as >> 8) & 0xff; + eval->val[3] = as & 0xff; + eval->val[4] = (bw >> 24) & 0xff; + eval->val[5] = (bw >> 16) & 0xff; + eval->val[6] = (bw >> 8) & 0xff; + eval->val[7] = bw & 0xff; +} + extern void ecommunity_init(void); extern void ecommunity_finish(void); extern void ecommunity_free(struct ecommunity **); @@ -165,22 +200,22 @@ extern unsigned int ecommunity_hash_make(const void *); extern struct ecommunity *ecommunity_str2com(const char *, int, int); extern char *ecommunity_ecom2str(struct ecommunity *, int, int); extern void ecommunity_strfree(char **s); -extern int ecommunity_match(const struct ecommunity *, - const struct ecommunity *); +extern bool ecommunity_match(const struct ecommunity *, + const struct ecommunity *); extern char *ecommunity_str(struct ecommunity *); extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *, uint8_t, uint8_t); -extern int ecommunity_add_val(struct ecommunity *ecom, - struct ecommunity_val *eval); +extern bool ecommunity_add_val(struct ecommunity *ecom, + struct ecommunity_val *eval, + bool unique, bool overwrite); /* for vpn */ extern struct ecommunity *ecommunity_new(void); -extern int ecommunity_add_val(struct ecommunity *, struct ecommunity_val *); -extern int ecommunity_strip(struct ecommunity *ecom, uint8_t type, - uint8_t subtype); +extern bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, + uint8_t subtype); extern struct ecommunity *ecommunity_new(void); -extern int ecommunity_del_val(struct ecommunity *ecom, - struct ecommunity_val *eval); +extern bool ecommunity_del_val(struct ecommunity *ecom, + struct ecommunity_val *eval); struct bgp_pbr_entry_action; extern int ecommunity_fill_pbr_action(struct ecommunity_val *ecom_eval, struct bgp_pbr_entry_action *api); @@ -201,5 +236,17 @@ extern void bgp_remove_ecomm_from_aggregate_hash( struct bgp_aggregate *aggregate, struct ecommunity *ecommunity); extern void bgp_aggr_ecommunity_remove(void *arg); +extern const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, + uint32_t *bw); +extern struct ecommunity *ecommunity_replace_linkbw(as_t as, + struct ecommunity *ecom, uint64_t cum_bw); +static inline void ecommunity_strip_rts(struct ecommunity *ecom) +{ + uint8_t subtype = ECOMMUNITY_ROUTE_TARGET; + + ecommunity_strip(ecom, ECOMMUNITY_ENCODE_AS, subtype); + ecommunity_strip(ecom, ECOMMUNITY_ENCODE_IP, subtype); + ecommunity_strip(ecom, ECOMMUNITY_ENCODE_AS4, subtype); +} #endif /* _QUAGGA_BGP_ECOMMUNITY_H */ diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c index 664d62fd11..fadccc5026 100644 --- a/bgpd/bgp_evpn.c +++ b/bgpd/bgp_evpn.c @@ -496,7 +496,7 @@ static void unmap_vni_from_rt(struct bgp *bgp, struct bgpevpn *vpn, } static void bgp_evpn_get_rmac_nexthop(struct bgpevpn *vpn, - struct prefix_evpn *p, + 
const struct prefix_evpn *p, struct attr *attr, uint8_t flags) { struct bgp *bgp_vrf = vpn->bgp_vrf; @@ -548,7 +548,7 @@ static void form_auto_rt(struct bgp *bgp, vni_t vni, struct list *rtl) encode_route_target_as((bgp->as & 0xFFFF), vni, &eval); ecomadd = ecommunity_new(); - ecommunity_add_val(ecomadd, &eval); + ecommunity_add_val(ecomadd, &eval, false, false); for (ALL_LIST_ELEMENTS_RO(rtl, node, ecom)) if (ecommunity_cmp(ecomadd, ecom)) ecom_found = true; @@ -583,7 +583,7 @@ static void evpn_convert_nexthop_to_ipv6(struct attr *attr) * Add (update) or delete MACIP from zebra. */ static int bgp_zebra_send_remote_macip(struct bgp *bgp, struct bgpevpn *vpn, - struct prefix_evpn *p, + const struct prefix_evpn *p, struct in_addr remote_vtep_ip, int add, uint8_t flags, uint32_t seq) { @@ -651,8 +651,8 @@ static int bgp_zebra_send_remote_macip(struct bgp *bgp, struct bgpevpn *vpn, * Add (update) or delete remote VTEP from zebra. */ static int bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn, - struct prefix_evpn *p, - int flood_control, int add) + const struct prefix_evpn *p, + int flood_control, int add) { struct stream *s; @@ -738,12 +738,12 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf, struct attr *attr) { struct ecommunity ecom_encap; - struct ecommunity ecom_rmac; struct ecommunity_val eval; struct ecommunity_val eval_rmac; bgp_encap_types tnl_type; struct listnode *node, *nnode; struct ecommunity *ecom; + struct ecommunity *old_ecom; struct list *vrf_export_rtl = NULL; /* Encap */ @@ -754,7 +754,14 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf, ecom_encap.val = (uint8_t *)eval.val; /* Add Encap */ - attr->ecommunity = ecommunity_dup(&ecom_encap); + if (attr->ecommunity) { + old_ecom = attr->ecommunity; + ecom = ecommunity_merge(ecommunity_dup(old_ecom), &ecom_encap); + if (!old_ecom->refcnt) + ecommunity_free(&old_ecom); + } else + ecom = ecommunity_dup(&ecom_encap); + attr->ecommunity = ecom; /* Add the export RTs for L3VNI/VRF */ vrf_export_rtl = bgp_vrf->vrf_export_rtl; @@ -764,12 +771,8 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf, /* add the router mac extended community */ if (!is_zero_mac(&attr->rmac)) { - memset(&ecom_rmac, 0, sizeof(ecom_rmac)); encode_rmac_extcomm(&eval_rmac, &attr->rmac); - ecom_rmac.size = 1; - ecom_rmac.val = (uint8_t *)eval_rmac.val; - attr->ecommunity = - ecommunity_merge(attr->ecommunity, &ecom_rmac); + ecommunity_add_val(attr->ecommunity, &eval_rmac, true, true); } attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); @@ -791,7 +794,6 @@ static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr, struct ecommunity ecom_encap; struct ecommunity ecom_sticky; struct ecommunity ecom_default_gw; - struct ecommunity ecom_rmac; struct ecommunity ecom_na; struct ecommunity_val eval; struct ecommunity_val eval_sticky; @@ -845,12 +847,8 @@ static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr, /* Add RMAC, if told to. */ if (add_l3_ecomm) { - memset(&ecom_rmac, 0, sizeof(ecom_rmac)); encode_rmac_extcomm(&eval_rmac, &attr->rmac); - ecom_rmac.size = 1; - ecom_rmac.val = (uint8_t *)eval_rmac.val; - attr->ecommunity = - ecommunity_merge(attr->ecommunity, &ecom_rmac); + ecommunity_add_val(attr->ecommunity, &eval_rmac, true, true); } /* Add default gateway, if needed. 
*/ @@ -903,8 +901,8 @@ static void add_mac_mobility_to_attr(uint32_t seq_num, struct attr *attr) if (type == ECOMMUNITY_ENCODE_EVPN && sub_type == ECOMMUNITY_EVPN_SUBTYPE_MACMOBILITY) { - ecom_val_ptr = (uint8_t *)(attr->ecommunity->val - + (i * 8)); + ecom_val_ptr = + (attr->ecommunity->val + (i * 8)); break; } } @@ -930,7 +928,8 @@ static void add_mac_mobility_to_attr(uint32_t seq_num, struct attr *attr) /* Install EVPN route into zebra. */ static int evpn_zebra_install(struct bgp *bgp, struct bgpevpn *vpn, - struct prefix_evpn *p, struct bgp_path_info *pi) + const struct prefix_evpn *p, + struct bgp_path_info *pi) { int ret; uint8_t flags; @@ -970,7 +969,7 @@ static int evpn_zebra_install(struct bgp *bgp, struct bgpevpn *vpn, /* Uninstall EVPN route from zebra. */ static int evpn_zebra_uninstall(struct bgp *bgp, struct bgpevpn *vpn, - struct prefix_evpn *p, + const struct prefix_evpn *p, struct in_addr remote_vtep_ip) { int ret; @@ -1004,7 +1003,7 @@ static void evpn_delete_old_local_route(struct bgp *bgp, struct bgpevpn *vpn, * L3VPN routes. */ global_rn = bgp_afi_node_lookup(bgp->rib[afi][safi], afi, safi, - (struct prefix *)&rn->p, &vpn->prd); + bgp_node_get_prefix(rn), &vpn->prd); if (global_rn) { /* Delete route entry in the global EVPN table. */ delete_evpn_route_entry(bgp, afi, safi, global_rn, &pi); @@ -1054,9 +1053,8 @@ static int is_vtep_present_in_list(struct list *list, * Best path for ES route was changed, * update the list of VTEPs for this ES */ -static int evpn_es_install_vtep(struct bgp *bgp, - struct evpnes *es, - struct prefix_evpn *p, +static int evpn_es_install_vtep(struct bgp *bgp, struct evpnes *es, + const struct prefix_evpn *p, struct in_addr rvtep) { struct in_addr *vtep_ip; @@ -1128,10 +1126,11 @@ static int evpn_es_route_select_install(struct bgp *bgp, && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) { if (bgp_zebra_has_route_changed(rn, old_select)) { ret = evpn_es_install_vtep(bgp, es, - (struct prefix_evpn *)&rn->p, + (const struct prefix_evpn *)bgp_node_get_prefix(rn), old_select->attr->nexthop); } UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG); bgp_zebra_clear_route_change_flags(rn); return ret; } @@ -1152,18 +1151,19 @@ static int evpn_es_route_select_install(struct bgp *bgp, bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED); bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); } if (new_select && new_select->type == ZEBRA_ROUTE_BGP && new_select->sub_type == BGP_ROUTE_IMPORTED) { ret = evpn_es_install_vtep(bgp, es, - (struct prefix_evpn *)&rn->p, + (const struct prefix_evpn *)bgp_node_get_prefix(rn), new_select->attr->nexthop); } else { if (old_select && old_select->type == ZEBRA_ROUTE_BGP && old_select->sub_type == BGP_ROUTE_IMPORTED) ret = evpn_es_uninstall_vtep( - bgp, es, (struct prefix_evpn *)&rn->p, + bgp, es, (struct prefix_evpn *)bgp_node_get_prefix(rn), old_select->attr->nexthop); } @@ -1208,9 +1208,10 @@ static int evpn_route_select_install(struct bgp *bgp, struct bgpevpn *vpn, && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) { if (bgp_zebra_has_route_changed(rn, old_select)) ret = evpn_zebra_install( - bgp, vpn, (struct prefix_evpn *)&rn->p, + bgp, vpn, (const struct prefix_evpn *)bgp_node_get_prefix(rn), old_select); UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG); 
bgp_zebra_clear_route_change_flags(rn); return ret; } @@ -1230,12 +1231,14 @@ static int evpn_route_select_install(struct bgp *bgp, struct bgpevpn *vpn, bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED); bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); } if (new_select && new_select->type == ZEBRA_ROUTE_BGP && new_select->sub_type == BGP_ROUTE_IMPORTED) { - ret = evpn_zebra_install(bgp, vpn, (struct prefix_evpn *)&rn->p, - new_select); + ret = evpn_zebra_install( + bgp, vpn, (struct prefix_evpn *)bgp_node_get_prefix(rn), + new_select); /* If an old best existed and it was a "local" route, the only * reason @@ -1251,9 +1254,11 @@ static int evpn_route_select_install(struct bgp *bgp, struct bgpevpn *vpn, } else { if (old_select && old_select->type == ZEBRA_ROUTE_BGP && old_select->sub_type == BGP_ROUTE_IMPORTED) - ret = evpn_zebra_uninstall(bgp, vpn, - (struct prefix_evpn *)&rn->p, - old_select->attr->nexthop); + ret = evpn_zebra_uninstall( + bgp, vpn, + (const struct prefix_evpn *)bgp_node_get_prefix( + rn), + old_select->attr->nexthop); } /* Clear any route change flags. */ @@ -1330,11 +1335,11 @@ static int update_evpn_type4_route_entry(struct bgp *bgp, struct evpnes *es, struct bgp_path_info *local_pi = NULL; /* local route entry if any */ struct bgp_path_info *remote_pi = NULL; /* remote route entry if any */ struct attr *attr_new = NULL; - struct prefix_evpn *evp = NULL; + const struct prefix_evpn *evp = NULL; *ri = NULL; *route_changed = 1; - evp = (struct prefix_evpn *)&rn->p; + evp = (const struct prefix_evpn *)bgp_node_get_prefix(rn); /* locate the local and remote entries if any */ for (tmp_pi = bgp_node_get_bgp_path_info(rn); tmp_pi; @@ -1662,10 +1667,10 @@ static int update_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn, uint32_t num_labels = 1; int route_change = 1; uint8_t sticky = 0; - struct prefix_evpn *evp; + const struct prefix_evpn *evp; *pi = NULL; - evp = (struct prefix_evpn *)&rn->p; + evp = (const struct prefix_evpn *)bgp_node_get_prefix(rn); memset(&label, 0, sizeof(label)); /* See if this is an update of an existing route, or a new add. 
*/ @@ -1797,8 +1802,8 @@ static void evpn_zebra_reinstall_best_route(struct bgp *bgp, if (curr_select && curr_select->type == ZEBRA_ROUTE_BGP && curr_select->sub_type == BGP_ROUTE_IMPORTED) evpn_zebra_install(bgp, vpn, - (struct prefix_evpn *)&rn->p, - curr_select); + (const struct prefix_evpn *)bgp_node_get_prefix(rn), + curr_select); } /* @@ -1820,13 +1825,10 @@ static void evpn_cleanup_local_non_best_route(struct bgp *bgp, struct bgp_node *rn, struct bgp_path_info *local_pi) { - char buf[PREFIX_STRLEN]; - /* local path was not picked as the winner; kick it out */ - if (bgp_debug_zebra(NULL)) { - zlog_debug("evicting local evpn prefix %s as remote won", - prefix2str(&rn->p, buf, sizeof(buf))); - } + if (bgp_debug_zebra(NULL)) + zlog_debug("evicting local evpn prefix %pRN as remote won", rn); + evpn_delete_old_local_route(bgp, vpn, rn, local_pi); bgp_path_info_reap(rn, local_pi); @@ -2145,7 +2147,7 @@ static int update_all_type2_routes(struct bgp *bgp, struct bgpevpn *vpn) */ for (rn = bgp_table_top(vpn->route_table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = (const struct prefix_evpn *)bgp_node_get_prefix(rn); struct bgp_node *rd_rn; struct bgp_path_info *global_pi; @@ -2278,7 +2280,7 @@ static int delete_global_type2_routes(struct bgp *bgp, struct bgpevpn *vpn) if (rdrn && bgp_node_has_bgp_path_info_data(rdrn)) { table = bgp_node_get_bgp_table_info(rdrn); for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = (const struct prefix_evpn *)bgp_node_get_prefix(rn); if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE) continue; @@ -2319,7 +2321,7 @@ static int delete_all_type2_routes(struct bgp *bgp, struct bgpevpn *vpn) /* Next, walk this VNI's route table and delete local type-2 routes. */ for (rn = bgp_table_top(vpn->route_table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = (const struct prefix_evpn *)bgp_node_get_prefix(rn); if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE) continue; @@ -2555,7 +2557,7 @@ bgp_create_evpn_bgp_path_info(struct bgp_path_info *parent_pi, /* Install EVPN route entry in ES */ static int install_evpn_route_entry_in_es(struct bgp *bgp, struct evpnes *es, - struct prefix_evpn *p, + const struct prefix_evpn *p, struct bgp_path_info *parent_pi) { int ret = 0; @@ -2619,7 +2621,7 @@ static int install_evpn_route_entry_in_es(struct bgp *bgp, struct evpnes *es, * Install route entry into the VRF routing table and invoke route selection. */ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, - struct prefix_evpn *evp, + const struct prefix_evpn *evp, struct bgp_path_info *parent_pi) { struct bgp_node *rn; @@ -2709,7 +2711,8 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, /* as it is an importation, change nexthop */ bgp_path_info_set_flag(rn, pi, BGP_PATH_ANNC_NH_SELF); - bgp_aggregate_increment(bgp_vrf, &rn->p, pi, afi, safi); + bgp_aggregate_increment(bgp_vrf, bgp_node_get_prefix(rn), pi, afi, + safi); /* Perform route selection and update zebra, if required. */ bgp_process(bgp_vrf, rn, afi, safi); @@ -2726,7 +2729,7 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, * Install route entry into the VNI routing table and invoke route selection. 
*/ static int install_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn, - struct prefix_evpn *p, + const struct prefix_evpn *p, struct bgp_path_info *parent_pi) { struct bgp_node *rn; @@ -2782,7 +2785,7 @@ static int install_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn, /* Uninstall EVPN route entry from ES route table */ static int uninstall_evpn_route_entry_in_es(struct bgp *bgp, struct evpnes *es, - struct prefix_evpn *p, + const struct prefix_evpn *p, struct bgp_path_info *parent_pi) { int ret; @@ -2825,7 +2828,7 @@ static int uninstall_evpn_route_entry_in_es(struct bgp *bgp, struct evpnes *es, * to zebra, if appropriate. */ static int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, - struct prefix_evpn *evp, + const struct prefix_evpn *evp, struct bgp_path_info *parent_pi) { struct bgp_node *rn; @@ -2876,7 +2879,8 @@ static int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, /* Process for route leaking. */ vpn_leak_from_vrf_withdraw(bgp_get_default(), bgp_vrf, pi); - bgp_aggregate_decrement(bgp_vrf, &rn->p, pi, afi, safi); + bgp_aggregate_decrement(bgp_vrf, bgp_node_get_prefix(rn), pi, afi, + safi); /* Mark entry for deletion */ bgp_path_info_delete(rn, pi); @@ -2895,7 +2899,7 @@ static int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, * to zebra, if appropriate. */ static int uninstall_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn, - struct prefix_evpn *p, + const struct prefix_evpn *p, struct bgp_path_info *parent_pi) { struct bgp_node *rn; @@ -2932,7 +2936,7 @@ static int uninstall_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn, /* * Given a prefix, see if it belongs to ES. */ -static int is_prefix_matching_for_es(struct prefix_evpn *p, +static int is_prefix_matching_for_es(const struct prefix_evpn *p, struct evpnes *es) { /* if not an ES route return false */ @@ -3107,7 +3111,9 @@ static int install_uninstall_routes_for_es(struct bgp *bgp, continue; for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = + (const struct prefix_evpn *)bgp_node_get_prefix( + rn); for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = pi->next) { @@ -3153,7 +3159,7 @@ static int install_uninstall_routes_for_es(struct bgp *bgp, * route into bgp vrf table and remote rmac in bridge table. */ static int bgp_evpn_route_rmac_self_check(struct bgp *bgp_vrf, - struct prefix_evpn *evp, + const struct prefix_evpn *evp, struct bgp_path_info *pi) { /* evpn route could have learnt prior to L3vni has come up, @@ -3214,7 +3220,7 @@ static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, int install) continue; for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = (const struct prefix_evpn *)bgp_node_get_prefix(rn); /* if not mac-ip route skip this route */ if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE @@ -3302,7 +3308,9 @@ static int install_uninstall_routes_for_vni(struct bgp *bgp, continue; for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = + (const struct prefix_evpn *)bgp_node_get_prefix( + rn); if (evp->prefix.route_type != rtype) continue; @@ -3524,7 +3532,7 @@ static int install_uninstall_route_in_vnis(struct bgp *bgp, afi_t afi, * Install or uninstall route for appropriate VNIs/ESIs. 
*/ static int install_uninstall_evpn_route(struct bgp *bgp, afi_t afi, safi_t safi, - struct prefix *p, + const struct prefix *p, struct bgp_path_info *pi, int import) { struct prefix_evpn *evp = (struct prefix_evpn *)p; @@ -3774,7 +3782,8 @@ static int update_advertise_vni_routes(struct bgp *bgp, struct bgpevpn *vpn) */ for (rn = bgp_table_top(vpn->route_table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = + (const struct prefix_evpn *)bgp_node_get_prefix(rn); /* Identify MAC-IP local routes. */ if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE) @@ -4297,13 +4306,14 @@ static int process_type5_route(struct peer *peer, afi_t afi, safi_t safi, return ret; } -static void evpn_mpattr_encode_type5(struct stream *s, struct prefix *p, - struct prefix_rd *prd, mpls_label_t *label, - uint32_t num_labels, struct attr *attr) +static void evpn_mpattr_encode_type5(struct stream *s, const struct prefix *p, + const struct prefix_rd *prd, + mpls_label_t *label, uint32_t num_labels, + struct attr *attr) { int len; char temp[16]; - struct evpn_addr *p_evpn_p; + const struct evpn_addr *p_evpn_p; memset(&temp, 0, 16); if (p->family != AF_EVPN) @@ -4465,7 +4475,7 @@ static void update_autort_vni(struct hash_bucket *bucket, struct bgp *bgp) */ /* withdraw type-5 route corresponding to ip prefix */ -void bgp_evpn_withdraw_type5_route(struct bgp *bgp_vrf, struct prefix *p, +void bgp_evpn_withdraw_type5_route(struct bgp *bgp_vrf, const struct prefix *p, afi_t afi, safi_t safi) { int ret = 0; @@ -4499,8 +4509,9 @@ void bgp_evpn_withdraw_type5_routes(struct bgp *bgp_vrf, afi_t afi, safi_t safi) for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = pi->next) { if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) && is_route_injectable_into_evpn(pi)) { - bgp_evpn_withdraw_type5_route(bgp_vrf, &rn->p, - afi, safi); + bgp_evpn_withdraw_type5_route( + bgp_vrf, bgp_node_get_prefix(rn), afi, + safi); break; } } @@ -4535,7 +4546,7 @@ void bgp_evpn_install_uninstall_default_route(struct bgp *bgp_vrf, afi_t afi, * path in the case of the attr. In the case of a local prefix (when we * are advertising local subnets), the src_attr will be NULL. 
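 * (Illustration, not from this patch: a route redistributed from the VRF
 * passes the source path's attributes, while a locally originated subnet
 * passes NULL, e.g.
 *
 *   bgp_evpn_advertise_type5_route(bgp_vrf, p, pi ? pi->attr : NULL,
 *                                  afi, safi);
 *
 * where 'pi' is a hypothetical source path that may be absent.)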
*/ -void bgp_evpn_advertise_type5_route(struct bgp *bgp_vrf, struct prefix *p, +void bgp_evpn_advertise_type5_route(struct bgp *bgp_vrf, const struct prefix *p, struct attr *src_attr, afi_t afi, safi_t safi) { @@ -4576,16 +4587,37 @@ void bgp_evpn_advertise_type5_routes(struct bgp *bgp_vrf, afi_t afi, /* apply the route-map */ if (bgp_vrf->adv_cmd_rmap[afi][safi].map) { route_map_result_t ret; + struct bgp_path_info tmp_pi; + struct bgp_path_info_extra tmp_pie; + struct attr tmp_attr; + + tmp_attr = *pi->attr; + + /* Fill temp path_info */ + prep_for_rmap_apply( + &tmp_pi, &tmp_pie, rn, pi, + pi->peer, &tmp_attr); + + RESET_FLAG(tmp_attr.rmap_change_flags); ret = route_map_apply( bgp_vrf->adv_cmd_rmap[afi][safi] .map, - &rn->p, RMAP_BGP, pi); - if (ret == RMAP_DENYMATCH) + bgp_node_get_prefix(rn), + RMAP_BGP, &tmp_pi); + if (ret == RMAP_DENYMATCH) { + bgp_attr_flush(&tmp_attr); continue; - } - bgp_evpn_advertise_type5_route( - bgp_vrf, &rn->p, pi->attr, afi, safi); + } + bgp_evpn_advertise_type5_route( + bgp_vrf, + bgp_node_get_prefix(rn), + &tmp_attr, afi, safi); + } else + bgp_evpn_advertise_type5_route( + bgp_vrf, + bgp_node_get_prefix(rn), + pi->attr, afi, safi); break; } } @@ -4603,7 +4635,7 @@ void evpn_rt_delete_auto(struct bgp *bgp, vni_t vni, struct list *rtl) encode_route_target_as((bgp->as & 0xFFFF), vni, &eval); ecom_auto = ecommunity_new(); - ecommunity_add_val(ecom_auto, &eval); + ecommunity_add_val(ecom_auto, &eval, false, false); node_to_del = NULL; for (ALL_LIST_ELEMENTS(rtl, node, nnode, ecom)) { @@ -4882,7 +4914,7 @@ char *bgp_evpn_label2str(mpls_label_t *label, uint32_t num_labels, char *buf, * Function to convert evpn route to json format. * NOTE: We don't use prefix2str as the output here is a bit different. */ -void bgp_evpn_route2json(struct prefix_evpn *p, json_object *json) +void bgp_evpn_route2json(const struct prefix_evpn *p, json_object *json) { char buf1[ETHER_ADDR_STRLEN]; char buf2[PREFIX2STR_BUFFER]; @@ -4947,7 +4979,7 @@ void bgp_evpn_route2json(struct prefix_evpn *p, json_object *json) * Function to convert evpn route to string. * NOTE: We don't use prefix2str as the output here is a bit different. */ -char *bgp_evpn_route2str(struct prefix_evpn *p, char *buf, int len) +char *bgp_evpn_route2str(const struct prefix_evpn *p, char *buf, int len) { char buf1[ETHER_ADDR_STRLEN]; char buf2[PREFIX2STR_BUFFER]; @@ -5012,8 +5044,8 @@ char *bgp_evpn_route2str(struct prefix_evpn *p, char *buf, int len) /* * Encode EVPN prefix in Update (MP_REACH) */ -void bgp_evpn_encode_prefix(struct stream *s, struct prefix *p, - struct prefix_rd *prd, mpls_label_t *label, +void bgp_evpn_encode_prefix(struct stream *s, const struct prefix *p, + const struct prefix_rd *prd, mpls_label_t *label, uint32_t num_labels, struct attr *attr, int addpath_encode, uint32_t addpath_tx_id) { @@ -5532,7 +5564,7 @@ void bgp_evpn_es_free(struct bgp *bgp, struct evpnes *es) * Import evpn route from global table to VNI/VRF/ESI. */ int bgp_evpn_import_route(struct bgp *bgp, afi_t afi, safi_t safi, - struct prefix *p, struct bgp_path_info *pi) + const struct prefix *p, struct bgp_path_info *pi) { return install_uninstall_evpn_route(bgp, afi, safi, p, pi, 1); } @@ -5541,7 +5573,7 @@ int bgp_evpn_import_route(struct bgp *bgp, afi_t afi, safi_t safi, * Unimport evpn route from VNI/VRF/ESI. 
*/ int bgp_evpn_unimport_route(struct bgp *bgp, afi_t afi, safi_t safi, - struct prefix *p, struct bgp_path_info *pi) + const struct prefix *p, struct bgp_path_info *pi) { return install_uninstall_evpn_route(bgp, afi, safi, p, pi, 0); } @@ -5583,25 +5615,23 @@ int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp) if (bgp_nexthop_self(bgp, afi, pi->type, pi->sub_type, pi->attr, rn)) { + const struct prefix *p = + bgp_node_get_prefix(rn); - char attr_str[BUFSIZ] = {0}; - char pbuf[PREFIX_STRLEN]; + if (bgp_debug_update(pi->peer, p, NULL, + 1)) { + char attr_str[BUFSIZ] = {0}; - bgp_dump_attr(pi->attr, attr_str, - BUFSIZ); + bgp_dump_attr(pi->attr, + attr_str, BUFSIZ); - if (bgp_debug_update(pi->peer, &rn->p, - NULL, 1)) zlog_debug( - "%u: prefix %s with attr %s - DENIED due to martian or self nexthop", - bgp->vrf_id, - prefix2str( - &rn->p, pbuf, - sizeof(pbuf)), + "%u: prefix %pRN with attr %s - DENIED due to martian or self nexthop", + bgp->vrf_id, rn, attr_str); - + } bgp_evpn_unimport_route(bgp, afi, safi, - &rn->p, pi); + p, pi); bgp_rib_remove(rn, pi, pi->peer, afi, safi); @@ -6246,7 +6276,7 @@ void bgp_evpn_vrf_delete(struct bgp *bgp_vrf) /* * Get the prefixlen of the ip prefix carried within the type5 evpn route. */ -int bgp_evpn_get_type5_prefixlen(struct prefix *pfx) +int bgp_evpn_get_type5_prefixlen(const struct prefix *pfx) { struct prefix_evpn *evp = (struct prefix_evpn *)pfx; @@ -6262,7 +6292,7 @@ int bgp_evpn_get_type5_prefixlen(struct prefix *pfx) /* * Should we register nexthop for this EVPN prefix for nexthop tracking? */ -bool bgp_evpn_is_prefix_nht_supported(struct prefix *pfx) +bool bgp_evpn_is_prefix_nht_supported(const struct prefix *pfx) { struct prefix_evpn *evp = (struct prefix_evpn *)pfx; diff --git a/bgpd/bgp_evpn.h b/bgpd/bgp_evpn.h index b030f0a33e..a48a707b94 100644 --- a/bgpd/bgp_evpn.h +++ b/bgpd/bgp_evpn.h @@ -140,11 +140,12 @@ static inline bool is_route_injectable_into_evpn(struct bgp_path_info *pi) } extern void bgp_evpn_advertise_type5_route(struct bgp *bgp_vrf, - struct prefix *p, + const struct prefix *p, struct attr *src_attr, afi_t afi, safi_t safi); -extern void bgp_evpn_withdraw_type5_route(struct bgp *bgp_vrf, struct prefix *p, - afi_t afi, safi_t safi); +extern void bgp_evpn_withdraw_type5_route(struct bgp *bgp_vrf, + const struct prefix *p, afi_t afi, + safi_t safi); extern void bgp_evpn_withdraw_type5_routes(struct bgp *bgp_vrf, afi_t afi, safi_t safi); extern void bgp_evpn_advertise_type5_routes(struct bgp *bgp_vrf, afi_t afi, @@ -153,18 +154,22 @@ extern void bgp_evpn_vrf_delete(struct bgp *bgp_vrf); extern void bgp_evpn_handle_router_id_update(struct bgp *bgp, int withdraw); extern char *bgp_evpn_label2str(mpls_label_t *label, uint32_t num_labels, char *buf, int len); -extern char *bgp_evpn_route2str(struct prefix_evpn *p, char *buf, int len); -extern void bgp_evpn_route2json(struct prefix_evpn *p, json_object *json); -extern void bgp_evpn_encode_prefix(struct stream *s, struct prefix *p, - struct prefix_rd *prd, mpls_label_t *label, - uint32_t num_labels, struct attr *attr, - int addpath_encode, uint32_t addpath_tx_id); +extern char *bgp_evpn_route2str(const struct prefix_evpn *p, char *buf, + int len); +extern void bgp_evpn_route2json(const struct prefix_evpn *p, json_object *json); +extern void bgp_evpn_encode_prefix(struct stream *s, const struct prefix *p, + const struct prefix_rd *prd, + mpls_label_t *label, uint32_t num_labels, + struct attr *attr, int addpath_encode, + uint32_t addpath_tx_id); extern int 
bgp_nlri_parse_evpn(struct peer *peer, struct attr *attr, struct bgp_nlri *packet, int withdraw); extern int bgp_evpn_import_route(struct bgp *bgp, afi_t afi, safi_t safi, - struct prefix *p, struct bgp_path_info *ri); + const struct prefix *p, + struct bgp_path_info *ri); extern int bgp_evpn_unimport_route(struct bgp *bgp, afi_t afi, safi_t safi, - struct prefix *p, struct bgp_path_info *ri); + const struct prefix *p, + struct bgp_path_info *ri); extern int bgp_filter_evpn_routes_upon_martian_nh_change(struct bgp *bgp); extern int bgp_evpn_local_macip_del(struct bgp *bgp, vni_t vni, struct ethaddr *mac, struct ipaddr *ip, @@ -191,8 +196,8 @@ extern void bgp_evpn_flood_control_change(struct bgp *bgp); extern void bgp_evpn_cleanup_on_disable(struct bgp *bgp); extern void bgp_evpn_cleanup(struct bgp *bgp); extern void bgp_evpn_init(struct bgp *bgp); -extern int bgp_evpn_get_type5_prefixlen(struct prefix *pfx); -extern bool bgp_evpn_is_prefix_nht_supported(struct prefix *pfx); +extern int bgp_evpn_get_type5_prefixlen(const struct prefix *pfx); +extern bool bgp_evpn_is_prefix_nht_supported(const struct prefix *pfx); extern void update_advertise_vrf_routes(struct bgp *bgp_vrf); #endif /* _QUAGGA_BGP_EVPN_H */ diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h index 76cf8b2cd6..ea1ae087f1 100644 --- a/bgpd/bgp_evpn_private.h +++ b/bgpd/bgp_evpn_private.h @@ -366,7 +366,7 @@ static inline void encode_na_flag_extcomm(struct ecommunity_val *eval, eval->val[2] |= ECOMMUNITY_EVPN_SUBTYPE_ND_ROUTER_FLAG; } -static inline void ip_prefix_from_type5_prefix(struct prefix_evpn *evp, +static inline void ip_prefix_from_type5_prefix(const struct prefix_evpn *evp, struct prefix *ip) { memset(ip, 0, sizeof(struct prefix)); @@ -392,7 +392,7 @@ static inline int is_evpn_prefix_default(const struct prefix *evp) 1 : 0); } -static inline void ip_prefix_from_type2_prefix(struct prefix_evpn *evp, +static inline void ip_prefix_from_type2_prefix(const struct prefix_evpn *evp, struct prefix *ip) { memset(ip, 0, sizeof(struct prefix)); @@ -409,7 +409,7 @@ static inline void ip_prefix_from_type2_prefix(struct prefix_evpn *evp, } } -static inline void ip_prefix_from_evpn_prefix(struct prefix_evpn *evp, +static inline void ip_prefix_from_evpn_prefix(const struct prefix_evpn *evp, struct prefix *ip) { if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) @@ -432,8 +432,9 @@ static inline void build_evpn_type2_prefix(struct prefix_evpn *p, memcpy(&p->prefix.macip_addr.ip, ip, sizeof(*ip)); } -static inline void build_type5_prefix_from_ip_prefix(struct prefix_evpn *evp, - struct prefix *ip_prefix) +static inline void +build_type5_prefix_from_ip_prefix(struct prefix_evpn *evp, + const struct prefix *ip_prefix) { struct ipaddr ip; diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index 7ed37319b1..769872f2e3 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -59,7 +59,7 @@ struct vni_walk_ctx { static void display_vrf_import_rt(struct vty *vty, struct vrf_irt_node *irt, json_object *json) { - uint8_t *pnt; + const uint8_t *pnt; uint8_t type, sub_type; struct ecommunity_as eas; struct ecommunity_ip eip; @@ -167,7 +167,7 @@ static void show_vrf_import_rt_entry(struct hash_bucket *bucket, void *args[]) static void display_import_rt(struct vty *vty, struct irt_node *irt, json_object *json) { - uint8_t *pnt; + const uint8_t *pnt; uint8_t type, sub_type; struct ecommunity_as eas; struct ecommunity_ip eip; @@ -281,9 +281,10 @@ static void bgp_evpn_show_route_rd_header(struct vty *vty, uint16_t type; struct 
rd_as rd_as; struct rd_ip rd_ip; - uint8_t *pnt; + const uint8_t *pnt; + const struct prefix *p = bgp_node_get_prefix(rd_rn); - pnt = rd_rn->p.u.val; + pnt = p->u.val; /* Decode RD type. */ type = decode_rd_type(pnt); @@ -647,8 +648,9 @@ static void show_esi_routes(struct bgp *bgp, char prefix_str[BUFSIZ]; json_object *json_paths = NULL; json_object *json_prefix = NULL; + const struct prefix *p = bgp_node_get_prefix(rn); - bgp_evpn_route2str((struct prefix_evpn *)&rn->p, prefix_str, + bgp_evpn_route2str((struct prefix_evpn *)p, prefix_str, sizeof(prefix_str)); if (json) @@ -678,7 +680,7 @@ static void show_esi_routes(struct bgp *bgp, if (json) json_path = json_object_new_array(); - route_vty_out(vty, &rn->p, pi, 0, SAFI_EVPN, json_path); + route_vty_out(vty, p, pi, 0, SAFI_EVPN, json_path); if (json) json_object_array_add(json_paths, json_path); @@ -692,7 +694,7 @@ static void show_esi_routes(struct bgp *bgp, json_object_string_add(json_prefix, "prefix", prefix_str); json_object_int_add(json_prefix, "prefixLen", - rn->p.prefixlen); + p->prefixlen); json_object_object_add(json_prefix, "paths", json_paths); json_object_object_add(json, prefix_str, @@ -735,13 +737,15 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type, tbl_ver = table->version; for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = + (const struct prefix_evpn *)bgp_node_get_prefix(rn); int add_prefix_to_json = 0; char prefix_str[BUFSIZ]; json_object *json_paths = NULL; json_object *json_prefix = NULL; + const struct prefix *p = bgp_node_get_prefix(rn); - bgp_evpn_route2str((struct prefix_evpn *)&rn->p, prefix_str, + bgp_evpn_route2str((const struct prefix_evpn *)p, prefix_str, sizeof(prefix_str)); if (type && evp->prefix.route_type != type) @@ -784,7 +788,7 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type, AFI_L2VPN, SAFI_EVPN, json_path); else - route_vty_out(vty, &rn->p, pi, 0, SAFI_EVPN, + route_vty_out(vty, p, pi, 0, SAFI_EVPN, json_path); if (json) @@ -799,7 +803,7 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type, json_object_string_add(json_prefix, "prefix", prefix_str); json_object_int_add(json_prefix, "prefixLen", - rn->p.prefixlen); + p->prefixlen); json_object_object_add(json_prefix, "paths", json_paths); json_object_object_add(json, prefix_str, @@ -1188,8 +1192,9 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd, rn = bgp_route_next(rn)) { uint64_t tbl_ver; json_object *json_nroute = NULL; + const struct prefix *p = bgp_node_get_prefix(rn); - if (prd && memcmp(rn->p.u.val, prd->val, 8) != 0) + if (prd && memcmp(p->u.val, prd->val, 8) != 0) continue; table = bgp_node_get_bgp_table_info(rn); @@ -1290,16 +1295,18 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd, json_array = json_object_new_array(); if (option == SHOW_DISPLAY_TAGS) - route_vty_out_tag(vty, &rm->p, pi, - no_display, SAFI_EVPN, - json_array); + route_vty_out_tag( + vty, bgp_node_get_prefix(rm), + pi, no_display, SAFI_EVPN, + json_array); else if (option == SHOW_DISPLAY_OVERLAY) - route_vty_out_overlay(vty, &rm->p, pi, - no_display, - json_array); + route_vty_out_overlay( + vty, bgp_node_get_prefix(rm), + pi, no_display, json_array); else - route_vty_out(vty, &rm->p, pi, - no_display, SAFI_EVPN, + route_vty_out(vty, + bgp_node_get_prefix(rm), + pi, no_display, SAFI_EVPN, json_array); no_display = 1; } @@ -1308,15 +1315,19 
@@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd, output_count++; if (use_json && json_array) { + const struct prefix *p = + bgp_node_get_prefix(rm); + json_prefix_info = json_object_new_object(); - json_object_string_add(json_prefix_info, - "prefix", bgp_evpn_route2str( - (struct prefix_evpn *)&rm->p, buf, - BUFSIZ)); + json_object_string_add( + json_prefix_info, "prefix", + bgp_evpn_route2str( + (struct prefix_evpn *)p, buf, + BUFSIZ)); json_object_int_add(json_prefix_info, - "prefixLen", rm->p.prefixlen); + "prefixLen", p->prefixlen); json_object_object_add(json_prefix_info, "paths", json_array); @@ -2210,13 +2221,13 @@ static struct bgpevpn *evpn_create_update_vni(struct bgp *bgp, vni_t vni) * appropriate action) and the VNI marked as unconfigured; the * VNI will continue to exist, purely as a "learnt" entity. */ -static int evpn_delete_vni(struct bgp *bgp, struct bgpevpn *vpn) +static void evpn_delete_vni(struct bgp *bgp, struct bgpevpn *vpn) { assert(bgp->vnihash); if (!is_vni_live(vpn)) { bgp_evpn_free(bgp, vpn); - return 0; + return; } /* We need to take the unconfigure action for each parameter of this VNI @@ -2234,8 +2245,6 @@ static int evpn_delete_vni(struct bgp *bgp, struct bgpevpn *vpn) /* Next, deal with the import side. */ if (is_import_rt_configured(vpn)) evpn_unconfigure_import_rt(bgp, vpn, NULL); - - return 0; } /* @@ -2506,8 +2515,7 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp, return; } - bgp_evpn_route2str((struct prefix_evpn *)&p, prefix_str, - sizeof(prefix_str)); + bgp_evpn_route2str(&p, prefix_str, sizeof(prefix_str)); /* Prefix and num paths displayed once per prefix. */ route_vty_out_detail_header(vty, bgp, rn, prd, afi, safi, json); @@ -2565,7 +2573,7 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp, safi = SAFI_EVPN; prefix_cnt = path_cnt = 0; - prefix_rd2str((struct prefix_rd *)prd, rd_str, sizeof(rd_str)); + prefix_rd2str(prd, rd_str, sizeof(rd_str)); rd_rn = bgp_node_lookup(bgp->rib[afi][safi], (struct prefix *)prd); if (!rd_rn) @@ -2582,13 +2590,14 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp, /* Display all prefixes with this RD. 
*/ for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = + (const struct prefix_evpn *)bgp_node_get_prefix(rn); json_object *json_prefix = NULL; json_object *json_paths = NULL; char prefix_str[BUFSIZ]; int add_prefix_to_json = 0; - bgp_evpn_route2str((struct prefix_evpn *)&rn->p, prefix_str, + bgp_evpn_route2str((struct prefix_evpn *)evp, prefix_str, sizeof(prefix_str)); if (type && evp->prefix.route_type != type) @@ -2705,13 +2714,14 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, json_object *json_rd = NULL; /* contains routes for an RD */ int add_rd_to_json = 0; uint64_t tbl_ver; + const struct prefix *rd_rnp = bgp_node_get_prefix(rd_rn); table = bgp_node_get_bgp_table_info(rd_rn); if (table == NULL) continue; tbl_ver = table->version; - prefix_rd2str((struct prefix_rd *)&rd_rn->p, rd_str, + prefix_rd2str((struct prefix_rd *)rd_rnp, rd_str, sizeof(rd_str)); if (json) @@ -2725,12 +2735,15 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, NULL; /* contains prefix under a RD */ json_object *json_paths = NULL; /* array of paths under a prefix*/ - struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p; + const struct prefix_evpn *evp = + (const struct prefix_evpn *)bgp_node_get_prefix( + rn); char prefix_str[BUFSIZ]; int add_prefix_to_json = 0; + const struct prefix *p = bgp_node_get_prefix(rn); - bgp_evpn_route2str((struct prefix_evpn *)&rn->p, - prefix_str, sizeof(prefix_str)); + bgp_evpn_route2str((struct prefix_evpn *)p, prefix_str, + sizeof(prefix_str)); if (type && evp->prefix.route_type != type) continue; @@ -2766,15 +2779,15 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, json_object_string_add(json_prefix, "prefix", prefix_str); json_object_int_add(json_prefix, "prefixLen", - rn->p.prefixlen); + p->prefixlen); } /* Prefix and num paths displayed once per prefix. */ if (detail) route_vty_out_detail_header( vty, bgp, rn, - (struct prefix_rd *)&rd_rn->p, - AFI_L2VPN, SAFI_EVPN, json_prefix); + (struct prefix_rd *)rd_rnp, AFI_L2VPN, + SAFI_EVPN, json_prefix); /* For EVPN, the prefix is displayed for each path (to * fit in @@ -2794,8 +2807,8 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, vty, bgp, rn, pi, AFI_L2VPN, SAFI_EVPN, json_path); } else - route_vty_out(vty, &rn->p, pi, 0, - SAFI_EVPN, json_path); + route_vty_out(vty, p, pi, 0, SAFI_EVPN, + json_path); if (json) json_object_array_add(json_paths, diff --git a/bgpd/bgp_filter.c b/bgpd/bgp_filter.c index a03551e79d..7de8dc2c80 100644 --- a/bgpd/bgp_filter.c +++ b/bgpd/bgp_filter.c @@ -302,12 +302,9 @@ static void as_list_delete(struct as_list *aslist) as_list_free(aslist); } -static int as_list_empty(struct as_list *aslist) +static bool as_list_empty(struct as_list *aslist) { - if (aslist->head == NULL && aslist->tail == NULL) - return 1; - else - return 0; + return aslist->head == NULL && aslist->tail == NULL; } static void as_list_filter_delete(struct as_list *aslist, @@ -337,11 +334,9 @@ static void as_list_filter_delete(struct as_list *aslist, XFREE(MTYPE_AS_STR, name); } -static int as_filter_match(struct as_filter *asfilter, struct aspath *aspath) +static bool as_filter_match(struct as_filter *asfilter, struct aspath *aspath) { - if (bgp_regexec(asfilter->reg, aspath) != REG_NOMATCH) - return 1; - return 0; + return bgp_regexec(asfilter->reg, aspath) != REG_NOMATCH; } /* Apply AS path filter to AS. 
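 * (Illustrative note, not from this patch: as_filter_match() above is now a
 * plain predicate - it returns true when bgp_regexec() finds the filter's
 * regular expression in the AS path; the characters allowed in such
 * expressions are checked by config_bgp_aspath_validate() further below.
 * Assuming the existing helpers bgp_regcomp() from bgpd/bgp_regex.h and
 * aspath_str2aspath() from bgpd/bgp_aspath.h:)
 *
 *   regex_t *re = bgp_regcomp("_65001$");
 *   struct aspath *as = aspath_str2aspath("65010 65001");
 *
 *   if (re && as && bgp_regexec(re, as) != REG_NOMATCH)
 *           zlog_debug("path ends in AS 65001");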
*/ @@ -374,26 +369,25 @@ void as_list_delete_hook(void (*func)(const char *)) as_list_master.delete_hook = func; } -static int as_list_dup_check(struct as_list *aslist, struct as_filter *new) +static bool as_list_dup_check(struct as_list *aslist, struct as_filter *new) { struct as_filter *asfilter; for (asfilter = aslist->head; asfilter; asfilter = asfilter->next) { if (asfilter->type == new->type && strcmp(asfilter->reg_str, new->reg_str) == 0) - return 1; + return true; } - return 0; + return false; } -int config_bgp_aspath_validate(const char *regstr) +bool config_bgp_aspath_validate(const char *regstr) { char valid_chars[] = "1234567890_^|[,{}() ]$*+.?-\\"; if (strspn(regstr, valid_chars) == strlen(regstr)) - return 1; - - return 0; + return true; + return false; } DEFUN(as_path, bgp_as_path_cmd, diff --git a/bgpd/bgp_filter.h b/bgpd/bgp_filter.h index 3c49e357ff..9357a2d382 100644 --- a/bgpd/bgp_filter.h +++ b/bgpd/bgp_filter.h @@ -31,6 +31,6 @@ extern enum as_filter_type as_list_apply(struct as_list *, void *); extern struct as_list *as_list_lookup(const char *); extern void as_list_add_hook(void (*func)(char *)); extern void as_list_delete_hook(void (*func)(const char *)); -extern int config_bgp_aspath_validate(const char *regstr); +extern bool config_bgp_aspath_validate(const char *regstr); #endif /* _QUAGGA_BGP_FILTER_H */ diff --git a/bgpd/bgp_flowspec.h b/bgpd/bgp_flowspec.h index bc201b739f..94c571f2fc 100644 --- a/bgpd/bgp_flowspec.h +++ b/bgpd/bgp_flowspec.h @@ -43,7 +43,7 @@ extern void bgp_fs_nlri_get_string(unsigned char *nlri_content, size_t len, char *return_string, int format, json_object *json_path); -extern void route_vty_out_flowspec(struct vty *vty, struct prefix *p, +extern void route_vty_out_flowspec(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, json_object *json_paths); extern int bgp_fs_config_write_pbr(struct vty *vty, struct bgp *bgp, diff --git a/bgpd/bgp_flowspec_util.c b/bgpd/bgp_flowspec_util.c index 002aae561a..9d824a8641 100644 --- a/bgpd/bgp_flowspec_util.c +++ b/bgpd/bgp_flowspec_util.c @@ -76,9 +76,8 @@ static int bgp_flowspec_call_non_opaque_decode(uint8_t *nlri_content, int len, return ret; } -bool bgp_flowspec_contains_prefix(struct prefix *pfs, - struct prefix *input, - int prefix_check) +bool bgp_flowspec_contains_prefix(const struct prefix *pfs, + struct prefix *input, int prefix_check) { uint32_t offset = 0; int type; @@ -599,8 +598,8 @@ int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len, } /* return 1 if FS entry invalid or no NH IP */ -int bgp_flowspec_get_first_nh(struct bgp *bgp, struct bgp_path_info *pi, - struct prefix *p) +bool bgp_flowspec_get_first_nh(struct bgp *bgp, struct bgp_path_info *pi, + struct prefix *p) { struct bgp_pbr_entry_main api; int i; @@ -608,8 +607,9 @@ int bgp_flowspec_get_first_nh(struct bgp *bgp, struct bgp_path_info *pi, struct bgp_pbr_entry_action *api_action; memset(&api, 0, sizeof(struct bgp_pbr_entry_main)); - if (bgp_pbr_build_and_validate_entry(&rn->p, pi, &api) < 0) - return 1; + if (bgp_pbr_build_and_validate_entry(bgp_node_get_prefix(rn), pi, &api) + < 0) + return true; for (i = 0; i < api.action_num; i++) { api_action = &api.actions[i]; if (api_action->action != ACTION_REDIRECT_IP) @@ -617,7 +617,7 @@ int bgp_flowspec_get_first_nh(struct bgp *bgp, struct bgp_path_info *pi, p->family = AF_INET; p->prefixlen = IPV4_MAX_BITLEN; p->u.prefix4 = api_action->u.zr.redirect_ip_v4; - return 0; + return false; } - return 1; + return true; } diff --git 
a/bgpd/bgp_flowspec_util.h b/bgpd/bgp_flowspec_util.h index 2ce911da4e..0e78c7a53c 100644 --- a/bgpd/bgp_flowspec_util.h +++ b/bgpd/bgp_flowspec_util.h @@ -50,12 +50,11 @@ struct bgp_pbr_entry_main; extern int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len, struct bgp_pbr_entry_main *bpem); -extern bool bgp_flowspec_contains_prefix(struct prefix *pfs, +extern bool bgp_flowspec_contains_prefix(const struct prefix *pfs, struct prefix *input, int prefix_check); -extern int bgp_flowspec_get_first_nh(struct bgp *bgp, - struct bgp_path_info *pi, - struct prefix *nh); +extern bool bgp_flowspec_get_first_nh(struct bgp *bgp, struct bgp_path_info *pi, + struct prefix *nh); #endif /* _FRR_BGP_FLOWSPEC_UTIL_H */ diff --git a/bgpd/bgp_flowspec_vty.c b/bgpd/bgp_flowspec_vty.c index 80384c12c6..c852e18c46 100644 --- a/bgpd/bgp_flowspec_vty.c +++ b/bgpd/bgp_flowspec_vty.c @@ -252,7 +252,7 @@ void bgp_fs_nlri_get_string(unsigned char *nlri_content, size_t len, } } -void route_vty_out_flowspec(struct vty *vty, struct prefix *p, +void route_vty_out_flowspec(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, json_object *json_paths) { @@ -409,8 +409,8 @@ int bgp_show_table_flowspec(struct vty *vty, struct bgp *bgp, afi_t afi, } for (; pi; pi = pi->next) { total_count++; - route_vty_out_flowspec(vty, &rn->p, pi, display, - json_paths); + route_vty_out_flowspec(vty, bgp_node_get_prefix(rn), pi, + display, json_paths); } if (use_json) { vty_out(vty, "%s\n", @@ -554,18 +554,18 @@ extern int bgp_flowspec_display_match_per_ip(afi_t afi, struct bgp_table *rib, json_object *json_paths) { struct bgp_node *rn; - struct prefix *prefix; + const struct prefix *prefix; int display = 0; for (rn = bgp_table_top(rib); rn; rn = bgp_route_next(rn)) { - prefix = &rn->p; + prefix = bgp_node_get_prefix(rn); if (prefix->family != AF_FLOWSPEC) continue; if (bgp_flowspec_contains_prefix(prefix, match, prefix_check)) { route_vty_out_flowspec( - vty, &rn->p, bgp_node_get_bgp_path_info(rn), + vty, prefix, bgp_node_get_bgp_path_info(rn), use_json ? NLRI_STRING_FORMAT_JSON : NLRI_STRING_FORMAT_LARGE, json_paths); diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 71e2b02602..fdffe374c0 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -130,8 +130,8 @@ static struct peer *peer_xfer_conn(struct peer *from_peer) afi_t afi; safi_t safi; int fd; - int status, pstatus; - unsigned char last_evt, last_maj_evt; + enum bgp_fsm_status status, pstatus; + enum bgp_fsm_events last_evt, last_maj_evt; assert(from_peer != NULL); @@ -456,6 +456,10 @@ void bgp_timer_set(struct peer *peer) bgp_keepalives_off(peer); BGP_TIMER_OFF(peer->t_routeadv); break; + case BGP_STATUS_MAX: + flog_err(EC_LIB_DEVELOPMENT, + "BGP_STATUS_MAX while a legal state is not valid state for the FSM"); + break; } } @@ -660,32 +664,29 @@ static int bgp_graceful_deferral_timer_expire(struct thread *thread) return bgp_best_path_select_defer(bgp, afi, safi); } -static int bgp_update_delay_applicable(struct bgp *bgp) +static bool bgp_update_delay_applicable(struct bgp *bgp) { /* update_delay_over flag should be reset (set to 0) for any new applicability of the update-delay during BGP process lifetime. 
And it should be set after an occurence of the update-delay is over)*/ if (!bgp->update_delay_over) - return 1; - - return 0; + return true; + return false; } -int bgp_update_delay_active(struct bgp *bgp) +bool bgp_update_delay_active(struct bgp *bgp) { if (bgp->t_update_delay) - return 1; - - return 0; + return true; + return false; } -int bgp_update_delay_configured(struct bgp *bgp) +bool bgp_update_delay_configured(struct bgp *bgp) { if (bgp->v_update_delay) - return 1; - - return 0; + return true; + return false; } /* Do the post-processing needed when bgp comes out of the read-only mode @@ -836,28 +837,25 @@ void bgp_adjust_routeadv(struct peer *peer) } } -static int bgp_maxmed_onstartup_applicable(struct bgp *bgp) +static bool bgp_maxmed_onstartup_applicable(struct bgp *bgp) { if (!bgp->maxmed_onstartup_over) - return 1; - - return 0; + return true; + return false; } -int bgp_maxmed_onstartup_configured(struct bgp *bgp) +bool bgp_maxmed_onstartup_configured(struct bgp *bgp) { if (bgp->v_maxmed_onstartup != BGP_MAXMED_ONSTARTUP_UNCONFIGURED) - return 1; - - return 0; + return true; + return false; } -int bgp_maxmed_onstartup_active(struct bgp *bgp) +bool bgp_maxmed_onstartup_active(struct bgp *bgp) { if (bgp->t_maxmed_onstartup) - return 1; - - return 0; + return true; + return false; } void bgp_maxmed_update(struct bgp *bgp) @@ -885,6 +883,27 @@ void bgp_maxmed_update(struct bgp *bgp) } } +int bgp_fsm_error_subcode(int status) +{ + int fsm_err_subcode = BGP_NOTIFY_FSM_ERR_SUBCODE_UNSPECIFIC; + + switch (status) { + case OpenSent: + fsm_err_subcode = BGP_NOTIFY_FSM_ERR_SUBCODE_OPENSENT; + break; + case OpenConfirm: + fsm_err_subcode = BGP_NOTIFY_FSM_ERR_SUBCODE_OPENCONFIRM; + break; + case Established: + fsm_err_subcode = BGP_NOTIFY_FSM_ERR_SUBCODE_ESTABLISHED; + break; + default: + break; + } + + return fsm_err_subcode; +} + /* The maxmed onstartup timer expiry callback. */ static int bgp_maxmed_onstartup_timer(struct thread *thread) { @@ -1455,9 +1474,8 @@ static int bgp_connect_success(struct peer *peer) flog_err_sys(EC_LIB_SOCKET, "%s: bgp_getsockname(): failed for peer %s, fd %d", __func__, peer->host, peer->fd); - bgp_notify_send( - peer, BGP_NOTIFY_FSM_ERR, - BGP_NOTIFY_SUBCODE_UNSPECIFIC); /* internal error */ + bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR, + bgp_fsm_error_subcode(peer->status)); bgp_writes_on(peer); return -1; } @@ -1657,7 +1675,8 @@ static int bgp_fsm_event_error(struct peer *peer) flog_err(EC_BGP_FSM, "%s [FSM] unexpected packet received in state %s", peer->host, lookup_msg(bgp_status_msg, peer->status, NULL)); - return bgp_stop_with_notify(peer, BGP_NOTIFY_FSM_ERR, 0); + return bgp_stop_with_notify(peer, BGP_NOTIFY_FSM_ERR, + bgp_fsm_error_subcode(peer->status)); } /* Hold timer expire. This is error of BGP connection. So cut the @@ -1935,8 +1954,7 @@ static int bgp_establish(struct peer *peer) hash_release(peer->bgp->peerhash, peer); hash_get(peer->bgp->peerhash, peer, hash_alloc_intern); - bgp_bfd_deregister_peer(peer); - bgp_bfd_register_peer(peer); + bgp_bfd_reset_peer(peer); return ret; } @@ -2017,7 +2035,7 @@ void bgp_fsm_event_update(struct peer *peer, int valid) /* Finite State Machine structure */ static const struct { int (*func)(struct peer *); - int next_state; + enum bgp_fsm_status next_state; } FSM[BGP_STATUS_MAX - 1][BGP_EVENTS_MAX - 1] = { { /* Idle state: In Idle state, all events other than BGP_Start is @@ -2164,7 +2182,7 @@ static const struct { /* Execute event process. 
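Editor's note: the new bgp_fsm_error_subcode() above maps the current FSM state to the per-state "FSM Error" subcodes defined by RFC 6608, instead of always sending subcode 0; the surrounding hunks wire its result into bgp_notify_send(). A self-contained sketch of the same mapping, with demo_* names standing in for FRR's enums:

#include <stdio.h>

/* RFC 6608 FSM Error subcodes. */
enum demo_fsm_subcode {
	DEMO_FSM_ERR_UNSPECIFIC = 0,
	DEMO_FSM_ERR_OPENSENT = 1,
	DEMO_FSM_ERR_OPENCONFIRM = 2,
	DEMO_FSM_ERR_ESTABLISHED = 3,
};

/* Stand-in for peer->status; the real enum has more states. */
enum demo_fsm_state {
	DEMO_IDLE,
	DEMO_OPENSENT,
	DEMO_OPENCONFIRM,
	DEMO_ESTABLISHED,
};

static enum demo_fsm_subcode demo_fsm_error_subcode(enum demo_fsm_state state)
{
	switch (state) {
	case DEMO_OPENSENT:
		return DEMO_FSM_ERR_OPENSENT;
	case DEMO_OPENCONFIRM:
		return DEMO_FSM_ERR_OPENCONFIRM;
	case DEMO_ESTABLISHED:
		return DEMO_FSM_ERR_ESTABLISHED;
	default:
		return DEMO_FSM_ERR_UNSPECIFIC;
	}
}

int main(void)
{
	/* An unexpected packet in OpenConfirm now yields subcode 2
	 * instead of the old catch-all 0. */
	printf("subcode %d\n", demo_fsm_error_subcode(DEMO_OPENCONFIRM));
	return 0;
}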
*/ int bgp_event(struct thread *thread) { - int event; + enum bgp_fsm_events event; struct peer *peer; int ret; @@ -2176,9 +2194,9 @@ int bgp_event(struct thread *thread) return (ret); } -int bgp_event_update(struct peer *peer, int event) +int bgp_event_update(struct peer *peer, enum bgp_fsm_events event) { - int next; + enum bgp_fsm_status next; int ret = 0; struct peer *other; int passive_conn = 0; @@ -2517,7 +2535,7 @@ int bgp_neighbor_graceful_restart(struct peer *peer, int peer_gr_cmd) peer->peer_gr_present_state = peer_new_state; if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART)) zlog_debug( - "[BGP_GR] Succesfully change the state of the peer to : %s : !", + "[BGP_GR] Successfully change the state of the peer to : %s : !", print_peer_gr_mode(peer_new_state)); return BGP_GR_SUCCESS; diff --git a/bgpd/bgp_fsm.h b/bgpd/bgp_fsm.h index 6feabbf570..2fd5f6fc47 100644 --- a/bgpd/bgp_fsm.h +++ b/bgpd/bgp_fsm.h @@ -111,7 +111,7 @@ /* Prototypes. */ extern void bgp_fsm_event_update(struct peer *peer, int valid); extern int bgp_event(struct thread *); -extern int bgp_event_update(struct peer *, int event); +extern int bgp_event_update(struct peer *, enum bgp_fsm_events event); extern int bgp_stop(struct peer *peer); extern void bgp_timer_set(struct peer *); extern int bgp_routeadv_timer(struct thread *); @@ -119,8 +119,9 @@ extern void bgp_fsm_change_status(struct peer *peer, int status); extern const char *const peer_down_str[]; extern void bgp_update_delay_end(struct bgp *); extern void bgp_maxmed_update(struct bgp *); -extern int bgp_maxmed_onstartup_configured(struct bgp *); -extern int bgp_maxmed_onstartup_active(struct bgp *); +extern bool bgp_maxmed_onstartup_configured(struct bgp *); +extern bool bgp_maxmed_onstartup_active(struct bgp *); +extern int bgp_fsm_error_subcode(int status); /** * Start the route advertisement timer (that honors MRAI) for all the diff --git a/bgpd/bgp_label.c b/bgpd/bgp_label.c index ff1ab1a37d..ec44037bf7 100644 --- a/bgpd/bgp_label.c +++ b/bgpd/bgp_label.c @@ -132,7 +132,6 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, { struct bgp_path_info *pi; struct bgp_node *rn; - char addr[PREFIX_STRLEN]; pi = labelid; /* Is this path still valid? 
*/ @@ -145,10 +144,9 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, } rn = pi->net; - prefix2str(&rn->p, addr, PREFIX_STRLEN); if (BGP_DEBUG(labelpool, LABELPOOL)) - zlog_debug("%s: FEC %s label=%u, allocated=%d", __func__, addr, + zlog_debug("%s: FEC %pRN label=%u, allocated=%d", __func__, rn, new_label, allocated); if (!allocated) { @@ -174,8 +172,8 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, if (pi->attr->label_index != MPLS_INVALID_LABEL_INDEX) { flog_err( EC_BGP_LABEL, - "%s: FEC %s Rejecting allocated label %u as Label Index is %u", - __func__, addr, new_label, pi->attr->label_index); + "%s: FEC %pRN Rejecting allocated label %u as Label Index is %u", + __func__, rn, new_label, pi->attr->label_index); bgp_register_for_label(pi->net, pi); @@ -189,8 +187,8 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, } /* Shouldn't happen: different label allocation */ flog_err(EC_BGP_LABEL, - "%s: %s had label %u but got new assignment %u", - __func__, addr, pi->attr->label, new_label); + "%s: %pRN had label %u but got new assignment %u", + __func__, rn, pi->attr->label, new_label); /* continue means use new one */ } @@ -210,14 +208,14 @@ void bgp_reg_dereg_for_label(struct bgp_node *rn, struct bgp_path_info *pi, { bool with_label_index = false; struct stream *s; - struct prefix *p; + const struct prefix *p; mpls_label_t *local_label; int command; uint16_t flags = 0; size_t flags_pos = 0; char addr[PREFIX_STRLEN]; - p = &(rn->p); + p = bgp_node_get_prefix(rn); local_label = &(rn->local_label); /* this prevents the loop when we're called by * bgp_reg_for_label_callback() @@ -473,7 +471,7 @@ int bgp_nlri_parse_label(struct peer *peer, struct attr *attr, if (pnt != lim) { flog_err( EC_BGP_UPDATE_RCV, - "%s [Error] Update packet error / L-U (%zu data remaining after parsing)", + "%s [Error] Update packet error / L-U (%td data remaining after parsing)", peer->host, lim - pnt); return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH; } diff --git a/bgpd/bgp_lcommunity.c b/bgpd/bgp_lcommunity.c index 7a4435f6f2..f47ae91663 100644 --- a/bgpd/bgp_lcommunity.c +++ b/bgpd/bgp_lcommunity.c @@ -2,7 +2,7 @@ * * Copyright (C) 2016 Keyur Patel <keyur@arrcus.com> * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software @@ -46,6 +46,8 @@ void lcommunity_free(struct lcommunity **lcom) { XFREE(MTYPE_LCOMMUNITY_VAL, (*lcom)->val); XFREE(MTYPE_LCOMMUNITY_STR, (*lcom)->str); + if ((*lcom)->json) + json_object_free((*lcom)->json); XFREE(MTYPE_LCOMMUNITY, *lcom); } @@ -59,8 +61,8 @@ static void lcommunity_hash_free(struct lcommunity *lcom) structure, we don't add the value. Newly added value is sorted by numerical order. When the value is added to the structure return 1 else return 0. */ -static int lcommunity_add_val(struct lcommunity *lcom, - struct lcommunity_val *lval) +static bool lcommunity_add_val(struct lcommunity *lcom, + struct lcommunity_val *lval) { uint8_t *p; int ret; @@ -71,7 +73,7 @@ static int lcommunity_add_val(struct lcommunity *lcom, lcom->size++; lcom->val = XMALLOC(MTYPE_LCOMMUNITY_VAL, lcom_length(lcom)); memcpy(lcom->val, lval->val, LCOMMUNITY_SIZE); - return 1; + return true; } /* If the value already exists in the structure return 0. 
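Editor's note: the log-format fix here (and in the VPN parser later), changing "%zu" to "%td" for lim - pnt, matters because subtracting two pointers yields a ptrdiff_t, not a size_t. A compilable reminder of the matching conversion specifier:

#include <stdio.h>

int main(void)
{
	char buf[16];
	char *pnt = buf + 3;
	char *lim = buf + sizeof(buf);

	/* lim - pnt is a ptrdiff_t, so %td is the correct specifier;
	 * %zu expects size_t and is technically mismatched. */
	printf("%td bytes remaining after parsing\n", lim - pnt);
	return 0;
}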
*/ @@ -79,7 +81,7 @@ static int lcommunity_add_val(struct lcommunity *lcom, for (p = lcom->val; c < lcom->size; p += LCOMMUNITY_SIZE, c++) { ret = memcmp(p, lval->val, LCOMMUNITY_SIZE); if (ret == 0) - return 0; + return false; if (ret > 0) break; } @@ -94,7 +96,7 @@ static int lcommunity_add_val(struct lcommunity *lcom, (lcom->size - 1 - c) * LCOMMUNITY_SIZE); memcpy(lcom->val + c * LCOMMUNITY_SIZE, lval->val, LCOMMUNITY_SIZE); - return 1; + return true; } /* This function takes pointer to Large Communites strucutre then @@ -178,7 +180,7 @@ static void set_lcommunity_string(struct lcommunity *lcom, bool make_json) int i; int len; char *str_buf; - uint8_t *pnt; + const uint8_t *pnt; uint32_t global, local1, local2; json_object *json_lcommunity_list = NULL; json_object *json_string = NULL; @@ -456,7 +458,7 @@ struct lcommunity *lcommunity_str2com(const char *str) return lcom; } -int lcommunity_include(struct lcommunity *lcom, uint8_t *ptr) +bool lcommunity_include(struct lcommunity *lcom, uint8_t *ptr) { int i; uint8_t *lcom_ptr; @@ -464,25 +466,25 @@ int lcommunity_include(struct lcommunity *lcom, uint8_t *ptr) for (i = 0; i < lcom->size; i++) { lcom_ptr = lcom->val + (i * LCOMMUNITY_SIZE); if (memcmp(ptr, lcom_ptr, LCOMMUNITY_SIZE) == 0) - return 1; + return true; } - return 0; + return false; } -int lcommunity_match(const struct lcommunity *lcom1, - const struct lcommunity *lcom2) +bool lcommunity_match(const struct lcommunity *lcom1, + const struct lcommunity *lcom2) { int i = 0; int j = 0; if (lcom1 == NULL && lcom2 == NULL) - return 1; + return true; if (lcom1 == NULL || lcom2 == NULL) - return 0; + return false; if (lcom1->size < lcom2->size) - return 0; + return false; /* Every community on com2 needs to be on com1 for this to match */ while (i < lcom1->size && j < lcom2->size) { @@ -494,9 +496,9 @@ int lcommunity_match(const struct lcommunity *lcom1, } if (j == lcom2->size) - return 1; + return true; else - return 0; + return false; } /* Delete one lcommunity. */ diff --git a/bgpd/bgp_lcommunity.h b/bgpd/bgp_lcommunity.h index 7d63f4d26a..c96df8482d 100644 --- a/bgpd/bgp_lcommunity.h +++ b/bgpd/bgp_lcommunity.h @@ -2,7 +2,7 @@ * * Copyright (C) 2016 Keyur Patel <keyur@arrcus.com> * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). 
* * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software @@ -66,10 +66,10 @@ extern void lcommunity_unintern(struct lcommunity **); extern unsigned int lcommunity_hash_make(const void *); extern struct hash *lcommunity_hash(void); extern struct lcommunity *lcommunity_str2com(const char *); -extern int lcommunity_match(const struct lcommunity *, - const struct lcommunity *); +extern bool lcommunity_match(const struct lcommunity *, + const struct lcommunity *); extern char *lcommunity_str(struct lcommunity *, bool make_json); -extern int lcommunity_include(struct lcommunity *lcom, uint8_t *ptr); +extern bool lcommunity_include(struct lcommunity *lcom, uint8_t *ptr); extern void lcommunity_del_val(struct lcommunity *lcom, uint8_t *ptr); extern void bgp_compute_aggregate_lcommunity( diff --git a/bgpd/bgp_mac.c b/bgpd/bgp_mac.c index 537bb45455..af20e5fdd7 100644 --- a/bgpd/bgp_mac.c +++ b/bgpd/bgp_mac.c @@ -142,13 +142,15 @@ static void bgp_process_mac_rescan_table(struct bgp *bgp, struct peer *peer, for (prn = bgp_table_top(table); prn; prn = bgp_route_next(prn)) { struct bgp_table *sub = prn->info; + const struct prefix *prn_p = bgp_node_get_prefix(prn); if (!sub) continue; for (rn = bgp_table_top(sub); rn; rn = bgp_route_next(rn)) { bool rn_affected; - struct prefix_evpn *pevpn = (struct prefix_evpn *)&rn->p; + const struct prefix *p = bgp_node_get_prefix(rn); + const struct prefix_evpn *pevpn = (const struct prefix_evpn *)p; struct prefix_rd prd; uint32_t num_labels = 0; mpls_label_t *label_pnt = NULL; @@ -156,7 +158,7 @@ static void bgp_process_mac_rescan_table(struct bgp *bgp, struct peer *peer, if (pevpn->family == AF_EVPN && pevpn->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE && - memcmp(&rn->p.u.prefix_evpn.macip_addr.mac, + memcmp(&p->u.prefix_evpn.macip_addr.mac, macaddr, ETH_ALEN) == 0) rn_affected = true; else @@ -185,15 +187,15 @@ static void bgp_process_mac_rescan_table(struct bgp *bgp, struct peer *peer, prd.family = AF_UNSPEC; prd.prefixlen = 64; - memcpy(&prd.val, &prn->p.u.val, 8); + memcpy(&prd.val, prn_p->u.val, 8); if (CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)) { - if (bgp_debug_update(peer, &rn->p, NULL, 1)) { + if (bgp_debug_update(peer, p, NULL, 1)) { char pfx_buf[BGP_PRD_PATH_STRLEN]; bgp_debug_rdpfxpath2str( AFI_L2VPN, SAFI_EVPN, &prd, - &rn->p, label_pnt, num_labels, + p, label_pnt, num_labels, pi->addpath_rx_id ? 1 : 0, pi->addpath_rx_id, pfx_buf, sizeof(pfx_buf)); @@ -205,7 +207,7 @@ static void bgp_process_mac_rescan_table(struct bgp *bgp, struct peer *peer, } memcpy(&evpn, &pi->attr->evpn_overlay, sizeof(evpn)); - int32_t ret = bgp_update(peer, &rn->p, + int32_t ret = bgp_update(peer, p, pi->addpath_rx_id, pi->attr, AFI_L2VPN, SAFI_EVPN, ZEBRA_ROUTE_BGP, @@ -358,7 +360,7 @@ void bgp_mac_del_mac_entry(struct interface *ifp) * An example: router-mac attribute in any of evpn update * requires to compare against local mac. */ -bool bgp_mac_exist(struct ethaddr *mac) +bool bgp_mac_exist(const struct ethaddr *mac) { struct bgp_self_mac lookup; struct bgp_self_mac *bsm; @@ -379,9 +381,9 @@ bool bgp_mac_exist(struct ethaddr *mac) * mac against any of local assigned (SVIs) MAC * address. 
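Editor's note: the comment above describes comparing a learnt MAC against locally assigned (SVI) MACs. A toy standalone version of that membership test follows; FRR keeps the self MACs in a hash keyed per interface, here a flat array with memcmp() is enough to show the shape, and all demo_* names are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

/* Stand-in for the self-MAC table. */
static const unsigned char demo_local_macs[][DEMO_ETH_ALEN] = {
	{ 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 },
	{ 0x52, 0x54, 0x00, 0xab, 0xcd, 0xef },
};

static bool demo_mac_is_local(const unsigned char *mac)
{
	for (size_t i = 0;
	     i < sizeof(demo_local_macs) / sizeof(demo_local_macs[0]); i++)
		if (memcmp(mac, demo_local_macs[i], DEMO_ETH_ALEN) == 0)
			return true;
	return false;
}

int main(void)
{
	const unsigned char probe[DEMO_ETH_ALEN] = { 0x52, 0x54, 0x00,
						     0x12, 0x34, 0x56 };

	printf("local: %s\n", demo_mac_is_local(probe) ? "yes" : "no");
	return 0;
}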
*/ -bool bgp_mac_entry_exists(struct prefix *p) +bool bgp_mac_entry_exists(const struct prefix *p) { - struct prefix_evpn *pevpn = (struct prefix_evpn *)p; + const struct prefix_evpn *pevpn = (const struct prefix_evpn *)p; if (pevpn->family != AF_EVPN) return false; diff --git a/bgpd/bgp_mac.h b/bgpd/bgp_mac.h index 68449b574a..4b94d80d1a 100644 --- a/bgpd/bgp_mac.h +++ b/bgpd/bgp_mac.h @@ -36,7 +36,7 @@ void bgp_mac_dump_table(struct vty *vty); /* * Function to lookup the prefix and see if we have a matching mac */ -bool bgp_mac_entry_exists(struct prefix *p); -bool bgp_mac_exist(struct ethaddr *mac); +bool bgp_mac_entry_exists(const struct prefix *p); +bool bgp_mac_exist(const struct ethaddr *mac); #endif diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index c4ece2f082..8f0ccca742 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -228,7 +228,7 @@ static __attribute__((__noreturn__)) void bgp_exit(int status) community_list_terminate(bgp_clist); bgp_vrf_terminate(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_zebra_destroy(); #endif bgp_zebra_destroy(); @@ -275,7 +275,7 @@ static int bgp_vrf_enable(struct vrf *vrf) XFREE(MTYPE_BGP, bgp->name_pretty); bgp->name_pretty = XSTRDUP(MTYPE_BGP, "VRF default"); bgp->inst_type = BGP_INSTANCE_TYPE_DEFAULT; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (!bgp->rfapi) { bgp->rfapi = bgp_rfapi_new(bgp); assert(bgp->rfapi); @@ -425,17 +425,21 @@ int main(int argc, char **argv) else bgp_port = tmp_port; break; - case 'e': - multipath_num = atoi(optarg); - if (multipath_num > MULTIPATH_NUM - || multipath_num <= 0) { + case 'e': { + unsigned long int parsed_multipath = + strtoul(optarg, NULL, 10); + if (parsed_multipath == 0 + || parsed_multipath > MULTIPATH_NUM + || parsed_multipath > UINT_MAX) { flog_err( EC_BGP_MULTIPATH, - "Multipath Number specified must be less than %d and greater than 0", + "Multipath Number specified must be less than %u and greater than 0", MULTIPATH_NUM); return 1; } + multipath_num = parsed_multipath; break; + } case 'l': bgp_address = optarg; /* listenon implies -n */ diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index 9e73acdc01..f66f56cb49 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -390,7 +390,7 @@ uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path) * Sets the count of multipaths into bestpath's mpath element */ static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, - uint32_t count) + uint16_t count) { struct bgp_path_info_mpath *mpath; if (!count && !path->mpath) @@ -402,6 +402,39 @@ static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, } /* + * bgp_path_info_mpath_lb_update + * + * Update cumulative info related to link-bandwidth + */ +static void bgp_path_info_mpath_lb_update(struct bgp_path_info *path, bool set, + bool all_paths_lb, uint64_t cum_bw) +{ + struct bgp_path_info_mpath *mpath; + + if ((mpath = path->mpath) == NULL) { + if (!set) + return; + mpath = bgp_path_info_mpath_get(path); + if (!mpath) + return; + } + if (set) { + if (cum_bw) + SET_FLAG(mpath->mp_flags, BGP_MP_LB_PRESENT); + else + UNSET_FLAG(mpath->mp_flags, BGP_MP_LB_PRESENT); + if (all_paths_lb) + SET_FLAG(mpath->mp_flags, BGP_MP_LB_ALL); + else + UNSET_FLAG(mpath->mp_flags, BGP_MP_LB_ALL); + mpath->cum_bw = cum_bw; + } else { + mpath->mp_flags = 0; + mpath->cum_bw = 0; + } +} + +/* * bgp_path_info_mpath_attr * * Given bestpath bgp_path_info, return aggregated attribute set used @@ -415,6 +448,42 @@ struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path) } /* + * 
bgp_path_info_chkwtd + * + * Return if we should attempt to do weighted ECMP or not + * The path passed in is the bestpath. + */ +bool bgp_path_info_mpath_chkwtd(struct bgp *bgp, struct bgp_path_info *path) +{ + /* Check if told to ignore weights or not multipath */ + if (bgp->lb_handling == BGP_LINK_BW_IGNORE_BW || !path->mpath) + return false; + + /* All paths in multipath should have associated weight (bandwidth) + * unless told explicitly otherwise. + */ + if (bgp->lb_handling != BGP_LINK_BW_SKIP_MISSING && + bgp->lb_handling != BGP_LINK_BW_DEFWT_4_MISSING) + return (path->mpath->mp_flags & BGP_MP_LB_ALL); + + /* At least one path should have bandwidth. */ + return (path->mpath->mp_flags & BGP_MP_LB_PRESENT); +} + +/* + * bgp_path_info_mpath_attr + * + * Given bestpath bgp_path_info, return cumulative bandwidth + * computed for all multipaths with bandwidth info + */ +uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path) +{ + if (!path->mpath) + return 0; + return path->mpath->cum_bw; +} + +/* * bgp_path_info_mpath_attr_set * * Sets the aggregated attribute into bestpath's mpath element @@ -444,10 +513,13 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, struct bgp_maxpaths_cfg *mpath_cfg) { uint16_t maxpaths, mpath_count, old_mpath_count; + uint32_t bwval; + uint64_t cum_bw, old_cum_bw; struct listnode *mp_node, *mp_next_node; struct bgp_path_info *cur_mpath, *new_mpath, *next_mpath, *prev_mpath; int mpath_changed, debug; - char pfx_buf[PREFIX2STR_BUFFER], nh_buf[2][INET6_ADDRSTRLEN]; + char nh_buf[2][INET6_ADDRSTRLEN]; + bool all_paths_lb; char path_buf[PATH_ADDPATH_STR_BUFFER]; mpath_changed = 0; @@ -455,12 +527,10 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, mpath_count = 0; cur_mpath = NULL; old_mpath_count = 0; + old_cum_bw = cum_bw = 0; prev_mpath = new_best; mp_node = listhead(mp_list); - debug = bgp_debug_bestpath(&rn->p); - - if (debug) - prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); + debug = bgp_debug_bestpath(rn); if (new_best) { mpath_count++; @@ -474,15 +544,18 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, if (old_best) { cur_mpath = bgp_path_info_mpath_first(old_best); old_mpath_count = bgp_path_info_mpath_count(old_best); + old_cum_bw = bgp_path_info_mpath_cumbw(old_best); bgp_path_info_mpath_count_set(old_best, 0); + bgp_path_info_mpath_lb_update(old_best, false, false, 0); bgp_path_info_mpath_dequeue(old_best); } if (debug) zlog_debug( - "%s: starting mpath update, newbest %s num candidates %d old-mpath-count %d", - pfx_buf, new_best ? new_best->peer->host : "NONE", - mp_list ? listcount(mp_list) : 0, old_mpath_count); + "%pRN: starting mpath update, newbest %s num candidates %d old-mpath-count %d old-cum-bw u%" PRIu64, + rn, new_best ? new_best->peer->host : "NONE", + mp_list ? listcount(mp_list) : 0, + old_mpath_count, old_cum_bw); /* * We perform an ordered walk through both lists in parallel. @@ -495,6 +568,7 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, * Note that new_best might be somewhere in the mp_list, so we need * to skip over it */ + all_paths_lb = true; /* We'll reset if any path doesn't have LB. */ while (mp_node || cur_mpath) { struct bgp_path_info *tmp_info; @@ -513,8 +587,8 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, if (debug) zlog_debug( - "%s: comparing candidate %s with existing mpath %s", - pfx_buf, + "%pRN: comparing candidate %s with existing mpath %s", + rn, tmp_info ? tmp_info->peer->host : "NONE", cur_mpath ? 
cur_mpath->peer->host : "NONE"); @@ -533,12 +607,17 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, cur_mpath); prev_mpath = cur_mpath; mpath_count++; + if (ecommunity_linkbw_present( + cur_mpath->attr->ecommunity, &bwval)) + cum_bw += bwval; + else + all_paths_lb = false; if (debug) { bgp_path_info_path_with_addpath_rx_str( cur_mpath, path_buf); zlog_debug( - "%s: %s is still multipath, cur count %d", - pfx_buf, path_buf, mpath_count); + "%pRN: %s is still multipath, cur count %d", + rn, path_buf, mpath_count); } } else { mpath_changed = 1; @@ -546,8 +625,8 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, bgp_path_info_path_with_addpath_rx_str( cur_mpath, path_buf); zlog_debug( - "%s: remove mpath %s nexthop %s, cur count %d", - pfx_buf, path_buf, + "%pRN: remove mpath %s nexthop %s, cur count %d", + rn, path_buf, inet_ntop(AF_INET, &cur_mpath->attr ->nexthop, @@ -579,8 +658,8 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, bgp_path_info_path_with_addpath_rx_str( cur_mpath, path_buf); zlog_debug( - "%s: remove mpath %s nexthop %s, cur count %d", - pfx_buf, path_buf, + "%pRN: remove mpath %s nexthop %s, cur count %d", + rn, path_buf, inet_ntop(AF_INET, &cur_mpath->attr->nexthop, nh_buf[0], sizeof(nh_buf[0])), @@ -620,12 +699,17 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, prev_mpath = new_mpath; mpath_changed = 1; mpath_count++; + if (ecommunity_linkbw_present( + new_mpath->attr->ecommunity, &bwval)) + cum_bw += bwval; + else + all_paths_lb = false; if (debug) { bgp_path_info_path_with_addpath_rx_str( new_mpath, path_buf); zlog_debug( - "%s: add mpath %s nexthop %s, cur count %d", - pfx_buf, path_buf, + "%pRN: add mpath %s nexthop %s, cur count %d", + rn, path_buf, inet_ntop(AF_INET, &new_mpath->attr ->nexthop, @@ -639,16 +723,30 @@ void bgp_path_info_mpath_update(struct bgp_node *rn, } if (new_best) { + bgp_path_info_mpath_count_set(new_best, mpath_count - 1); + if (mpath_count <= 1 || + !ecommunity_linkbw_present( + new_best->attr->ecommunity, &bwval)) + all_paths_lb = false; + else + cum_bw += bwval; + bgp_path_info_mpath_lb_update(new_best, true, + all_paths_lb, cum_bw); + if (debug) zlog_debug( - "%s: New mpath count (incl newbest) %d mpath-change %s", - pfx_buf, mpath_count, - mpath_changed ? "YES" : "NO"); + "%pRN: New mpath count (incl newbest) %d mpath-change %s" + " all_paths_lb %d cum_bw u%" PRIu64, + rn, mpath_count, + mpath_changed ? "YES" : "NO", + all_paths_lb, cum_bw); - bgp_path_info_mpath_count_set(new_best, mpath_count - 1); if (mpath_changed || (bgp_path_info_mpath_count(new_best) != old_mpath_count)) SET_FLAG(new_best->flags, BGP_PATH_MULTIPATH_CHG); + if ((mpath_count - 1) != old_mpath_count || + old_cum_bw != cum_bw) + SET_FLAG(new_best->flags, BGP_PATH_LINK_BW_CHG); } } @@ -673,6 +771,7 @@ void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best) bgp_path_info_mpath_count_set(dmed_best, 0); UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(dmed_best->flags, BGP_PATH_LINK_BW_CHG); assert(bgp_path_info_mpath_first(dmed_best) == NULL); } diff --git a/bgpd/bgp_mpath.h b/bgpd/bgp_mpath.h index d15f3c9035..34f94b256b 100644 --- a/bgpd/bgp_mpath.h +++ b/bgpd/bgp_mpath.h @@ -36,10 +36,18 @@ struct bgp_path_info_mpath { struct bgp_path_info *mp_info; /* When attached to best path, the number of selected multipaths */ - uint32_t mp_count; + uint16_t mp_count; + + /* Flags - relevant as noted. 
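Editor's note: the new cum_bw/mp_flags bookkeeping and bgp_path_info_mpath_lb_update() track whether every multipath carries a link-bandwidth extended community and what the bandwidths sum to. The sketch below shows one plausible way such a cumulative value turns into per-path weights; the normalization to a 0..100 share is an assumption made for illustration, not necessarily the exact value FRR programs toward the FIB.

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_path {
	const char *peer;
	bool has_lb;  /* link-bandwidth extended community present */
	uint32_t bw;  /* advertised bandwidth value */
};

int main(void)
{
	struct demo_path paths[] = {
		{ "10.0.0.1", true, 100 },
		{ "10.0.0.2", true, 300 },
	};
	size_t n = sizeof(paths) / sizeof(paths[0]);
	uint64_t cum_bw = 0;
	bool all_paths_lb = true;

	/* Mirror of the accumulation loop: sum bandwidths and remember
	 * whether every path carried one. */
	for (size_t i = 0; i < n; i++) {
		if (paths[i].has_lb)
			cum_bw += paths[i].bw;
		else
			all_paths_lb = false;
	}

	if (!all_paths_lb || cum_bw == 0) {
		puts("fall back to equal-cost multipath");
		return 0;
	}

	/* Assumed normalization: weight each nexthop by its share. */
	for (size_t i = 0; i < n; i++)
		printf("%s weight %" PRIu64 "\n", paths[i].peer,
		       (uint64_t)paths[i].bw * 100 / cum_bw);
	return 0;
}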
*/ + uint16_t mp_flags; +#define BGP_MP_LB_PRESENT 0x1 /* Link-bandwidth present for >= 1 path */ +#define BGP_MP_LB_ALL 0x2 /* Link-bandwidth present for all multipaths */ /* Aggregated attribute for advertising multipath route */ struct attr *mp_attr; + + /* Cumulative bandiwdth of all multipaths - attached to best path. */ + uint64_t cum_bw; }; /* Functions to support maximum-paths configuration */ @@ -78,5 +86,8 @@ bgp_path_info_mpath_next(struct bgp_path_info *path); /* Accessors for multipath information */ extern uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path); extern struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path); +extern bool bgp_path_info_mpath_chkwtd(struct bgp *bgp, + struct bgp_path_info *path); +extern uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path); #endif /* _QUAGGA_BGP_MPATH_H */ diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index 8758d0ca78..46dcd2864e 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -48,7 +48,7 @@ #include "bgpd/bgp_nht.h" #include "bgpd/bgp_evpn.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #endif @@ -214,7 +214,7 @@ int bgp_nlri_parse_vpn(struct peer *peer, struct attr *attr, decode_rd_ip(pnt + 5, &rd_ip); break; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC case RD_TYPE_VNC_ETH: break; #endif @@ -244,7 +244,7 @@ int bgp_nlri_parse_vpn(struct peer *peer, struct attr *attr, if (pnt != lim) { flog_err( EC_BGP_UPDATE_RCV, - "%s [Error] Update packet error / VPN (%zu data remaining after parsing)", + "%s [Error] Update packet error / VPN (%td data remaining after parsing)", peer->host, lim - pnt); return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH; } @@ -385,13 +385,13 @@ int vpn_leak_label_callback( return 0; } -static int ecom_intersect(struct ecommunity *e1, struct ecommunity *e2) +static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2) { int i; int j; if (!e1 || !e2) - return 0; + return false; for (i = 0; i < e1->size; ++i) { for (j = 0; j < e2->size; ++j) { @@ -399,11 +399,11 @@ static int ecom_intersect(struct ecommunity *e1, struct ecommunity *e2) e2->val + (j * ECOMMUNITY_SIZE), ECOMMUNITY_SIZE)) { - return 1; + return true; } } } - return 0; + return false; } static bool labels_same(struct bgp_path_info *bpi, mpls_label_t *label, @@ -468,18 +468,16 @@ leak_update(struct bgp *bgp, /* destination bgp instance */ struct bgp *bgp_orig, struct prefix *nexthop_orig, int nexthop_self_flag, int debug) { - struct prefix *p = &bn->p; + const struct prefix *p = bgp_node_get_prefix(bn); struct bgp_path_info *bpi; struct bgp_path_info *bpi_ultimate; struct bgp_path_info *new; - char buf_prefix[PREFIX_STRLEN]; - if (debug) { - prefix2str(&bn->p, buf_prefix, sizeof(buf_prefix)); - zlog_debug("%s: entry: leak-to=%s, p=%s, type=%d, sub_type=%d", - __func__, bgp->name_pretty, buf_prefix, - source_bpi->type, source_bpi->sub_type); - } + if (debug) + zlog_debug( + "%s: entry: leak-to=%s, p=%pRN, type=%d, sub_type=%d", + __func__, bgp->name_pretty, bn, source_bpi->type, + source_bpi->sub_type); /* * Routes that are redistributed into BGP from zebra do not get @@ -518,9 +516,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */ bgp_attr_unintern(&new_attr); if (debug) zlog_debug( - "%s: ->%s: %s: Found route, no change", - __func__, bgp->name_pretty, - buf_prefix); + "%s: ->%s: %pRN: Found route, no change", + __func__, bgp->name_pretty, bn); return NULL; } @@ -580,8 +577,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */ 
bgp_unlock_node(bn); if (debug) - zlog_debug("%s: ->%s: %s Found route, changed attr", - __func__, bgp->name_pretty, buf_prefix); + zlog_debug("%s: ->%s: %pRN Found route, changed attr", + __func__, bgp->name_pretty, bn); return bpi; } @@ -645,8 +642,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */ bgp_process(bgp, bn, afi, safi); if (debug) - zlog_debug("%s: ->%s: %s: Added new route", __func__, - bgp->name_pretty, buf_prefix); + zlog_debug("%s: ->%s: %pRN: Added new route", __func__, + bgp->name_pretty, bn); return new; } @@ -657,7 +654,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */ struct bgp_path_info *path_vrf) /* route */ { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct prefix *p = &path_vrf->net->p; + const struct prefix *p = bgp_node_get_prefix(path_vrf->net); afi_t afi = family2afi(p->family); struct attr static_attr = {0}; struct attr *new_attr = NULL; @@ -744,10 +741,15 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */ struct ecommunity *old_ecom; struct ecommunity *new_ecom; + /* Export with the 'from' instance's export RTs. */ + /* If doing VRF-to-VRF leaking, strip existing RTs first. */ old_ecom = static_attr.ecommunity; if (old_ecom) { - new_ecom = ecommunity_merge( - ecommunity_dup(old_ecom), + new_ecom = ecommunity_dup(old_ecom); + if (CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST], + BGP_CONFIG_VRF_TO_VRF_EXPORT)) + ecommunity_strip_rts(new_ecom); + new_ecom = ecommunity_merge(new_ecom, bgp_vrf->vpn_policy[afi] .rtlist[BGP_VPN_POLICY_DIR_TOVPN]); if (!old_ecom->refcnt) @@ -886,19 +888,17 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */ struct bgp_path_info *path_vrf) /* route */ { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct prefix *p = &path_vrf->net->p; + const struct prefix *p = bgp_node_get_prefix(path_vrf->net); afi_t afi = family2afi(p->family); safi_t safi = SAFI_MPLS_VPN; struct bgp_path_info *bpi; struct bgp_node *bn; const char *debugmsg; - char buf_prefix[PREFIX_STRLEN]; if (debug) { - prefix2str(p, buf_prefix, sizeof(buf_prefix)); zlog_debug( - "%s: entry: leak-from=%s, p=%s, type=%d, sub_type=%d", - __func__, bgp_vrf->name_pretty, buf_prefix, + "%s: entry: leak-from=%s, p=%pRN, type=%d, sub_type=%d", + __func__, bgp_vrf->name_pretty, path_vrf->net, path_vrf->type, path_vrf->sub_type); } @@ -975,14 +975,10 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */ continue; for (bn = bgp_table_top(table); bn; bn = bgp_route_next(bn)) { - - char buf[PREFIX2STR_BUFFER]; - bpi = bgp_node_get_bgp_path_info(bn); if (debug && bpi) { - zlog_debug( - "%s: looking at prefix %s", __func__, - prefix2str(&bn->p, buf, sizeof(buf))); + zlog_debug("%s: looking at prefix %pRN", + __func__, bn); } for (; bpi; bpi = bpi->next) { @@ -1000,8 +996,10 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */ if (debug) zlog_debug("%s: deleting it", __func__); - bgp_aggregate_decrement(bgp_vpn, &bn->p, - bpi, afi, safi); + bgp_aggregate_decrement( + bgp_vpn, + bgp_node_get_prefix(bn), bpi, + afi, safi); bgp_path_info_delete(bn, bpi); bgp_process(bgp_vpn, bn, afi, safi); } @@ -1044,7 +1042,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */ struct bgp *bgp_vpn, /* from */ struct bgp_path_info *path_vpn) /* route */ { - struct prefix *p = &path_vpn->net->p; + const struct prefix *p = bgp_node_get_prefix(path_vpn->net); afi_t afi = family2afi(p->family); struct attr static_attr = {0}; @@ -1087,6 +1085,20 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */ /* 
shallow copy */ static_attr = *path_vpn->attr; + struct ecommunity *old_ecom; + struct ecommunity *new_ecom; + + /* If doing VRF-to-VRF leaking, strip RTs. */ + old_ecom = static_attr.ecommunity; + if (old_ecom && CHECK_FLAG(bgp_vrf->af_flags[afi][safi], + BGP_CONFIG_VRF_TO_VRF_IMPORT)) { + new_ecom = ecommunity_dup(old_ecom); + ecommunity_strip_rts(new_ecom); + static_attr.ecommunity = new_ecom; + if (!old_ecom->refcnt) + ecommunity_free(&old_ecom); + } + /* * Nexthop: stash and clear * @@ -1209,12 +1221,9 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */ } } - if (debug) { - char buf_prefix[PREFIX_STRLEN]; - prefix2str(p, buf_prefix, sizeof(buf_prefix)); - zlog_debug("%s: pfx %s: num_labels %d", __func__, buf_prefix, - num_labels); - } + if (debug) + zlog_debug("%s: pfx %pRN: num_labels %d", __func__, + path_vpn->net, num_labels); /* * For VRF-2-VRF route-leaking, @@ -1254,7 +1263,7 @@ void vpn_leak_to_vrf_update(struct bgp *bgp_vpn, /* from */ void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn, /* from */ struct bgp_path_info *path_vpn) /* route */ { - struct prefix *p; + const struct prefix *p; afi_t afi; safi_t safi = SAFI_UNICAST; struct bgp *bgp; @@ -1262,21 +1271,18 @@ void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn, /* from */ struct bgp_node *bn; struct bgp_path_info *bpi; const char *debugmsg; - char buf_prefix[PREFIX_STRLEN]; int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF); - if (debug) { - prefix2str(&path_vpn->net->p, buf_prefix, sizeof(buf_prefix)); - zlog_debug("%s: entry: p=%s, type=%d, sub_type=%d", __func__, - buf_prefix, path_vpn->type, path_vpn->sub_type); - } + if (debug) + zlog_debug("%s: entry: p=%pRN, type=%d, sub_type=%d", __func__, + path_vpn->net, path_vpn->type, path_vpn->sub_type); if (debug) zlog_debug("%s: start (path_vpn=%p)", __func__, path_vpn); if (!path_vpn->net) { -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* BGP_ROUTE_RFP routes do not have path_vpn->net set (yet) */ if (path_vpn->type == ZEBRA_ROUTE_BGP && path_vpn->sub_type == BGP_ROUTE_RFP) { @@ -1291,7 +1297,7 @@ void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn, /* from */ return; } - p = &path_vpn->net->p; + p = bgp_node_get_prefix(path_vpn->net); afi = family2afi(p->family); /* Loop over VRFs */ @@ -1362,8 +1368,9 @@ void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, /* to */ && is_pi_family_vpn(bpi->extra->parent)) { /* delete route */ - bgp_aggregate_decrement(bgp_vrf, &bn->p, bpi, - afi, safi); + bgp_aggregate_decrement(bgp_vrf, + bgp_node_get_prefix(bn), + bpi, afi, safi); bgp_path_info_delete(bn, bpi); bgp_process(bgp_vrf, bn, afi, safi); } @@ -1386,7 +1393,7 @@ void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, /* to */ */ for (prn = bgp_table_top(bgp_vpn->rib[afi][safi]); prn; prn = bgp_route_next(prn)) { - + const struct prefix *p = bgp_node_get_prefix(prn); struct bgp_table *table; struct bgp_node *bn; struct bgp_path_info *bpi; @@ -1394,7 +1401,7 @@ void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, /* to */ memset(&prd, 0, sizeof(prd)); prd.family = AF_UNSPEC; prd.prefixlen = 64; - memcpy(prd.val, prn->p.u.val, 8); + memcpy(prd.val, &p->u.val, 8); /* This is the per-RD table of prefixes */ table = bgp_node_get_bgp_table_info(prn); @@ -1786,8 +1793,9 @@ void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp, vpn_leak_prechange(idir, afi, bgp_get_default(), to_bgp); if (to_bgp->vpn_policy[afi].import_vrf->count == 0) { - UNSET_FLAG(to_bgp->af_flags[afi][safi], - BGP_CONFIG_VRF_TO_VRF_IMPORT); + if (!to_bgp->vpn_policy[afi].rmap[idir]) + 
UNSET_FLAG(to_bgp->af_flags[afi][safi], + BGP_CONFIG_VRF_TO_VRF_IMPORT); if (to_bgp->vpn_policy[afi].rtlist[idir]) ecommunity_free(&to_bgp->vpn_policy[afi].rtlist[idir]); } else { diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index ab0c3a3f11..bfce61c2af 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -49,7 +49,7 @@ DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Address Intf String"); char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size) { - prefix2str(&(bnc->node->p), buf, size); + prefix2str(bgp_node_get_prefix(bnc->node), buf, size); return buf; } @@ -436,8 +436,7 @@ void bgp_connected_delete(struct bgp *bgp, struct connected *ifc) bgp_address_del(bgp, ifc, addr); - rn = bgp_node_lookup(bgp->connected_table[AFI_IP6], - (struct prefix *)&p); + rn = bgp_node_lookup(bgp->connected_table[AFI_IP6], &p); } if (!rn) @@ -470,13 +469,13 @@ static void bgp_connected_cleanup(struct route_table *table, } } -int bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, uint8_t sub_type, - struct attr *attr, struct bgp_node *rn) +bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, + uint8_t sub_type, struct attr *attr, struct bgp_node *rn) { uint8_t new_afi = afi == AFI_IP ? AF_INET : AF_INET6; struct bgp_addr tmp_addr = {{0}}, *addr = NULL; struct tip_addr tmp_tip, *tip = NULL; - + const struct prefix *p = bgp_node_get_prefix(rn); bool is_bgp_static_route = ((type == ZEBRA_ROUTE_BGP) && (sub_type == BGP_ROUTE_STATIC)) ? true @@ -489,8 +488,8 @@ int bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, uint8_t sub_type, switch (new_afi) { case AF_INET: if (is_bgp_static_route) { - tmp_addr.p.u.prefix4 = rn->p.u.prefix4; - tmp_addr.p.prefixlen = rn->p.prefixlen; + tmp_addr.p.u.prefix4 = p->u.prefix4; + tmp_addr.p.prefixlen = p->prefixlen; } else { /* Here we need to find out which nexthop to be used*/ if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) { @@ -505,13 +504,13 @@ int bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, uint8_t sub_type, attr->mp_nexthop_global_in; tmp_addr.p.prefixlen = IPV4_MAX_BITLEN; } else - return 0; + return false; } break; case AF_INET6: if (is_bgp_static_route) { - tmp_addr.p.u.prefix6 = rn->p.u.prefix6; - tmp_addr.p.prefixlen = rn->p.prefixlen; + tmp_addr.p.u.prefix6 = p->u.prefix6; + tmp_addr.p.prefixlen = p->prefixlen; } else { tmp_addr.p.u.prefix6 = attr->mp_nexthop_global; tmp_addr.p.prefixlen = IPV6_MAX_BITLEN; @@ -523,7 +522,7 @@ int bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, uint8_t sub_type, addr = hash_lookup(bgp->address_hash, &tmp_addr); if (addr) - return 1; + return true; if (new_afi == AF_INET) { memset(&tmp_tip, 0, sizeof(struct tip_addr)); @@ -539,13 +538,13 @@ int bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, uint8_t sub_type, tip = hash_lookup(bgp->tip_hash, &tmp_tip); if (tip) - return 1; + return true; } - return 0; + return false; } -int bgp_multiaccess_check_v4(struct in_addr nexthop, struct peer *peer) +bool bgp_multiaccess_check_v4(struct in_addr nexthop, struct peer *peer) { struct bgp_node *rn1; struct bgp_node *rn2; @@ -558,7 +557,7 @@ int bgp_multiaccess_check_v4(struct in_addr nexthop, struct peer *peer) rn1 = bgp_node_match(peer->bgp->connected_table[AFI_IP], &p); if (!rn1) - return 0; + return false; p.family = AF_INET; p.prefixlen = IPV4_MAX_BITLEN; @@ -567,18 +566,18 @@ int bgp_multiaccess_check_v4(struct in_addr nexthop, struct peer *peer) rn2 = bgp_node_match(peer->bgp->connected_table[AFI_IP], &p); if (!rn2) { bgp_unlock_node(rn1); - 
return 0; + return false; } - ret = (rn1 == rn2) ? 1 : 0; + ret = (rn1 == rn2); bgp_unlock_node(rn1); bgp_unlock_node(rn2); - return (ret); + return ret; } -int bgp_multiaccess_check_v6(struct in6_addr nexthop, struct peer *peer) +bool bgp_multiaccess_check_v6(struct in6_addr nexthop, struct peer *peer) { struct bgp_node *rn1; struct bgp_node *rn2; @@ -591,7 +590,7 @@ int bgp_multiaccess_check_v6(struct in6_addr nexthop, struct peer *peer) rn1 = bgp_node_match(peer->bgp->connected_table[AFI_IP6], &p); if (!rn1) - return 0; + return false; p.family = AF_INET6; p.prefixlen = IPV6_MAX_BITLEN; @@ -600,10 +599,10 @@ int bgp_multiaccess_check_v6(struct in6_addr nexthop, struct peer *peer) rn2 = bgp_node_match(peer->bgp->connected_table[AFI_IP6], &p); if (!rn2) { bgp_unlock_node(rn1); - return 0; + return false; } - ret = (rn1 == rn2) ? 1 : 0; + ret = (rn1 == rn2); bgp_unlock_node(rn1); bgp_unlock_node(rn2); @@ -611,8 +610,9 @@ int bgp_multiaccess_check_v6(struct in6_addr nexthop, struct peer *peer) return ret; } -int bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop, - struct update_subgroup *subgrp) +bool bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop, + struct update_subgroup *subgrp, + struct peer *exclude) { struct bgp_node *rn1 = NULL, *rn2 = NULL; struct peer_af *paf = NULL; @@ -629,16 +629,19 @@ int bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop, bgp = SUBGRP_INST(subgrp); rn1 = bgp_node_match(bgp->connected_table[AFI_IP6], &np); if (!rn1) - return 0; + return false; SUBGRP_FOREACH_PEER (subgrp, paf) { + /* Skip peer we're told to exclude - e.g., source of route. */ + if (paf->peer == exclude) + continue; p.u.prefix6 = paf->peer->su.sin6.sin6_addr; rn2 = bgp_node_match(bgp->connected_table[AFI_IP6], &p); if (rn1 == rn2) { bgp_unlock_node(rn1); bgp_unlock_node(rn2); - return 1; + return true; } if (rn2) @@ -646,11 +649,12 @@ int bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop, } bgp_unlock_node(rn1); - return 0; + return false; } -int bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, - struct update_subgroup *subgrp) +bool bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, + struct update_subgroup *subgrp, + struct peer *exclude) { struct bgp_node *rn1, *rn2; struct peer_af *paf; @@ -667,16 +671,20 @@ int bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, bgp = SUBGRP_INST(subgrp); rn1 = bgp_node_match(bgp->connected_table[AFI_IP], &np); if (!rn1) - return 0; + return false; SUBGRP_FOREACH_PEER (subgrp, paf) { + /* Skip peer we're told to exclude - e.g., source of route. 
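Editor's note: bgp_subgrp_multiaccess_check_v4/v6() now take an exclude peer so the route's source can be skipped when testing whether some other subgroup member shares the nexthop's connected subnet. The toy version below uses a fixed IPv4 /24 comparison instead of the real connected-table lookup, and all demo_* names are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified connected-subnet test: same /24 as the nexthop. */
static bool demo_same_subnet(uint32_t a, uint32_t b)
{
	return (a & 0xffffff00u) == (b & 0xffffff00u);
}

/* Return true if any subgroup member other than 'exclude' shares the
 * nexthop's subnet, the simplified shape of the check above. */
static bool demo_subgrp_multiaccess(uint32_t nexthop, const uint32_t *peers,
				    size_t npeers, uint32_t exclude)
{
	for (size_t i = 0; i < npeers; i++) {
		if (peers[i] == exclude)
			continue;
		if (demo_same_subnet(nexthop, peers[i]))
			return true;
	}
	return false;
}

int main(void)
{
	uint32_t peers[] = { 0xc0000201u, 0xc0000202u }; /* 192.0.2.1, .2 */
	uint32_t nexthop = 0xc0000201u;                  /* 192.0.2.1 */

	/* Excluding .1 (the source), .2 still shares the subnet. */
	printf("%s\n",
	       demo_subgrp_multiaccess(nexthop, peers, 2, 0xc0000201u)
		       ? "shared subnet"
		       : "no shared subnet");
	return 0;
}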
*/ + if (paf->peer == exclude) + continue; + p.u.prefix4 = paf->peer->su.sin.sin_addr; rn2 = bgp_node_match(bgp->connected_table[AFI_IP], &p); if (rn1 == rn2) { bgp_unlock_node(rn1); bgp_unlock_node(rn2); - return 1; + return true; } if (rn2) @@ -684,7 +692,7 @@ int bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, } bgp_unlock_node(rn1); - return 0; + return false; } static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, @@ -754,6 +762,7 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, int detail, for (rn = bgp_table_top(table[afi]); rn; rn = bgp_route_next(rn)) { struct peer *peer; + const struct prefix *p = bgp_node_get_prefix(rn); bnc = bgp_node_get_bgp_nexthop_info(rn); if (!bnc) @@ -763,8 +772,7 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, int detail, if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID)) { vty_out(vty, " %s valid [IGP metric %d], #paths %d", - inet_ntop(rn->p.family, - &rn->p.u.prefix, buf, + inet_ntop(p->family, &p->u.prefix, buf, sizeof(buf)), bnc->metric, bnc->path_count); if (peer) @@ -778,8 +786,7 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, int detail, } else { vty_out(vty, " %s invalid", - inet_ntop(rn->p.family, - &rn->p.u.prefix, buf, + inet_ntop(p->family, &p->u.prefix, buf, sizeof(buf))); if (peer) vty_out(vty, ", peer %s", peer->host); diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h index af4c0bc047..461e772119 100644 --- a/bgpd/bgp_nexthop.h +++ b/bgpd/bgp_nexthop.h @@ -81,16 +81,19 @@ struct bgp_addrv6 { extern void bgp_connected_add(struct bgp *bgp, struct connected *c); extern void bgp_connected_delete(struct bgp *bgp, struct connected *c); -extern int bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, - struct update_subgroup *subgrp); -extern int bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop, - struct update_subgroup *subgrp); -extern int bgp_multiaccess_check_v4(struct in_addr nexthop, struct peer *peer); -extern int bgp_multiaccess_check_v6(struct in6_addr nexthop, struct peer *peer); +extern bool bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, + struct update_subgroup *subgrp, + struct peer *exclude); +extern bool bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop, + struct update_subgroup *subgrp, + struct peer *exclude); +extern bool bgp_multiaccess_check_v4(struct in_addr nexthop, struct peer *peer); +extern bool bgp_multiaccess_check_v6(struct in6_addr nexthop, + struct peer *peer); extern int bgp_config_write_scan_time(struct vty *); -extern int bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, - uint8_t sub_type, struct attr *attr, - struct bgp_node *rn); +extern bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, + uint8_t sub_type, struct attr *attr, + struct bgp_node *rn); extern struct bgp_nexthop_cache *bnc_new(void); extern void bnc_free(struct bgp_nexthop_cache *bnc); extern void bnc_nexthop_free(struct bgp_nexthop_cache *bnc); diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index dfa9ac9398..0531542a38 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -130,6 +130,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, struct bgp_nexthop_cache *bnc; struct prefix p; int is_bgp_static_route = 0; + const struct prefix *bnc_p; if (pi) { is_bgp_static_route = ((pi->type == ZEBRA_ROUTE_BGP) @@ -181,6 +182,8 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, } } + bnc_p = bgp_node_get_prefix(bnc->node); + bgp_unlock_node(rn); if (is_bgp_static_route) { 
SET_FLAG(bnc->flags, BGP_STATIC_ROUTE); @@ -226,8 +229,8 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW) { SET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED); SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID); - } else if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED) && - !is_default_host_route(&bnc->node->p)) + } else if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED) + && !is_default_host_route(bnc_p)) register_zebra_rnh(bnc, is_bgp_static_route); if (pi && pi->nexthop != bnc) { @@ -528,7 +531,7 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p) ? 1 : 0; struct bgp_node *net = pi->net; - struct prefix *p_orig = &net->p; + const struct prefix *p_orig = bgp_node_get_prefix(net); if (p_orig->family == AF_FLOWSPEC) { if (!pi->peer) @@ -541,8 +544,8 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p) case AFI_IP: p->family = AF_INET; if (is_bgp_static) { - p->u.prefix4 = pi->net->p.u.prefix4; - p->prefixlen = pi->net->p.prefixlen; + p->u.prefix4 = p_orig->u.prefix4; + p->prefixlen = p_orig->prefixlen; } else { p->u.prefix4 = pi->attr->nexthop; p->prefixlen = IPV4_MAX_BITLEN; @@ -552,8 +555,8 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p) p->family = AF_INET6; if (is_bgp_static) { - p->u.prefix6 = pi->net->p.u.prefix6; - p->prefixlen = pi->net->p.prefixlen; + p->u.prefix6 = p_orig->u.prefix6; + p->prefixlen = p_orig->prefixlen; } else { p->u.prefix6 = pi->attr->mp_nexthop_global; p->prefixlen = IPV6_MAX_BITLEN; @@ -581,7 +584,7 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p) */ static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command) { - struct prefix *p; + const struct prefix *p; bool exact_match = false; int ret; @@ -603,7 +606,7 @@ static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command) "%s: We have not connected yet, cannot send nexthops", __func__); } - p = &(bnc->node->p); + p = bgp_node_get_prefix(bnc->node); if ((command == ZEBRA_NEXTHOP_REGISTER || command == ZEBRA_IMPORT_ROUTE_REGISTER) && (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED) @@ -691,6 +694,7 @@ static void evaluate_paths(struct bgp_nexthop_cache *bnc) struct bgp_table *table; safi_t safi; struct bgp *bgp_path; + const struct prefix *p; if (BGP_DEBUG(nht, NHT)) { char buf[PREFIX2STR_BUFFER]; @@ -710,7 +714,8 @@ static void evaluate_paths(struct bgp_nexthop_cache *bnc) rn = path->net; assert(rn && bgp_node_table(rn)); - afi = family2afi(rn->p.family); + p = bgp_node_get_prefix(rn); + afi = family2afi(p->family); table = bgp_node_table(rn); safi = table->safi; @@ -744,27 +749,23 @@ static void evaluate_paths(struct bgp_nexthop_cache *bnc) bgp_isvalid_nexthop(bnc) ? 1 : 0; } - if (BGP_DEBUG(nht, NHT)) { - char buf[PREFIX_STRLEN]; - - prefix2str(&rn->p, buf, PREFIX_STRLEN); - zlog_debug("%s: prefix %s (vrf %s) %svalid", - __func__, buf, bgp_path->name, - (bnc_is_valid_nexthop ? "" : "not ")); - } + if (BGP_DEBUG(nht, NHT)) + zlog_debug("%s: prefix %pRN (vrf %s) %svalid", __func__, + rn, bgp_path->name, + (bnc_is_valid_nexthop ? "" : "not ")); if ((CHECK_FLAG(path->flags, BGP_PATH_VALID) ? 
1 : 0) != bnc_is_valid_nexthop) { if (CHECK_FLAG(path->flags, BGP_PATH_VALID)) { - bgp_aggregate_decrement(bgp_path, &rn->p, - path, afi, safi); + bgp_aggregate_decrement(bgp_path, p, path, afi, + safi); bgp_path_info_unset_flag(rn, path, BGP_PATH_VALID); } else { bgp_path_info_set_flag(rn, path, BGP_PATH_VALID); - bgp_aggregate_increment(bgp_path, &rn->p, - path, afi, safi); + bgp_aggregate_increment(bgp_path, p, path, afi, + safi); } } @@ -780,14 +781,13 @@ static void evaluate_paths(struct bgp_nexthop_cache *bnc) || CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED)) SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED); - if (safi == SAFI_EVPN && - bgp_evpn_is_prefix_nht_supported(&rn->p)) { + if (safi == SAFI_EVPN && bgp_evpn_is_prefix_nht_supported(p)) { if (CHECK_FLAG(path->flags, BGP_PATH_VALID)) - bgp_evpn_import_route(bgp_path, afi, safi, - &rn->p, path); + bgp_evpn_import_route(bgp_path, afi, safi, p, + path); else - bgp_evpn_unimport_route(bgp_path, afi, safi, - &rn->p, path); + bgp_evpn_unimport_route(bgp_path, afi, safi, p, + path); } bgp_process(bgp_path, rn, afi, safi); diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c index 81bb45aa76..4a2f7d5882 100644 --- a/bgpd/bgp_open.c +++ b/bgpd/bgp_open.c @@ -1013,15 +1013,15 @@ static int bgp_auth_parse(struct peer *peer, size_t length) return -1; } -static int strict_capability_same(struct peer *peer) +static bool strict_capability_same(struct peer *peer) { int i, j; for (i = AFI_IP; i < AFI_MAX; i++) for (j = SAFI_UNICAST; j < SAFI_MAX; j++) if (peer->afc[i][j] != peer->afc_nego[i][j]) - return 0; - return 1; + return false; + return true; } /* peek into option, stores ASN to *as4 if the AS4 capability was found. diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index 0e251dced8..7137c1a784 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -587,7 +587,7 @@ void bgp_open_send(struct peer *peer) * @param peer * @return 0 */ -static int bgp_write_notify(struct peer *peer) +static void bgp_write_notify(struct peer *peer) { int ret, val; uint8_t type; @@ -597,7 +597,7 @@ static int bgp_write_notify(struct peer *peer) s = stream_fifo_pop(peer->obuf); if (!s) - return 0; + return; assert(stream_get_endp(s) >= BGP_HEADER_SIZE); @@ -617,7 +617,7 @@ static int bgp_write_notify(struct peer *peer) if (ret <= 0) { stream_free(s); BGP_EVENT_ADD(peer, TCP_fatal_error); - return 0; + return; } /* Disable Nagle, make NOTIFY packet go out right away */ @@ -649,8 +649,6 @@ static int bgp_write_notify(struct peer *peer) BGP_EVENT_ADD(peer, BGP_Stop); stream_free(s); - - return 0; } /* @@ -976,14 +974,21 @@ static int bgp_collision_detect(struct peer *new, struct in_addr remote_id) return -1; } else if ((peer->status == OpenConfirm) || (peer->status == OpenSent)) { - /* 1. The BGP Identifier of the local system is compared - to - the BGP Identifier of the remote system (as specified - in - the OPEN message). */ - + /* 1. The BGP Identifier of the local system is + * compared to the BGP Identifier of the remote + * system (as specified in the OPEN message). + * + * If the BGP Identifiers of the peers + * involved in the connection collision + * are identical, then the connection + * initiated by the BGP speaker with the + * larger AS number is preserved. + */ if (ntohl(peer->local_id.s_addr) - < ntohl(remote_id.s_addr)) + < ntohl(remote_id.s_addr) + || (ntohl(peer->local_id.s_addr) + == ntohl(remote_id.s_addr) + && peer->local_as < peer->as)) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) { /* 2. 
If the value of the local BGP @@ -1007,10 +1012,13 @@ static int bgp_collision_detect(struct peer *new, struct in_addr remote_id) return -1; } else { - if (ntohl(peer->local_id.s_addr) == - ntohl(remote_id.s_addr)) - flog_err(EC_BGP_ROUTER_ID_SAME, "Peer's router-id %s is the same as ours", - inet_ntoa(remote_id)); + if (ntohl(peer->local_id.s_addr) + == ntohl(remote_id.s_addr) + && peer->local_as == peer->as) + flog_err( + EC_BGP_ROUTER_ID_SAME, + "Peer's router-id %s is the same as ours", + inet_ntoa(remote_id)); /* 3. Otherwise, the local system closes newly created @@ -1142,6 +1150,15 @@ static int bgp_open_receive(struct peer *peer, bgp_size_t size) return BGP_Stop; } + /* Codification of AS 0 Processing */ + if (remote_as == BGP_AS_ZERO) { + flog_err(EC_BGP_PKT_OPEN, "%s bad OPEN, got AS set to 0", + peer->host); + bgp_notify_send(peer, BGP_NOTIFY_OPEN_ERR, + BGP_NOTIFY_OPEN_BAD_PEER_AS); + return BGP_Stop; + } + if (remote_as == BGP_AS_TRANS) { /* Take the AS4 from the capability. We must have received the * capability now! Otherwise we have a asn16 peer who uses @@ -1190,10 +1207,17 @@ static int bgp_open_receive(struct peer *peer, bgp_size_t size) } } - /* remote router-id check. */ + /* rfc6286: + * If the BGP Identifier field of the OPEN message + * is zero, or if it is the same as the BGP Identifier + * of the local BGP speaker and the message is from an + * internal peer, then the Error Subcode is set to + * "Bad BGP Identifier". + */ if (remote_id.s_addr == INADDR_ANY || IPV4_CLASS_DE(ntohl(remote_id.s_addr)) - || ntohl(peer->local_id.s_addr) == ntohl(remote_id.s_addr)) { + || (peer->sort == BGP_PEER_IBGP + && ntohl(peer->local_id.s_addr) == ntohl(remote_id.s_addr))) { if (bgp_debug_neighbor_events(peer)) zlog_debug("%s bad OPEN, wrong router identifier %s", peer->host, inet_ntoa(remote_id)); @@ -1338,8 +1362,9 @@ static int bgp_open_receive(struct peer *peer, bgp_size_t size) peer->afc[AFI_IP6][SAFI_FLOWSPEC]; } - /* When collision is detected and this peer is closed. Retrun - immidiately. */ + /* When collision is detected and this peer is closed. + * Return immediately. 
+ */ ret = bgp_collision_detect(peer, remote_id); if (ret < 0) return BGP_Stop; @@ -1447,7 +1472,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size) peer->host, lookup_msg(bgp_status_msg, peer->status, NULL)); bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR, - BGP_NOTIFY_SUBCODE_UNSPECIFIC); + bgp_fsm_error_subcode(peer->status)); return BGP_Stop; } @@ -1859,7 +1884,7 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size) peer->host, lookup_msg(bgp_status_msg, peer->status, NULL)); bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR, - BGP_NOTIFY_SUBCODE_UNSPECIFIC); + bgp_fsm_error_subcode(peer->status)); return BGP_Stop; } @@ -2251,7 +2276,7 @@ int bgp_capability_receive(struct peer *peer, bgp_size_t size) peer->host, lookup_msg(bgp_status_msg, peer->status, NULL)); bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR, - BGP_NOTIFY_SUBCODE_UNSPECIFIC); + bgp_fsm_error_subcode(peer->status)); return BGP_Stop; } @@ -2328,7 +2353,7 @@ int bgp_process_packet(struct thread *thread) flog_err( EC_BGP_PKT_OPEN, "%s: BGP OPEN receipt failed for peer: %s", - __FUNCTION__, peer->host); + __func__, peer->host); break; case BGP_MSG_UPDATE: atomic_fetch_add_explicit(&peer->update_in, 1, @@ -2339,7 +2364,7 @@ int bgp_process_packet(struct thread *thread) flog_err( EC_BGP_UPDATE_RCV, "%s: BGP UPDATE receipt failed for peer: %s", - __FUNCTION__, peer->host); + __func__, peer->host); break; case BGP_MSG_NOTIFY: atomic_fetch_add_explicit(&peer->notify_in, 1, @@ -2349,7 +2374,7 @@ int bgp_process_packet(struct thread *thread) flog_err( EC_BGP_NOTIFY_RCV, "%s: BGP NOTIFY receipt failed for peer: %s", - __FUNCTION__, peer->host); + __func__, peer->host); break; case BGP_MSG_KEEPALIVE: peer->readtime = monotime(NULL); @@ -2360,7 +2385,7 @@ int bgp_process_packet(struct thread *thread) flog_err( EC_BGP_KEEP_RCV, "%s: BGP KEEPALIVE receipt failed for peer: %s", - __FUNCTION__, peer->host); + __func__, peer->host); break; case BGP_MSG_ROUTE_REFRESH_NEW: case BGP_MSG_ROUTE_REFRESH_OLD: @@ -2371,7 +2396,7 @@ int bgp_process_packet(struct thread *thread) flog_err( EC_BGP_RFSH_RCV, "%s: BGP ROUTEREFRESH receipt failed for peer: %s", - __FUNCTION__, peer->host); + __func__, peer->host); break; case BGP_MSG_CAPABILITY: atomic_fetch_add_explicit(&peer->dynamic_cap_in, 1, @@ -2381,7 +2406,7 @@ int bgp_process_packet(struct thread *thread) flog_err( EC_BGP_CAP_RCV, "%s: BGP CAPABILITY receipt failed for peer: %s", - __FUNCTION__, peer->host); + __func__, peer->host); break; default: /* Suppress uninitialized variable warning */ diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c index 172ec8b42e..ab134b15c4 100644 --- a/bgpd/bgp_pbr.c +++ b/bgpd/bgp_pbr.c @@ -685,9 +685,9 @@ static int bgp_pbr_validate_policy_route(struct bgp_pbr_entry_main *api) } /* return -1 if build or validation failed */ -int bgp_pbr_build_and_validate_entry(struct prefix *p, - struct bgp_path_info *path, - struct bgp_pbr_entry_main *api) +int bgp_pbr_build_and_validate_entry(const struct prefix *p, + struct bgp_path_info *path, + struct bgp_pbr_entry_main *api) { int ret; int i, action_count = 0; @@ -738,7 +738,8 @@ int bgp_pbr_build_and_validate_entry(struct prefix *p, ecom_copy.val[0] &= ~ECOMMUNITY_ENCODE_TRANS_EXP; ecom_copy.val[1] = ECOMMUNITY_ROUTE_TARGET; - ecommunity_add_val(eckey, &ecom_copy); + ecommunity_add_val(eckey, &ecom_copy, + false, false); api_action->action = ACTION_REDIRECT; api_action->u.redirect_vrf = @@ -2610,7 +2611,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path, } } -void 
bgp_pbr_update_entry(struct bgp *bgp, struct prefix *p, +void bgp_pbr_update_entry(struct bgp *bgp, const struct prefix *p, struct bgp_path_info *info, afi_t afi, safi_t safi, bool nlri_update) { diff --git a/bgpd/bgp_pbr.h b/bgpd/bgp_pbr.h index 393b08da48..47d5e21692 100644 --- a/bgpd/bgp_pbr.h +++ b/bgpd/bgp_pbr.h @@ -291,7 +291,7 @@ void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api); struct bgp_node; struct bgp_path_info; -extern void bgp_pbr_update_entry(struct bgp *bgp, struct prefix *p, +extern void bgp_pbr_update_entry(struct bgp *bgp, const struct prefix *p, struct bgp_path_info *new_select, afi_t afi, safi_t safi, bool nlri_update); @@ -301,7 +301,7 @@ extern void bgp_pbr_reset(struct bgp *bgp, afi_t afi); extern struct bgp_pbr_interface *bgp_pbr_interface_lookup(const char *name, struct bgp_pbr_interface_head *head); -extern int bgp_pbr_build_and_validate_entry(struct prefix *p, +extern int bgp_pbr_build_and_validate_entry(const struct prefix *p, struct bgp_path_info *path, struct bgp_pbr_entry_main *api); #endif /* __BGP_PBR_H__ */ diff --git a/bgpd/bgp_rd.c b/bgpd/bgp_rd.c index be950dfa51..66d64066c4 100644 --- a/bgpd/bgp_rd.c +++ b/bgpd/bgp_rd.c @@ -33,16 +33,16 @@ #include "bgpd/bgp_rd.h" #include "bgpd/bgp_attr.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #endif -uint16_t decode_rd_type(uint8_t *pnt) +uint16_t decode_rd_type(const uint8_t *pnt) { uint16_t v; v = ((uint16_t)*pnt++ << 8); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * VNC L2 stores LHI in lower byte, so omit it */ @@ -60,7 +60,7 @@ void encode_rd_type(uint16_t v, uint8_t *pnt) } /* type == RD_TYPE_AS */ -void decode_rd_as(uint8_t *pnt, struct rd_as *rd_as) +void decode_rd_as(const uint8_t *pnt, struct rd_as *rd_as) { rd_as->as = (uint16_t)*pnt++ << 8; rd_as->as |= (uint16_t)*pnt++; @@ -68,7 +68,7 @@ void decode_rd_as(uint8_t *pnt, struct rd_as *rd_as) } /* type == RD_TYPE_AS4 */ -void decode_rd_as4(uint8_t *pnt, struct rd_as *rd_as) +void decode_rd_as4(const uint8_t *pnt, struct rd_as *rd_as) { pnt = ptr_get_be32(pnt, &rd_as->as); rd_as->val = ((uint16_t)*pnt++ << 8); @@ -76,7 +76,7 @@ void decode_rd_as4(uint8_t *pnt, struct rd_as *rd_as) } /* type == RD_TYPE_IP */ -void decode_rd_ip(uint8_t *pnt, struct rd_ip *rd_ip) +void decode_rd_ip(const uint8_t *pnt, struct rd_ip *rd_ip) { memcpy(&rd_ip->ip, pnt, 4); pnt += 4; @@ -85,9 +85,9 @@ void decode_rd_ip(uint8_t *pnt, struct rd_ip *rd_ip) rd_ip->val |= (uint16_t)*pnt; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* type == RD_TYPE_VNC_ETH */ -void decode_rd_vnc_eth(uint8_t *pnt, struct rd_vnc_eth *rd_vnc_eth) +void decode_rd_vnc_eth(const uint8_t *pnt, struct rd_vnc_eth *rd_vnc_eth) { rd_vnc_eth->type = RD_TYPE_VNC_ETH; rd_vnc_eth->local_nve_id = pnt[1]; @@ -159,9 +159,9 @@ out: return lret; } -char *prefix_rd2str(struct prefix_rd *prd, char *buf, size_t size) +char *prefix_rd2str(const struct prefix_rd *prd, char *buf, size_t size) { - uint8_t *pnt; + const uint8_t *pnt; uint16_t type; struct rd_as rd_as; struct rd_ip rd_ip; @@ -186,7 +186,7 @@ char *prefix_rd2str(struct prefix_rd *prd, char *buf, size_t size) rd_ip.val); return buf; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC else if (type == RD_TYPE_VNC_ETH) { snprintf(buf, size, "LHI:%d, %02x:%02x:%02x:%02x:%02x:%02x", *(pnt + 1), /* LHI */ diff --git a/bgpd/bgp_rd.h b/bgpd/bgp_rd.h index c5ea34103f..b5ad9d624d 100644 --- a/bgpd/bgp_rd.h +++ b/bgpd/bgp_rd.h @@ -28,7 +28,7 @@ #define RD_TYPE_IP 1 #define RD_TYPE_AS4 2 -#if ENABLE_BGP_VNC +#ifdef 
ENABLE_BGP_VNC #define RD_TYPE_VNC_ETH 0xff00 /* VNC L2VPN */ #endif @@ -46,7 +46,7 @@ struct rd_ip { uint16_t val; }; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC struct rd_vnc_eth { uint16_t type; uint8_t local_nve_id; @@ -54,18 +54,19 @@ struct rd_vnc_eth { }; #endif -extern uint16_t decode_rd_type(uint8_t *pnt); +extern uint16_t decode_rd_type(const uint8_t *pnt); extern void encode_rd_type(uint16_t, uint8_t *); -extern void decode_rd_as(uint8_t *pnt, struct rd_as *rd_as); -extern void decode_rd_as4(uint8_t *pnt, struct rd_as *rd_as); -extern void decode_rd_ip(uint8_t *pnt, struct rd_ip *rd_ip); -#if ENABLE_BGP_VNC -extern void decode_rd_vnc_eth(uint8_t *pnt, struct rd_vnc_eth *rd_vnc_eth); +extern void decode_rd_as(const uint8_t *pnt, struct rd_as *rd_as); +extern void decode_rd_as4(const uint8_t *pnt, struct rd_as *rd_as); +extern void decode_rd_ip(const uint8_t *pnt, struct rd_ip *rd_ip); +#ifdef ENABLE_BGP_VNC +extern void decode_rd_vnc_eth(const uint8_t *pnt, + struct rd_vnc_eth *rd_vnc_eth); #endif extern int str2prefix_rd(const char *, struct prefix_rd *); -extern char *prefix_rd2str(struct prefix_rd *, char *, size_t); +extern char *prefix_rd2str(const struct prefix_rd *, char *, size_t); extern void form_auto_rd(struct in_addr router_id, uint16_t rd_id, struct prefix_rd *prd); diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index c2c034d164..1d8be6496d 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -71,7 +71,7 @@ #include "bgpd/bgp_mac.h" #include "bgpd/bgp_network.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #include "bgpd/rfapi/vnc_import_bgp.h" #include "bgpd/rfapi/vnc_export_bgp.h" @@ -116,7 +116,7 @@ DEFINE_HOOK(bgp_process, struct bgp_node *bgp_afi_node_get(struct bgp_table *table, afi_t afi, - safi_t safi, struct prefix *p, + safi_t safi, const struct prefix *p, struct prefix_rd *prd) { struct bgp_node *rn; @@ -146,7 +146,7 @@ struct bgp_node *bgp_afi_node_get(struct bgp_table *table, afi_t afi, } struct bgp_node *bgp_afi_node_lookup(struct bgp_table *table, afi_t afi, - safi_t safi, struct prefix *p, + safi_t safi, const struct prefix *p, struct prefix_rd *prd) { struct bgp_node *rn; @@ -303,7 +303,6 @@ static int bgp_node_set_defer_flag(struct bgp_node *rn, bool delete) struct bgp_table *table = NULL; afi_t afi = 0; safi_t safi = 0; - char buf[PREFIX2STR_BUFFER]; /* If the flag BGP_NODE_SELECT_DEFER is set and new path is added * then the route selection is deferred @@ -312,12 +311,11 @@ static int bgp_node_set_defer_flag(struct bgp_node *rn, bool delete) return 0; if (CHECK_FLAG(rn->flags, BGP_NODE_PROCESS_SCHEDULED)) { - if (BGP_DEBUG(update, UPDATE_OUT)) { - prefix2str(&rn->p, buf, PREFIX2STR_BUFFER); + if (BGP_DEBUG(update, UPDATE_OUT)) zlog_debug( - "Route %s is in workqueue and being processed, not deferred.", - buf); - } + "Route %pRN is in workqueue and being processed, not deferred.", + rn); + return 0; } @@ -361,13 +359,12 @@ static int bgp_node_set_defer_flag(struct bgp_node *rn, bool delete) if (set_flag && table) { if (bgp && (bgp->gr_info[afi][safi].t_select_deferral)) { SET_FLAG(rn->flags, BGP_NODE_SELECT_DEFER); - prefix2str(&rn->p, buf, PREFIX2STR_BUFFER); if (rn->rt_node == NULL) rn->rt_node = listnode_add( bgp->gr_info[afi][safi].route_list, rn); if (BGP_DEBUG(update, UPDATE_OUT)) - zlog_debug("DEFER route %s, rn %p, node %p", - buf, rn, rn->rt_node); + zlog_debug("DEFER route %pRN, rn %p, node %p", + rn, rn, rn->rt_node); return 0; } } @@ -594,7 +591,8 @@ static int bgp_path_info_cmp(struct bgp *bgp, 
struct bgp_path_info *new, */ if (newattr->sticky != existattr->sticky) { if (!debug) { - prefix2str(&new->net->p, pfx_buf, + prefix2str(bgp_node_get_prefix(new->net), + pfx_buf, sizeof(*pfx_buf) * PREFIX2STR_BUFFER); bgp_path_info_path_with_addpath_rx_str(new, @@ -1202,7 +1200,8 @@ int bgp_path_info_cmp_compatible(struct bgp *bgp, struct bgp_path_info *new, return ret; } -static enum filter_type bgp_input_filter(struct peer *peer, struct prefix *p, +static enum filter_type bgp_input_filter(struct peer *peer, + const struct prefix *p, struct attr *attr, afi_t afi, safi_t safi) { @@ -1241,7 +1240,8 @@ static enum filter_type bgp_input_filter(struct peer *peer, struct prefix *p, #undef FILTER_EXIST_WARN } -static enum filter_type bgp_output_filter(struct peer *peer, struct prefix *p, +static enum filter_type bgp_output_filter(struct peer *peer, + const struct prefix *p, struct attr *attr, afi_t afi, safi_t safi) { @@ -1282,30 +1282,30 @@ static enum filter_type bgp_output_filter(struct peer *peer, struct prefix *p, } /* If community attribute includes no_export then return 1. */ -static int bgp_community_filter(struct peer *peer, struct attr *attr) +static bool bgp_community_filter(struct peer *peer, struct attr *attr) { if (attr->community) { /* NO_ADVERTISE check. */ if (community_include(attr->community, COMMUNITY_NO_ADVERTISE)) - return 1; + return true; /* NO_EXPORT check. */ if (peer->sort == BGP_PEER_EBGP && community_include(attr->community, COMMUNITY_NO_EXPORT)) - return 1; + return true; /* NO_EXPORT_SUBCONFED check. */ if (peer->sort == BGP_PEER_EBGP || peer->sort == BGP_PEER_CONFED) if (community_include(attr->community, COMMUNITY_NO_EXPORT_SUBCONFED)) - return 1; + return true; } - return 0; + return false; } /* Route reflection loop check. 
*/ -static int bgp_cluster_filter(struct peer *peer, struct attr *attr) +static bool bgp_cluster_filter(struct peer *peer, struct attr *attr) { struct in_addr cluster_id; @@ -1316,12 +1316,12 @@ static int bgp_cluster_filter(struct peer *peer, struct attr *attr) cluster_id = peer->bgp->router_id; if (cluster_loop_check(attr->cluster, cluster_id)) - return 1; + return true; } - return 0; + return false; } -static int bgp_input_modifier(struct peer *peer, struct prefix *p, +static int bgp_input_modifier(struct peer *peer, const struct prefix *p, struct attr *attr, afi_t afi, safi_t safi, const char *rmap_name, mpls_label_t *label, uint32_t num_labels, struct bgp_node *rn) @@ -1379,7 +1379,7 @@ static int bgp_input_modifier(struct peer *peer, struct prefix *p, return RMAP_PERMIT; } -static int bgp_output_modifier(struct peer *peer, struct prefix *p, +static int bgp_output_modifier(struct peer *peer, const struct prefix *p, struct attr *attr, afi_t afi, safi_t safi, const char *rmap_name) { @@ -1543,9 +1543,9 @@ static void subgroup_announce_reset_nhop(uint8_t family, struct attr *attr) memset(&attr->mp_nexthop_global_in, 0, BGP_ATTR_NHLEN_IPV4); } -int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, - struct update_subgroup *subgrp, struct prefix *p, - struct attr *attr) +bool subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, + struct update_subgroup *subgrp, + const struct prefix *p, struct attr *attr) { struct bgp_filter *filter; struct peer *from; @@ -1560,9 +1560,11 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, afi_t afi; safi_t safi; int samepeer_safe = 0; /* for synthetic mplsvpns routes */ + bool nh_reset = false; + uint64_t cum_bw; if (DISABLE_BGP_ANNOUNCE) - return 0; + return false; afi = SUBGRP_AFI(subgrp); safi = SUBGRP_SAFI(subgrp); @@ -1577,7 +1579,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, piattr = bgp_path_info_mpath_count(pi) ? bgp_path_info_mpath_attr(pi) : pi->attr; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (((afi == AFI_IP) || (afi == AFI_IP6)) && (safi == SAFI_MPLS_VPN) && ((pi->type == ZEBRA_ROUTE_BGP_DIRECT) || (pi->type == ZEBRA_ROUTE_BGP_DIRECT_EXT))) { @@ -1609,7 +1611,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if (!CHECK_FLAG(pi->flags, BGP_PATH_VALID) || CHECK_FLAG(pi->flags, BGP_PATH_HISTORY) || CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)) { - return 0; + return false; } /* If this is not the bestpath then check to see if there is an enabled @@ -1617,14 +1619,14 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, * feature that requires us to advertise it */ if (!CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) { if (!bgp_addpath_tx_path(peer->addpath_type[afi][safi], pi)) { - return 0; + return false; } } /* Aggregate-address suppress check. */ if (pi->extra && pi->extra->suppress) if (!UNSUPPRESS_MAP_NAME(filter)) { - return 0; + return false; } /* @@ -1635,7 +1637,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, */ if (safi == SAFI_MPLS_VPN && pi->extra && pi->extra->num_labels && pi->extra->label[0] == BGP_PREVENT_VRF_2_VRF_LEAK) - return 0; + return false; /* If it's labeled safi, make sure the route has a valid label. 
*/ if (safi == SAFI_LABELED_UNICAST) { @@ -1648,13 +1650,13 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, inet_ntop(p->family, &p->u.prefix, buf, SU_ADDRSTRLEN), p->prefixlen, &label); - return 0; + return false; } } /* Do not send back route to sender. */ if (onlypeer && from == onlypeer) { - return 0; + return false; } /* Do not send the default route in the BGP table if the neighbor is @@ -1662,9 +1664,9 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE)) { if (p->family == AF_INET && p->u.prefix4.s_addr == INADDR_ANY) - return 0; + return false; else if (p->family == AF_INET6 && p->prefixlen == 0) - return 0; + return false; } /* Transparency check. */ @@ -1679,7 +1681,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug( "subgrpannouncecheck: community filter check fail"); - return 0; + return false; } /* If the attribute has originator-id and it is same as remote @@ -1692,7 +1694,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, "remote router-id", onlypeer->host, prefix2str(p, buf, sizeof(buf))); - return 0; + return false; } /* ORF prefix-list filter check */ @@ -1710,7 +1712,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, peer->host, prefix2str(p, buf, sizeof(buf))); - return 0; + return false; } } @@ -1719,7 +1721,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug("%s [Update:SEND] %s is filtered", peer->host, prefix2str(p, buf, sizeof(buf))); - return 0; + return false; } /* AS path loop check. */ @@ -1730,7 +1732,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, "%s [Update:SEND] suppress announcement to peer AS %u " "that is part of AS path.", onlypeer->host, onlypeer->as); - return 0; + return false; } /* If we're a CONFED we need to loop check the CONFED ID too */ @@ -1741,7 +1743,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, "%s [Update:SEND] suppress announcement to peer AS %u" " is AS path.", peer->host, bgp->confed_id); - return 0; + return false; } } @@ -1765,13 +1767,13 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, BGP_FLAG_NO_CLIENT_TO_CLIENT)) if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) - return 0; + return false; } else { /* A route from a Non-client peer. Reflect to all other clients. 
*/ if (!CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) - return 0; + return false; } } @@ -1875,16 +1877,9 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, struct bgp_path_info_extra dummy_rmap_path_extra = {0}; struct attr dummy_attr = {0}; - memset(&rmap_path, 0, sizeof(struct bgp_path_info)); - rmap_path.peer = peer; - rmap_path.attr = attr; - rmap_path.net = rn; - - if (pi->extra) { - memcpy(&dummy_rmap_path_extra, pi->extra, - sizeof(struct bgp_path_info_extra)); - rmap_path.extra = &dummy_rmap_path_extra; - } + /* Fill temp path_info */ + prep_for_rmap_apply(&rmap_path, &dummy_rmap_path_extra, + rn, pi, peer, attr); /* don't confuse inbound and outbound setting */ RESET_FLAG(attr->rmap_change_flags); @@ -1917,7 +1912,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, peer->host, prefix2str(p, buf, sizeof(buf))); bgp_attr_flush(attr); - return 0; + return false; } } @@ -1933,7 +1928,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if (peer->bgp->ebgp_requires_policy == DEFAULT_EBGP_POLICY_ENABLED) if (!bgp_outbound_policy_exists(peer, filter)) - return 0; + return false; /* draft-ietf-idr-deprecate-as-set-confed-set * Filter routes having AS_SET or AS_CONFED_SET in the path. @@ -1943,7 +1938,11 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, */ if (peer->bgp->reject_as_sets == BGP_REJECT_AS_SETS_ENABLED) if (aspath_check_as_sets(attr->aspath)) - return 0; + return false; + + /* Codification of AS 0 Processing */ + if (aspath_check_as_zero(attr->aspath)) + return 0; if (CHECK_FLAG(bgp->flags, BGP_FLAG_GRACEFUL_SHUTDOWN)) { if (peer->sort == BGP_PEER_IBGP @@ -1986,12 +1985,14 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, PEER_FLAG_FORCE_NEXTHOP_SELF)) { if (!reflect || CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_FORCE_NEXTHOP_SELF)) + PEER_FLAG_FORCE_NEXTHOP_SELF)) { subgroup_announce_reset_nhop( (peer_cap_enhe(peer, afi, safi) ? AF_INET6 : p->family), attr); + nh_reset = true; + } } else if (peer->sort == BGP_PEER_EBGP) { /* Can also reset the nexthop if announcing to EBGP, but * only if @@ -2002,22 +2003,26 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, if ((p->family == AF_INET) && (!bgp_subgrp_multiaccess_check_v4( piattr->nexthop, - subgrp))) + subgrp, from))) { subgroup_announce_reset_nhop( (peer_cap_enhe(peer, afi, safi) ? AF_INET6 : p->family), attr); + nh_reset = true; + } if ((p->family == AF_INET6) && (!bgp_subgrp_multiaccess_check_v6( piattr->mp_nexthop_global, - subgrp))) + subgrp, from))) { subgroup_announce_reset_nhop( (peer_cap_enhe(peer, afi, safi) ? AF_INET6 : p->family), attr); + nh_reset = true; + } @@ -2035,6 +2040,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, "%s: BGP_PATH_ANNC_NH_SELF, family=%s", __func__, family2str(family)); subgroup_announce_reset_nhop(family, attr); + nh_reset = true; } } @@ -2047,11 +2053,26 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi, * the same interface. */ if (p->family == AF_INET6 || peer_cap_enhe(peer, afi, safi)) { - if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) + if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) { subgroup_announce_reset_nhop(AF_INET6, attr); + nh_reset = true; + } } - return 1; + /* + * When the next hop is set to ourselves, if all multipaths have + * link-bandwidth announce the cumulative bandwidth as that makes + * the most sense. 
However, don't modify if the link-bandwidth has + * been explicitly set by user policy. + */ + if (nh_reset && + bgp_path_info_mpath_chkwtd(bgp, pi) && + (cum_bw = bgp_path_info_mpath_cumbw(pi)) != 0 && + !CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET)) + attr->ecommunity = ecommunity_replace_linkbw( + bgp->as, attr->ecommunity, cum_bw); + + return true; } static int bgp_route_select_timer_expire(struct thread *thread) @@ -2098,11 +2119,12 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_node *rn, do_mpath = (mpath_cfg->maxpaths_ebgp > 1 || mpath_cfg->maxpaths_ibgp > 1); - debug = bgp_debug_bestpath(&rn->p); + debug = bgp_debug_bestpath(rn); if (debug) - prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); + prefix2str(bgp_node_get_prefix(rn), pfx_buf, sizeof(pfx_buf)); + rn->reason = bgp_path_selection_none; /* bgp deterministic-med */ new_select = NULL; if (CHECK_FLAG(bgp->flags, BGP_FLAG_DETERMINISTIC_MED)) { @@ -2182,6 +2204,8 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_node *rn, new_select = NULL; for (pi = bgp_node_get_bgp_path_info(rn); (pi != NULL) && (nextpi = pi->next, 1); pi = nextpi) { + enum bgp_path_selection_reason reason; + if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) old_select = pi; @@ -2222,8 +2246,12 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_node *rn, bgp_path_info_unset_flag(rn, pi, BGP_PATH_DMED_CHECK); + reason = rn->reason; if (bgp_path_info_cmp(bgp, pi, new_select, &paths_eq, mpath_cfg, debug, pfx_buf, afi, safi, &rn->reason)) { + if (new_select == NULL && + reason != bgp_path_selection_none) + rn->reason = reason; new_select = pi; } } @@ -2309,18 +2337,18 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_node *rn, * A new route/change in bestpath of an existing route. Evaluate the path * for advertisement to the subgroup. */ -int subgroup_process_announce_selected(struct update_subgroup *subgrp, - struct bgp_path_info *selected, - struct bgp_node *rn, - uint32_t addpath_tx_id) +void subgroup_process_announce_selected(struct update_subgroup *subgrp, + struct bgp_path_info *selected, + struct bgp_node *rn, + uint32_t addpath_tx_id) { - struct prefix *p; + const struct prefix *p; struct peer *onlypeer; struct attr attr; afi_t afi; safi_t safi; - p = &rn->p; + p = bgp_node_get_prefix(rn); afi = SUBGRP_AFI(subgrp); safi = SUBGRP_SAFI(subgrp); onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer @@ -2336,7 +2364,7 @@ int subgroup_process_announce_selected(struct update_subgroup *subgrp, /* First update is deferred until ORF or ROUTE-REFRESH is received */ if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[afi][safi], PEER_STATUS_ORF_WAIT_REFRESH)) - return 0; + return; memset(&attr, 0, sizeof(struct attr)); /* It's initialized in bgp_announce_check() */ @@ -2355,8 +2383,6 @@ int subgroup_process_announce_selected(struct update_subgroup *subgrp, else { bgp_adj_out_unset_subgroup(rn, subgrp, 1, addpath_tx_id); } - - return 0; } /* @@ -2380,8 +2406,8 @@ void bgp_zebra_clear_route_change_flags(struct bgp_node *rn) * if the route selection returns the same best route as earlier - to * determine if we need to update zebra or not. */ -int bgp_zebra_has_route_changed(struct bgp_node *rn, - struct bgp_path_info *selected) +bool bgp_zebra_has_route_changed(struct bgp_node *rn, + struct bgp_path_info *selected) { struct bgp_path_info *mpinfo; @@ -2392,8 +2418,9 @@ int bgp_zebra_has_route_changed(struct bgp_node *rn, * when the best path has an attribute change anyway. 
*/ if (CHECK_FLAG(selected->flags, BGP_PATH_IGP_CHANGED) - || CHECK_FLAG(selected->flags, BGP_PATH_MULTIPATH_CHG)) - return 1; + || CHECK_FLAG(selected->flags, BGP_PATH_MULTIPATH_CHG) + || CHECK_FLAG(selected->flags, BGP_PATH_LINK_BW_CHG)) + return true; /* * If this is multipath, check all selected paths for any nexthop change @@ -2402,11 +2429,11 @@ int bgp_zebra_has_route_changed(struct bgp_node *rn, mpinfo = bgp_path_info_mpath_next(mpinfo)) { if (CHECK_FLAG(mpinfo->flags, BGP_PATH_IGP_CHANGED) || CHECK_FLAG(mpinfo->flags, BGP_PATH_ATTR_CHANGED)) - return 1; + return true; } /* Nothing has changed from the RIB's perspective. */ - return 0; + return false; } struct bgp_process_queue { @@ -2417,6 +2444,54 @@ struct bgp_process_queue { unsigned int queued; }; +static void bgp_process_evpn_route_injection(struct bgp *bgp, afi_t afi, + safi_t safi, struct bgp_node *rn, + struct bgp_path_info *new_select, + struct bgp_path_info *old_select) +{ + const struct prefix *p = bgp_node_get_prefix(rn); + + if ((afi != AFI_IP && afi != AFI_IP6) || (safi != SAFI_UNICAST)) + return; + + if (advertise_type5_routes(bgp, afi) && new_select + && is_route_injectable_into_evpn(new_select)) { + + /* apply the route-map */ + if (bgp->adv_cmd_rmap[afi][safi].map) { + route_map_result_t ret; + struct bgp_path_info rmap_path; + struct bgp_path_info_extra rmap_path_extra; + struct attr dummy_attr; + + dummy_attr = *new_select->attr; + + /* Fill temp path_info */ + prep_for_rmap_apply(&rmap_path, &rmap_path_extra, rn, + new_select, new_select->peer, + &dummy_attr); + + RESET_FLAG(dummy_attr.rmap_change_flags); + + ret = route_map_apply(bgp->adv_cmd_rmap[afi][safi].map, + p, RMAP_BGP, &rmap_path); + + if (ret == RMAP_DENYMATCH) { + bgp_attr_flush(&dummy_attr); + bgp_evpn_withdraw_type5_route(bgp, p, afi, + safi); + } else + bgp_evpn_advertise_type5_route( + bgp, p, &dummy_attr, afi, safi); + } else { + bgp_evpn_advertise_type5_route(bgp, p, new_select->attr, + afi, safi); + } + } else if (advertise_type5_routes(bgp, afi) && old_select + && is_route_injectable_into_evpn(old_select)) + bgp_evpn_withdraw_type5_route(bgp, p, afi, safi); +} + /* * old_select = The old best path * new_select = the new best path @@ -2442,18 +2517,15 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, struct bgp_path_info *new_select; struct bgp_path_info *old_select; struct bgp_path_info_pair old_and_new; - char pfx_buf[PREFIX2STR_BUFFER]; int debug = 0; if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)) { if (rn) - debug = bgp_debug_bestpath(&rn->p); - if (debug) { - prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); + debug = bgp_debug_bestpath(rn); + if (debug) zlog_debug( - "%s: bgp delete in progress, ignoring event, p=%s", - __func__, pfx_buf); - } + "%s: bgp delete in progress, ignoring event, p=%pRN", + __func__, rn); return; } /* Is it end of initial update? 
(after startup) */ @@ -2472,21 +2544,19 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, return; } - struct prefix *p = &rn->p; + const struct prefix *p = bgp_node_get_prefix(rn); - debug = bgp_debug_bestpath(&rn->p); - if (debug) { - prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); - zlog_debug("%s: p=%s afi=%s, safi=%s start", __func__, pfx_buf, + debug = bgp_debug_bestpath(rn); + if (debug) + zlog_debug("%s: p=%pRN afi=%s, safi=%s start", __func__, rn, afi2str(afi), safi2str(safi)); - } /* The best path calculation for the route is deferred if * BGP_NODE_SELECT_DEFER is set */ if (CHECK_FLAG(rn->flags, BGP_NODE_SELECT_DEFER)) { if (BGP_DEBUG(update, UPDATE_OUT)) - zlog_debug("SELECT_DEFER falg set for route %p", rn); + zlog_debug("SELECT_DEFER flag set for route %p", rn); return; } @@ -2536,13 +2606,11 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, bgp_unregister_for_label(rn); } - if (debug) { - prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); + if (debug) zlog_debug( - "%s: p=%s afi=%s, safi=%s, old_select=%p, new_select=%p", - __func__, pfx_buf, afi2str(afi), safi2str(safi), + "%s: p=%pRN afi=%s, safi=%s, old_select=%p, new_select=%p", + __func__, rn, afi2str(afi), safi2str(safi), old_select, new_select); - } /* If best route remains the same and this is not due to user-initiated * clear, see exactly what needs to be done. @@ -2552,7 +2620,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, && !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) { if (bgp_zebra_has_route_changed(rn, old_select)) { -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_import_bgp_add_route(bgp, p, old_select); vnc_import_bgp_exterior_add_route(bgp, p, old_select); #endif @@ -2568,12 +2636,11 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, bgp, afi, safi); } } - UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); - bgp_zebra_clear_route_change_flags(rn); /* If there is a change of interest to peers, reannounce the * route. 
*/ if (CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) + || CHECK_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG) || CHECK_FLAG(rn->flags, BGP_NODE_LABEL_CHANGED)) { group_announce_route(bgp, afi, safi, rn, new_select); @@ -2588,6 +2655,15 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, UNSET_FLAG(rn->flags, BGP_NODE_LABEL_CHANGED); } + /* advertise/withdraw type-5 routes */ + if (CHECK_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG) + || CHECK_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG)) + bgp_process_evpn_route_injection( + bgp, afi, safi, rn, old_select, old_select); + + UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG); + bgp_zebra_clear_route_change_flags(rn); UNSET_FLAG(rn->flags, BGP_NODE_PROCESS_SCHEDULED); return; } @@ -2618,9 +2694,10 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED); bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); + UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (old_select != new_select) { if (old_select) { @@ -2673,38 +2750,8 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn, } } - /* advertise/withdraw type-5 routes */ - if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { - if (advertise_type5_routes(bgp, afi) && - new_select && - is_route_injectable_into_evpn(new_select)) { - - /* apply the route-map */ - if (bgp->adv_cmd_rmap[afi][safi].map) { - route_map_result_t ret; - - ret = route_map_apply( - bgp->adv_cmd_rmap[afi][safi].map, - &rn->p, RMAP_BGP, new_select); - if (ret == RMAP_PERMITMATCH) - bgp_evpn_advertise_type5_route( - bgp, &rn->p, new_select->attr, - afi, safi); - else - bgp_evpn_withdraw_type5_route( - bgp, &rn->p, afi, safi); - } else { - bgp_evpn_advertise_type5_route(bgp, - &rn->p, - new_select->attr, - afi, safi); - - } - } else if (advertise_type5_routes(bgp, afi) && - old_select && - is_route_injectable_into_evpn(old_select)) - bgp_evpn_withdraw_type5_route(bgp, &rn->p, afi, safi); - } + bgp_process_evpn_route_injection(bgp, afi, safi, rn, new_select, + old_select); /* Clear any route change flags. */ bgp_zebra_clear_route_change_flags(rn); @@ -2930,20 +2977,20 @@ static int bgp_maximum_prefix_restart_timer(struct thread *thread) return 0; } -int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, - int always) +bool bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, + int always) { iana_afi_t pkt_afi; iana_safi_t pkt_safi; if (!CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX)) - return 0; + return false; if (peer->pcount[afi][safi] > peer->pmax[afi][safi]) { if (CHECK_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_LIMIT) && !always) - return 0; + return false; zlog_info( "%%MAXPFXEXCEED: No. of %s prefix received from %s %" PRIu32 @@ -2954,7 +3001,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING)) - return 0; + return false; /* Convert AFI, SAFI to values for packet. */ pkt_afi = afi_int2iana(afi); @@ -2978,7 +3025,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, /* Dynamic peers will just close their connection. 
*/ if (peer_dynamic_neighbor(peer)) - return 1; + return true; /* restart timer start */ if (peer->pmax_restart[afi][safi]) { @@ -2995,7 +3042,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, peer->v_pmax_restart); } - return 1; + return true; } else UNSET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_LIMIT); @@ -3005,7 +3052,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, if (CHECK_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_THRESHOLD) && !always) - return 0; + return false; zlog_info( "%%MAXPFX: No. of %s prefix received from %s reaches %" PRIu32 @@ -3017,7 +3064,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi, } else UNSET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_THRESHOLD); - return 0; + return false; } /* Unconditionally remove the route from the RIB, without taking @@ -3030,7 +3077,8 @@ void bgp_rib_remove(struct bgp_node *rn, struct bgp_path_info *pi, struct bgp *bgp = NULL; bool delete_route = false; - bgp_aggregate_decrement(peer->bgp, &rn->p, pi, afi, safi); + bgp_aggregate_decrement(peer->bgp, bgp_node_get_prefix(rn), + pi, afi, safi); if (!CHECK_FLAG(pi->flags, BGP_PATH_HISTORY)) { bgp_path_info_delete(rn, pi); /* keep historical info */ @@ -3065,6 +3113,8 @@ static void bgp_rib_withdraw(struct bgp_node *rn, struct bgp_path_info *pi, struct peer *peer, afi_t afi, safi_t safi, struct prefix_rd *prd) { + const struct prefix *p = bgp_node_get_prefix(rn); + /* apply dampening, if result is suppressed, we'll be retaining * the bgp_path_info in the RIB for historical reference. */ @@ -3072,12 +3122,12 @@ static void bgp_rib_withdraw(struct bgp_node *rn, struct bgp_path_info *pi, && peer->sort == BGP_PEER_EBGP) if ((bgp_damp_withdraw(pi, rn, afi, safi, 0)) == BGP_DAMP_SUPPRESSED) { - bgp_aggregate_decrement(peer->bgp, &rn->p, pi, afi, + bgp_aggregate_decrement(peer->bgp, p, pi, afi, safi); return; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3088,23 +3138,22 @@ static void bgp_rib_withdraw(struct bgp_node *rn, struct bgp_path_info *pi, table = bgp_node_get_bgp_table_info(prn); vnc_import_bgp_del_vnc_host_route_mode_resolve_nve( - peer->bgp, prd, table, &rn->p, pi); + peer->bgp, prd, table, p, pi); } bgp_unlock_node(prn); } if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) { - vnc_import_bgp_del_route(peer->bgp, &rn->p, pi); - vnc_import_bgp_exterior_del_route(peer->bgp, &rn->p, - pi); + vnc_import_bgp_del_route(peer->bgp, p, pi); + vnc_import_bgp_exterior_del_route(peer->bgp, p, pi); } } #endif /* If this is an EVPN route, process for un-import. */ if (safi == SAFI_EVPN) - bgp_evpn_unimport_route(peer->bgp, afi, safi, &rn->p, pi); + bgp_evpn_unimport_route(peer->bgp, afi, safi, p, pi); bgp_rib_remove(rn, pi, peer, afi, safi); } @@ -3186,23 +3235,23 @@ static bool overlay_index_equal(afi_t afi, struct bgp_path_info *path, } /* Check if received nexthop is valid or not. */ -static int bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, - uint8_t type, uint8_t stype, - struct attr *attr, struct bgp_node *rn) +static bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, + uint8_t type, uint8_t stype, + struct attr *attr, struct bgp_node *rn) { - int ret = 0; + bool ret = false; /* Only validated for unicast and multicast currently. 
*/ /* Also valid for EVPN where the nexthop is an IP address. */ if (safi != SAFI_UNICAST && safi != SAFI_MULTICAST && safi != SAFI_EVPN) - return 0; + return false; /* If NEXT_HOP is present, validate it. */ if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) { if (attr->nexthop.s_addr == INADDR_ANY || IPV4_CLASS_DE(ntohl(attr->nexthop.s_addr)) || bgp_nexthop_self(bgp, afi, type, stype, attr, rn)) - return 1; + return true; } /* If MP_NEXTHOP is present, validate it. */ @@ -3233,7 +3282,7 @@ static int bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, break; default: - ret = 1; + ret = true; break; } } @@ -3241,7 +3290,7 @@ static int bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, return ret; } -int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, +int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, struct attr *attr, afi_t afi, safi_t safi, int type, int sub_type, struct prefix_rd *prd, mpls_label_t *label, uint32_t num_labels, int soft_reconfig, @@ -3265,7 +3314,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, uint8_t pi_type = 0; uint8_t pi_sub_type = 0; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC int vnc_implicit_withdraw = 0; #endif int same_attr = 0; @@ -3565,7 +3614,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, if (!CHECK_FLAG(pi->flags, BGP_PATH_HISTORY)) bgp_damp_withdraw(pi, rn, afi, safi, 1); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3655,7 +3704,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, } } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (vnc_implicit_withdraw) { @@ -3738,7 +3787,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, } else bgp_path_info_set_flag(rn, pi, BGP_PATH_VALID); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3789,7 +3838,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, vpn_leak_to_vrf_update(bgp, pi); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (SAFI_MPLS_VPN == safi) { mpls_label_t label_decoded = decode_label(label); @@ -3898,7 +3947,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, /* route_node_get lock */ bgp_unlock_node(rn); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { struct bgp_node *prn = NULL; struct bgp_table *table = NULL; @@ -3938,7 +3987,7 @@ int bgp_update(struct peer *peer, struct prefix *p, uint32_t addpath_id, vpn_leak_to_vrf_update(bgp, new); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (SAFI_MPLS_VPN == safi) { mpls_label_t label_decoded = decode_label(label); @@ -3995,7 +4044,7 @@ filtered: bgp_unlock_node(rn); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * Filtered update is treated as an implicit withdrawal (see * bgp_rib_remove() @@ -4010,7 +4059,7 @@ filtered: return 0; } -int bgp_withdraw(struct peer *peer, struct prefix *p, uint32_t addpath_id, +int bgp_withdraw(struct peer *peer, const struct prefix *p, uint32_t addpath_id, struct attr *attr, afi_t afi, safi_t safi, int type, int sub_type, struct prefix_rd *prd, mpls_label_t *label, uint32_t num_labels, struct bgp_route_evpn *evpn) @@ -4020,7 +4069,7 @@ int bgp_withdraw(struct peer *peer, struct prefix *p, uint32_t addpath_id, struct 
bgp_node *rn; struct bgp_path_info *pi; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((SAFI_MPLS_VPN == safi) || (SAFI_ENCAP == safi)) { rfapiProcessWithdraw(peer, NULL, p, prd, NULL, afi, safi, type, 0); @@ -4236,8 +4285,9 @@ static void bgp_soft_reconfig_table(struct peer *peer, afi_t afi, safi_t safi, else memset(&evpn, 0, sizeof(evpn)); - ret = bgp_update(peer, &rn->p, ain->addpath_rx_id, - ain->attr, afi, safi, ZEBRA_ROUTE_BGP, + ret = bgp_update(peer, bgp_node_get_prefix(rn), + ain->addpath_rx_id, ain->attr, + afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, prd, label_pnt, num_labels, 1, &evpn); @@ -4263,16 +4313,18 @@ void bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi) for (rn = bgp_table_top(peer->bgp->rib[afi][safi]); rn; rn = bgp_route_next(rn)) { table = bgp_node_get_bgp_table_info(rn); - if (table != NULL) { - struct prefix_rd prd; - prd.family = AF_UNSPEC; - prd.prefixlen = 64; - memcpy(&prd.val, rn->p.u.val, 8); + if (table == NULL) + continue; + + const struct prefix *p = bgp_node_get_prefix(rn); + struct prefix_rd prd; + + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + memcpy(&prd.val, p->u.val, 8); - bgp_soft_reconfig_table(peer, afi, safi, table, - &prd); - } + bgp_soft_reconfig_table(peer, afi, safi, table, &prd); } } @@ -4311,7 +4363,8 @@ static wq_item_status bgp_clear_route_node(struct work_queue *wq, void *data) /* If this is an EVPN route, process for * un-import. */ if (safi == SAFI_EVPN) - bgp_evpn_unimport_route(bgp, afi, safi, &rn->p, + bgp_evpn_unimport_route(bgp, afi, safi, + bgp_node_get_prefix(rn), pi); /* Handle withdraw for VRF route-leaking and L3VPN */ if (SAFI_UNICAST == safi @@ -4514,7 +4567,7 @@ void bgp_clear_route_all(struct peer *peer) FOREACH_AFI_SAFI (afi, safi) bgp_clear_route(peer, afi, safi); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessPeerDown(peer); #endif } @@ -4592,30 +4645,30 @@ void bgp_clear_stale_route(struct peer *peer, afi_t afi, safi_t safi) } } -int bgp_outbound_policy_exists(struct peer *peer, struct bgp_filter *filter) +bool bgp_outbound_policy_exists(struct peer *peer, struct bgp_filter *filter) { if (peer->sort == BGP_PEER_IBGP) - return 1; + return true; if (peer->sort == BGP_PEER_EBGP && (ROUTE_MAP_OUT_NAME(filter) || PREFIX_LIST_OUT_NAME(filter) || FILTER_LIST_OUT_NAME(filter) || DISTRIBUTE_OUT_NAME(filter))) - return 1; - return 0; + return true; + return false; } -int bgp_inbound_policy_exists(struct peer *peer, struct bgp_filter *filter) +bool bgp_inbound_policy_exists(struct peer *peer, struct bgp_filter *filter) { if (peer->sort == BGP_PEER_IBGP) - return 1; + return true; if (peer->sort == BGP_PEER_EBGP && (ROUTE_MAP_IN_NAME(filter) || PREFIX_LIST_IN_NAME(filter) || FILTER_LIST_IN_NAME(filter) || DISTRIBUTE_IN_NAME(filter))) - return 1; - return 0; + return true; + return false; } static void bgp_cleanup_table(struct bgp *bgp, struct bgp_table *table, @@ -4627,13 +4680,14 @@ static void bgp_cleanup_table(struct bgp *bgp, struct bgp_table *table, for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = next) { + const struct prefix *p = bgp_node_get_prefix(rn); + next = pi->next; /* Unimport EVPN routes from VRFs */ if (safi == SAFI_EVPN) bgp_evpn_unimport_route(bgp, AFI_L2VPN, - SAFI_EVPN, - &rn->p, pi); + SAFI_EVPN, p, pi); if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) && pi->type == ZEBRA_ROUTE_BGP @@ -4642,8 +4696,7 @@ static void bgp_cleanup_table(struct bgp *bgp, struct bgp_table *table, || pi->sub_type == BGP_ROUTE_IMPORTED)) { if 
(bgp_fibupd_safi(safi)) - bgp_zebra_withdraw(&rn->p, pi, bgp, - safi); + bgp_zebra_withdraw(p, pi, bgp, safi); bgp_path_info_reap(rn, pi); } } @@ -4892,7 +4945,7 @@ static void bgp_static_free(struct bgp_static *bgp_static) XFREE(MTYPE_BGP_STATIC, bgp_static); } -void bgp_static_update(struct bgp *bgp, struct prefix *p, +void bgp_static_update(struct bgp *bgp, const struct prefix *p, struct bgp_static *bgp_static, afi_t afi, safi_t safi) { struct bgp_node *rn; @@ -4902,7 +4955,7 @@ void bgp_static_update(struct bgp *bgp, struct prefix *p, struct attr attr; struct attr *attr_new; route_map_result_t ret; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC int vnc_implicit_withdraw = 0; #endif @@ -4984,7 +5037,7 @@ void bgp_static_update(struct bgp *bgp, struct prefix *p, bgp_path_info_restore(rn, pi); else bgp_aggregate_decrement(bgp, p, pi, afi, safi); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) { @@ -5003,7 +5056,7 @@ void bgp_static_update(struct bgp *bgp, struct prefix *p, bgp_attr_unintern(&pi->attr); pi->attr = attr_new; pi->uptime = bgp_clock(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { if (vnc_implicit_withdraw) { @@ -5121,7 +5174,7 @@ void bgp_static_update(struct bgp *bgp, struct prefix *p, aspath_unintern(&attr.aspath); } -void bgp_static_withdraw(struct bgp *bgp, struct prefix *p, afi_t afi, +void bgp_static_withdraw(struct bgp *bgp, const struct prefix *p, afi_t afi, safi_t safi) { struct bgp_node *rn; @@ -5155,7 +5208,7 @@ void bgp_static_withdraw(struct bgp *bgp, struct prefix *p, afi_t afi, /* * Used for SAFI_MPLS_VPN and SAFI_ENCAP */ -static void bgp_static_withdraw_safi(struct bgp *bgp, struct prefix *p, +static void bgp_static_withdraw_safi(struct bgp *bgp, const struct prefix *p, afi_t afi, safi_t safi, struct prefix_rd *prd) { @@ -5172,7 +5225,7 @@ static void bgp_static_withdraw_safi(struct bgp *bgp, struct prefix *p, /* Withdraw static BGP route from routing table. 
*/ if (pi) { -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessWithdraw( pi->peer, NULL, p, prd, pi->attr, afi, safi, pi->type, 1); /* Kill, since it is an administrative change */ @@ -5190,7 +5243,7 @@ static void bgp_static_withdraw_safi(struct bgp *bgp, struct prefix *p, bgp_unlock_node(rn); } -static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p, +static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p, struct bgp_static *bgp_static, afi_t afi, safi_t safi) { @@ -5199,7 +5252,7 @@ static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p, struct attr *attr_new; struct attr attr = {0}; struct bgp_path_info *pi; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC mpls_label_t label = 0; #endif uint32_t num_labels = 0; @@ -5301,7 +5354,7 @@ static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p, bgp_attr_unintern(&pi->attr); pi->attr = attr_new; pi->uptime = bgp_clock(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (pi->extra) label = decode_label(&pi->extra->label[0]); #endif @@ -5314,7 +5367,7 @@ static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p, && bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { vpn_leak_to_vrf_update(bgp, pi); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessUpdate(pi->peer, NULL, p, &bgp_static->prd, pi->attr, afi, safi, pi->type, pi->sub_type, &label); @@ -5335,7 +5388,7 @@ static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p, new->extra->label[0] = bgp_static->label; new->extra->num_labels = num_labels; } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC label = decode_label(&bgp_static->label); #endif @@ -5354,7 +5407,7 @@ static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p, && bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { vpn_leak_to_vrf_update(bgp, new); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapiProcessUpdate(new->peer, NULL, p, &bgp_static->prd, new->attr, afi, safi, new->type, new->sub_type, &label); #endif @@ -5524,13 +5577,14 @@ void bgp_static_add(struct bgp *bgp) bgp_static = bgp_node_get_bgp_static_info( rm); - bgp_static_update_safi(bgp, &rm->p, + bgp_static_update_safi(bgp, + bgp_node_get_prefix(rm), bgp_static, afi, safi); } } else { bgp_static_update( - bgp, &rn->p, + bgp, bgp_node_get_prefix(rn), bgp_node_get_bgp_static_info(rn), afi, safi); } @@ -5567,15 +5621,20 @@ void bgp_static_delete(struct bgp *bgp) continue; bgp_static_withdraw_safi( - bgp, &rm->p, AFI_IP, safi, - (struct prefix_rd *)&rn->p); + bgp, bgp_node_get_prefix(rm), + AFI_IP, safi, + (struct prefix_rd *) + bgp_node_get_prefix( + rn)); bgp_static_free(bgp_static); bgp_node_set_bgp_static_info(rn, NULL); bgp_unlock_node(rn); } } else { bgp_static = bgp_node_get_bgp_static_info(rn); - bgp_static_withdraw(bgp, &rn->p, afi, safi); + bgp_static_withdraw(bgp, + bgp_node_get_prefix(rn), + afi, safi); bgp_static_free(bgp_static); bgp_node_set_bgp_static_info(rn, NULL); bgp_unlock_node(rn); @@ -5609,13 +5668,15 @@ void bgp_static_redo_import_check(struct bgp *bgp) bgp_static = bgp_node_get_bgp_static_info( rm); - bgp_static_update_safi(bgp, &rm->p, + bgp_static_update_safi(bgp, + bgp_node_get_prefix(rm), bgp_static, afi, safi); } } else { bgp_static = bgp_node_get_bgp_static_info(rn); - bgp_static_update(bgp, &rn->p, bgp_static, afi, + bgp_static_update(bgp, bgp_node_get_prefix(rn), + bgp_static, afi, safi); } } @@ -5646,8 +5707,9 @@ static void bgp_purge_af_static_redist_routes(struct bgp *bgp, afi_t afi, || (pi->type != ZEBRA_ROUTE_BGP && pi->sub_type == 
BGP_ROUTE_REDISTRIBUTE))) { - bgp_aggregate_decrement(bgp, &rn->p, pi, afi, - safi); + bgp_aggregate_decrement(bgp, + bgp_node_get_prefix(rn), + pi, afi, safi); bgp_unlink_nexthop(pi); bgp_path_info_delete(rn, pi); bgp_process(bgp, rn, afi, safi); @@ -6005,11 +6067,11 @@ static void bgp_aggregate_free(struct bgp_aggregate *aggregate) XFREE(MTYPE_BGP_AGGREGATE, aggregate); } -static int bgp_aggregate_info_same(struct bgp_path_info *pi, uint8_t origin, - struct aspath *aspath, - struct community *comm, - struct ecommunity *ecomm, - struct lcommunity *lcomm) +static bool bgp_aggregate_info_same(struct bgp_path_info *pi, uint8_t origin, + struct aspath *aspath, + struct community *comm, + struct ecommunity *ecomm, + struct lcommunity *lcomm) { static struct aspath *ae = NULL; @@ -6017,37 +6079,34 @@ static int bgp_aggregate_info_same(struct bgp_path_info *pi, uint8_t origin, ae = aspath_empty(); if (!pi) - return 0; + return false; if (origin != pi->attr->origin) - return 0; + return false; if (!aspath_cmp(pi->attr->aspath, (aspath) ? aspath : ae)) - return 0; + return false; if (!community_cmp(pi->attr->community, comm)) - return 0; + return false; if (!ecommunity_cmp(pi->attr->ecommunity, ecomm)) - return 0; + return false; if (!lcommunity_cmp(pi->attr->lcommunity, lcomm)) - return 0; + return false; if (!CHECK_FLAG(pi->flags, BGP_PATH_VALID)) - return 0; + return false; - return 1; + return true; } -static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi, - struct prefix *p, uint8_t origin, - struct aspath *aspath, - struct community *community, - struct ecommunity *ecommunity, - struct lcommunity *lcommunity, - uint8_t atomic_aggregate, - struct bgp_aggregate *aggregate) +static void bgp_aggregate_install( + struct bgp *bgp, afi_t afi, safi_t safi, const struct prefix *p, + uint8_t origin, struct aspath *aspath, struct community *community, + struct ecommunity *ecommunity, struct lcommunity *lcommunity, + uint8_t atomic_aggregate, struct bgp_aggregate *aggregate) { struct bgp_node *rn; struct bgp_table *table; @@ -6124,9 +6183,8 @@ static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi, } /* Update an aggregate as routes are added/removed from the BGP table */ -void bgp_aggregate_route(struct bgp *bgp, struct prefix *p, - afi_t afi, safi_t safi, - struct bgp_aggregate *aggregate) +void bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi, + safi_t safi, struct bgp_aggregate *aggregate) { struct bgp_table *table; struct bgp_node *top; @@ -6162,7 +6220,9 @@ void bgp_aggregate_route(struct bgp *bgp, struct prefix *p, top = bgp_node_get(table, p); for (rn = bgp_node_get(table, p); rn; rn = bgp_route_next_until(rn, top)) { - if (rn->p.prefixlen <= p->prefixlen) + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (rn_p->prefixlen <= p->prefixlen) continue; match = 0; @@ -6296,8 +6356,8 @@ void bgp_aggregate_route(struct bgp *bgp, struct prefix *p, aggregate); } -void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi, - safi_t safi, struct bgp_aggregate *aggregate) +void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, afi_t afi, + safi_t safi, struct bgp_aggregate *aggregate) { struct bgp_table *table; struct bgp_node *top; @@ -6311,7 +6371,9 @@ void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi, top = bgp_node_get(table, p); for (rn = bgp_node_get(table, p); rn; rn = bgp_route_next_until(rn, top)) { - if (rn->p.prefixlen <= p->prefixlen) + const struct prefix *rn_p = 
bgp_node_get_prefix(rn); + + if (rn_p->prefixlen <= p->prefixlen) continue; match = 0; @@ -6387,7 +6449,8 @@ void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi, bgp_unlock_node(top); } -static void bgp_add_route_to_aggregate(struct bgp *bgp, struct prefix *aggr_p, +static void bgp_add_route_to_aggregate(struct bgp *bgp, + const struct prefix *aggr_p, struct bgp_path_info *pinew, afi_t afi, safi_t safi, struct bgp_aggregate *aggregate) @@ -6493,7 +6556,7 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi, safi_t safi, struct bgp_path_info *pi, struct bgp_aggregate *aggregate, - struct prefix *aggr_p) + const struct prefix *aggr_p) { uint8_t origin; struct aspath *aspath = NULL; @@ -6597,7 +6660,7 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi, lcommunity, atomic_aggregate, aggregate); } -void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p, +void bgp_aggregate_increment(struct bgp *bgp, const struct prefix *p, struct bgp_path_info *pi, afi_t afi, safi_t safi) { struct bgp_node *child; @@ -6621,16 +6684,18 @@ void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p, /* Aggregate address configuration check. */ for (rn = child; rn; rn = bgp_node_parent_nolock(rn)) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + aggregate = bgp_node_get_bgp_aggregate_info(rn); - if (aggregate != NULL && rn->p.prefixlen < p->prefixlen) { - bgp_add_route_to_aggregate(bgp, &rn->p, pi, afi, - safi, aggregate); + if (aggregate != NULL && rn_p->prefixlen < p->prefixlen) { + bgp_add_route_to_aggregate(bgp, rn_p, pi, afi, safi, + aggregate); } } bgp_unlock_node(child); } -void bgp_aggregate_decrement(struct bgp *bgp, struct prefix *p, +void bgp_aggregate_decrement(struct bgp *bgp, const struct prefix *p, struct bgp_path_info *del, afi_t afi, safi_t safi) { struct bgp_node *child; @@ -6651,10 +6716,12 @@ void bgp_aggregate_decrement(struct bgp *bgp, struct prefix *p, /* Aggregate address configuration check. */ for (rn = child; rn; rn = bgp_node_parent_nolock(rn)) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + aggregate = bgp_node_get_bgp_aggregate_info(rn); - if (aggregate != NULL && rn->p.prefixlen < p->prefixlen) { - bgp_remove_route_from_aggregate(bgp, afi, safi, - del, aggregate, &rn->p); + if (aggregate != NULL && rn_p->prefixlen < p->prefixlen) { + bgp_remove_route_from_aggregate(bgp, afi, safi, del, + aggregate, rn_p); } } bgp_unlock_node(child); @@ -6828,7 +6895,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi, if (as_set == AGGREGATE_AS_SET) { as_set_new = AGGREGATE_AS_UNSET; zlog_warn( - "%s: Ignoring as-set because `bgp reject-as-sets` is enabled.\n", + "%s: Ignoring as-set because `bgp reject-as-sets` is enabled.", __func__); vty_out(vty, "Ignoring as-set because `bgp reject-as-sets` is enabled.\n"); @@ -7317,8 +7384,8 @@ void bgp_redistribute_withdraw(struct bgp *bgp, afi_t afi, int type, vpn_leak_from_vrf_withdraw(bgp_get_default(), bgp, pi); } - bgp_aggregate_decrement(bgp, &rn->p, pi, afi, - SAFI_UNICAST); + bgp_aggregate_decrement(bgp, bgp_node_get_prefix(rn), + pi, afi, SAFI_UNICAST); bgp_path_info_delete(rn, pi); bgp_process(bgp, rn, afi, SAFI_UNICAST); } @@ -7326,7 +7393,7 @@ void bgp_redistribute_withdraw(struct bgp *bgp, afi_t afi, int type, } /* Static function to display route. 
*/ -static void route_vty_out_route(struct prefix *p, struct vty *vty, +static void route_vty_out_route(const struct prefix *p, struct vty *vty, json_object *json) { int len = 0; @@ -7483,7 +7550,7 @@ static char *bgp_nexthop_hostname(struct peer *peer, struct attr *attr) } /* called from terminal list command */ -void route_vty_out(struct vty *vty, struct prefix *p, +void route_vty_out(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, safi_t safi, json_object *json_paths) { @@ -7789,32 +7856,19 @@ void route_vty_out(struct vty *vty, struct prefix *p, /* MED/Metric */ if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) - if (json_paths) { - - /* - * Adding "metric" field to match with corresponding - * CLI. "med" will be deprecated in future. - */ - json_object_int_add(json_path, "med", attr->med); + if (json_paths) json_object_int_add(json_path, "metric", attr->med); - } else + else vty_out(vty, "%10u", attr->med); else if (!json_paths) vty_out(vty, " "); /* Local Pref */ if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) - if (json_paths) { - - /* - * Adding "locPrf" field to match with corresponding - * CLI. "localPref" will be deprecated in future. - */ - json_object_int_add(json_path, "localpref", - attr->local_pref); + if (json_paths) json_object_int_add(json_path, "locPrf", - attr->local_pref); - } else + attr->local_pref); + else vty_out(vty, "%7u", attr->local_pref); else if (!json_paths) vty_out(vty, " "); @@ -7833,17 +7887,10 @@ void route_vty_out(struct vty *vty, struct prefix *p, /* Print aspath */ if (attr->aspath) { - if (json_paths) { - - /* - * Adding "path" field to match with corresponding - * CLI. "aspath" will be deprecated in future. - */ - json_object_string_add(json_path, "aspath", - attr->aspath->str); + if (json_paths) json_object_string_add(json_path, "path", - attr->aspath->str); - } else + attr->aspath->str); + else aspath_print_vty(vty, "%s", attr->aspath, " "); } @@ -7906,7 +7953,7 @@ void route_vty_out(struct vty *vty, struct prefix *p, vty_out(vty, "%s\n", attr->ecommunity->str); } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* prints an additional line, indented, with VNC info, if * present */ if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)) @@ -7916,8 +7963,9 @@ void route_vty_out(struct vty *vty, struct prefix *p, } /* called from terminal list command */ -void route_vty_out_tmp(struct vty *vty, struct prefix *p, struct attr *attr, - safi_t safi, bool use_json, json_object *json_ar) +void route_vty_out_tmp(struct vty *vty, const struct prefix *p, + struct attr *attr, safi_t safi, bool use_json, + json_object *json_ar) { json_object *json_status = NULL; json_object *json_net = NULL; @@ -7985,34 +8033,16 @@ void route_vty_out_tmp(struct vty *vty, struct prefix *p, struct attr *attr, json_object_int_add(json_net, "metric", attr->med); - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) { - - /* - * Adding "locPrf" field to match with - * corresponding CLI. "localPref" will be - * deprecated in future. - */ - json_object_int_add(json_net, "localPref", - attr->local_pref); + if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) json_object_int_add(json_net, "locPrf", - attr->local_pref); - } + attr->local_pref); json_object_int_add(json_net, "weight", attr->weight); /* Print aspath */ - if (attr->aspath) { - - /* - * Adding "path" field to match with - * corresponding CLI. "localPref" will be - * deprecated in future. 
- */ - json_object_string_add(json_net, "asPath", - attr->aspath->str); + if (attr->aspath) json_object_string_add(json_net, "path", - attr->aspath->str); - } + attr->aspath->str); /* Print origin */ json_object_string_add(json_net, "bgpOriginCode", @@ -8079,7 +8109,7 @@ void route_vty_out_tmp(struct vty *vty, struct prefix *p, struct attr *attr, vty_out(vty, "\n"); } -void route_vty_out_tag(struct vty *vty, struct prefix *p, +void route_vty_out_tag(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, safi_t safi, json_object *json) { @@ -8172,7 +8202,7 @@ void route_vty_out_tag(struct vty *vty, struct prefix *p, } } -void route_vty_out_overlay(struct vty *vty, struct prefix *p, +void route_vty_out_overlay(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, json_object *json_paths) { @@ -8277,7 +8307,7 @@ void route_vty_out_overlay(struct vty *vty, struct prefix *p, mac = ecom_mac2str((char *)routermac->val); if (mac) { if (!json_path) { - vty_out(vty, "/%s", (char *)mac); + vty_out(vty, "/%s", mac); } else { json_object_string_add(json_overlay, "rmac", mac); @@ -8296,9 +8326,10 @@ void route_vty_out_overlay(struct vty *vty, struct prefix *p, } /* dampening route */ -static void damp_route_vty_out(struct vty *vty, struct prefix *p, - struct bgp_path_info *path, int display, afi_t afi, - safi_t safi, bool use_json, json_object *json) +static void damp_route_vty_out(struct vty *vty, const struct prefix *p, + struct bgp_path_info *path, int display, + afi_t afi, safi_t safi, bool use_json, + json_object *json) { struct attr *attr; int len; @@ -8360,9 +8391,10 @@ static void damp_route_vty_out(struct vty *vty, struct prefix *p, } /* flap route */ -static void flap_route_vty_out(struct vty *vty, struct prefix *p, - struct bgp_path_info *path, int display, afi_t afi, - safi_t safi, bool use_json, json_object *json) +static void flap_route_vty_out(struct vty *vty, const struct prefix *p, + struct bgp_path_info *path, int display, + afi_t afi, safi_t safi, bool use_json, + json_object *json) { struct attr *attr; struct bgp_damp_info *bdi; @@ -8625,8 +8657,10 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, } if (safi == SAFI_EVPN) { if (!json_paths) { - bgp_evpn_route2str((struct prefix_evpn *)&bn->p, - buf2, sizeof(buf2)); + bgp_evpn_route2str( + (struct prefix_evpn *) + bgp_node_get_prefix(bn), + buf2, sizeof(buf2)); vty_out(vty, " Route %s", buf2); if (tag_buf[0] != '\0') vty_out(vty, " VNI %s", tag_buf); @@ -8646,11 +8680,14 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, rn = parent_ri->net; if (rn && rn->prn) { prn = rn->prn; - prefix_rd2str((struct prefix_rd *)&prn->p, + prefix_rd2str((struct prefix_rd *) + bgp_node_get_prefix(prn), buf1, sizeof(buf1)); if (is_pi_family_evpn(parent_ri)) { - bgp_evpn_route2str((struct prefix_evpn *)&rn->p, - buf2, sizeof(buf2)); + bgp_evpn_route2str( + (struct prefix_evpn *) + bgp_node_get_prefix(rn), + buf2, sizeof(buf2)); vty_out(vty, " Imported from %s:%s, VNI %s\n", buf1, buf2, tag_buf); } else vty_out(vty, " Imported from %s:%s\n", buf1, buf2); @@ -8695,10 +8732,22 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, json_object_string_add( json_path, "aggregatorId", inet_ntoa(attr->aggregator_addr)); + if (attr->aggregator_as == BGP_AS_ZERO) + json_object_boolean_true_add( + json_path, "aggregatorAsMalformed"); + else + json_object_boolean_false_add( + json_path, "aggregatorAsMalformed"); } else { - vty_out(vty, ", (aggregated by %u %s)", - 
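In route_vty_out() and route_vty_out_tmp() above, the duplicated JSON keys are dropped: only "metric", "locPrf" and "path" are emitted now, while the parallel "med", "localpref"/"localPref" and "aspath"/"asPath" keys disappear. A small json-c sketch of how a consumer of the JSON show output would read the surviving keys; json_path stands for one element of the paths array and the function name is illustrative, not part of this change:

#include <stdio.h>
#include <json-c/json.h>

/* Read the consolidated keys from one path object; callers still parsing
 * the old "med"/"localpref"/"aspath" names need to move to these. */
static void example_read_path(struct json_object *json_path)
{
	struct json_object *val;

	if (json_object_object_get_ex(json_path, "metric", &val))
		printf("MED: %d\n", json_object_get_int(val));
	if (json_object_object_get_ex(json_path, "locPrf", &val))
		printf("local-pref: %d\n", json_object_get_int(val));
	if (json_object_object_get_ex(json_path, "path", &val))
		printf("AS path: %s\n", json_object_get_string(val));
}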
attr->aggregator_as, - inet_ntoa(attr->aggregator_addr)); + if (attr->aggregator_as == BGP_AS_ZERO) + vty_out(vty, + ", (aggregated by %u(malformed) %s)", + attr->aggregator_as, + inet_ntoa(attr->aggregator_addr)); + else + vty_out(vty, ", (aggregated by %u %s)", + attr->aggregator_as, + inet_ntoa(attr->aggregator_addr)); } } @@ -8739,8 +8788,10 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, /* Line2 display Next-hop, Neighbor, Router-id */ /* Display the nexthop */ - if ((bn->p.family == AF_INET || bn->p.family == AF_ETHERNET - || bn->p.family == AF_EVPN) + const struct prefix *bn_p = bgp_node_get_prefix(bn); + + if ((bn_p->family == AF_INET || bn_p->family == AF_ETHERNET + || bn_p->family == AF_EVPN) && (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || safi == SAFI_EVPN || !BGP_ATTR_NEXTHOP_AFI_IP6(attr))) { if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP @@ -8841,7 +8892,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, if (path->peer == bgp->peer_self) { if (safi == SAFI_EVPN - || (bn->p.family == AF_INET + || (bn_p->family == AF_INET && !BGP_ATTR_NEXTHOP_AFI_IP6(attr))) { if (json_paths) json_object_string_add(json_peer, "peerId", @@ -9025,21 +9076,15 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, bgp_origin_long_str[attr->origin]); if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) { - if (json_paths) { - /* - * Adding "metric" field to match with - * corresponding CLI. "med" will be - * deprecated in future. - */ - json_object_int_add(json_path, "med", attr->med); + if (json_paths) json_object_int_add(json_path, "metric", attr->med); - } else + else vty_out(vty, ", metric %u", attr->med); } if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) { if (json_paths) - json_object_int_add(json_path, "localpref", + json_object_int_add(json_path, "locPrf", attr->local_pref); else vty_out(vty, ", localpref %u", attr->local_pref); @@ -9511,6 +9556,8 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, /* Start processing of routes. 
*/ for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + pi = bgp_node_get_bgp_path_info(rn); if (pi == NULL) continue; @@ -9540,7 +9587,7 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, if (type == bgp_show_type_prefix_list) { struct prefix_list *plist = output_arg; - if (prefix_list_apply(plist, &rn->p) + if (prefix_list_apply(plist, rn_p) != PREFIX_PERMIT) continue; } @@ -9562,7 +9609,7 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, path.peer = pi->peer; path.attr = &dummy_attr; - ret = route_map_apply(rmap, &rn->p, RMAP_BGP, + ret = route_map_apply(rmap, rn_p, RMAP_BGP, &path); if (ret == RMAP_DENYMATCH) continue; @@ -9580,20 +9627,20 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, if (type == bgp_show_type_cidr_only) { uint32_t destination; - destination = ntohl(rn->p.u.prefix4.s_addr); + destination = ntohl(rn_p->u.prefix4.s_addr); if (IN_CLASSC(destination) - && rn->p.prefixlen == 24) + && rn_p->prefixlen == 24) continue; if (IN_CLASSB(destination) - && rn->p.prefixlen == 16) + && rn_p->prefixlen == 16) continue; if (IN_CLASSA(destination) - && rn->p.prefixlen == 8) + && rn_p->prefixlen == 8) continue; } if (type == bgp_show_type_prefix_longer) { p = output_arg; - if (!prefix_match(p, &rn->p)) + if (!prefix_match(p, rn_p)) continue; } if (type == bgp_show_type_community_all) { @@ -9706,14 +9753,16 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, } if (type == bgp_show_type_dampend_paths || type == bgp_show_type_damp_neighbor) - damp_route_vty_out(vty, &rn->p, pi, display, AFI_IP, - safi, use_json, json_paths); + damp_route_vty_out(vty, rn_p, pi, display, + AFI_IP, safi, use_json, + json_paths); else if (type == bgp_show_type_flap_statistics || type == bgp_show_type_flap_neighbor) - flap_route_vty_out(vty, &rn->p, pi, display, AFI_IP, - safi, use_json, json_paths); + flap_route_vty_out(vty, rn_p, pi, display, + AFI_IP, safi, use_json, + json_paths); else - route_vty_out(vty, &rn->p, pi, display, safi, + route_vty_out(vty, rn_p, pi, display, safi, json_paths); display++; } @@ -9723,28 +9772,25 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, if (!use_json) continue; - p = &rn->p; /* encode prefix */ - if (p->family == AF_FLOWSPEC) { + if (rn_p->family == AF_FLOWSPEC) { char retstr[BGP_FLOWSPEC_STRING_DISPLAY_MAX]; - bgp_fs_nlri_get_string((unsigned char *) - p->u.prefix_flowspec.ptr, - p->u.prefix_flowspec - .prefixlen, - retstr, - NLRI_STRING_FORMAT_MIN, - NULL); + bgp_fs_nlri_get_string( + (unsigned char *) + rn_p->u.prefix_flowspec.ptr, + rn_p->u.prefix_flowspec.prefixlen, + retstr, NLRI_STRING_FORMAT_MIN, NULL); if (first) - vty_out(vty, "\"%s/%d\": ", - retstr, - p->u.prefix_flowspec.prefixlen); + vty_out(vty, "\"%s/%d\": ", retstr, + rn_p->u.prefix_flowspec + .prefixlen); else - vty_out(vty, ",\"%s/%d\": ", - retstr, - p->u.prefix_flowspec.prefixlen); + vty_out(vty, ",\"%s/%d\": ", retstr, + rn_p->u.prefix_flowspec + .prefixlen); } else { - prefix2str(p, buf2, sizeof(buf2)); + prefix2str(rn_p, buf2, sizeof(buf2)); if (first) vty_out(vty, "\"%s\": ", buf2); else @@ -9810,8 +9856,10 @@ int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi, show_msg = (!use_json && type == bgp_show_type_normal); for (rn = bgp_table_top(table); rn; rn = next) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + next = bgp_route_next(rn); - if (prd_match && memcmp(rn->p.u.val, 
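The cidr-only filter above is unchanged in behaviour; it only reads the destination through the const node prefix now. The rule stays the same: a route is suppressed from the cidr-only view when its prefix length equals the classful default for its address class. A small standalone illustration of that check (sample addresses are arbitrary):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct in_addr a;
	uint32_t destination;

	/* 192.0.2.0/24: class C at its natural /24, so cidr-only skips it. */
	inet_pton(AF_INET, "192.0.2.0", &a);
	destination = ntohl(a.s_addr);
	printf("IN_CLASSC=%d -> suppressed at /24\n", IN_CLASSC(destination) ? 1 : 0);

	/* 10.1.0.0/16: class A but not at /8, so it is still displayed. */
	inet_pton(AF_INET, "10.1.0.0", &a);
	destination = ntohl(a.s_addr);
	printf("IN_CLASSA=%d -> shown at /16\n", IN_CLASSA(destination) ? 1 : 0);

	return 0;
}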
prd_match->val, 8) != 0) + if (prd_match && memcmp(rn_p->u.val, prd_match->val, 8) != 0) continue; itable = bgp_node_get_bgp_table_info(rn); @@ -9819,7 +9867,7 @@ int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi, struct prefix_rd prd; char rd[RD_ADDRSTRLEN]; - memcpy(&prd, &(rn->p), sizeof(struct prefix_rd)); + memcpy(&prd, rn_p, sizeof(struct prefix_rd)); prefix_rd2str(&prd, rd, sizeof(rd)); bgp_show_table(vty, bgp, safi, itable, type, output_arg, use_json, rd, next == NULL, &output_cum, @@ -9922,7 +9970,7 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi, json_object *json) { struct bgp_path_info *pi; - struct prefix *p; + const struct prefix *p; struct peer *peer; struct listnode *node, *nnode; char buf1[RD_ADDRSTRLEN]; @@ -9950,7 +9998,7 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, mpls_label_t label = 0; json_object *json_adv_to = NULL; - p = &rn->p; + p = bgp_node_get_prefix(rn); has_valid_label = bgp_is_valid_label(&rn->local_label); if (has_valid_label) @@ -10210,7 +10258,9 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP) { for (rn = bgp_table_top(rib); rn; rn = bgp_route_next(rn)) { - if (prd && memcmp(rn->p.u.val, prd->val, 8) != 0) + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (prd && memcmp(rn_p->u.val, prd->val, 8) != 0) continue; table = bgp_node_get_bgp_table_info(rn); if (!table) @@ -10219,31 +10269,34 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, if ((rm = bgp_node_match(table, &match)) == NULL) continue; + const struct prefix *rm_p = bgp_node_get_prefix(rm); if (prefix_check - && rm->p.prefixlen != match.prefixlen) { + && rm_p->prefixlen != match.prefixlen) { bgp_unlock_node(rm); continue; } - bgp_show_path_info((struct prefix_rd *)&rn->p, rm, - vty, bgp, afi, safi, json, - pathtype, &display); + bgp_show_path_info((struct prefix_rd *)rn_p, rm, vty, + bgp, afi, safi, json, pathtype, + &display); bgp_unlock_node(rm); } } else if (safi == SAFI_EVPN) { struct bgp_node *longest_pfx; - bool is_exact_pfxlen_match = FALSE; + bool is_exact_pfxlen_match = false; for (rn = bgp_table_top(rib); rn; rn = bgp_route_next(rn)) { - if (prd && memcmp(rn->p.u.val, prd->val, 8) != 0) + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (prd && memcmp(&rn_p->u.val, prd->val, 8) != 0) continue; table = bgp_node_get_bgp_table_info(rn); if (!table) continue; longest_pfx = NULL; - is_exact_pfxlen_match = FALSE; + is_exact_pfxlen_match = false; /* * Search through all the prefixes for a match. The * pfx's are enumerated in ascending order of pfxlens. 
@@ -10252,17 +10305,20 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, */ for (rm = bgp_table_top(table); rm; rm = bgp_route_next(rm)) { + const struct prefix *rm_p = + bgp_node_get_prefix(rm); /* * Get prefixlen of the ip-prefix within type5 * evpn route */ - if (evpn_type5_prefix_match(&rm->p, - &match) && rm->info) { + if (evpn_type5_prefix_match(rm_p, &match) + && rm->info) { longest_pfx = rm; int type5_pfxlen = - bgp_evpn_get_type5_prefixlen(&rm->p); + bgp_evpn_get_type5_prefixlen( + rm_p); if (type5_pfxlen == match.prefixlen) { - is_exact_pfxlen_match = TRUE; + is_exact_pfxlen_match = true; bgp_unlock_node(rm); break; } @@ -10278,9 +10334,9 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, rm = longest_pfx; bgp_lock_node(rm); - bgp_show_path_info((struct prefix_rd *)&rn->p, rm, - vty, bgp, afi, safi, json, - pathtype, &display); + bgp_show_path_info((struct prefix_rd *)rn_p, rm, vty, + bgp, afi, safi, json, pathtype, + &display); bgp_unlock_node(rm); } @@ -10297,8 +10353,9 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, json_object_object_add(json, "paths", json_paths); } else { if ((rn = bgp_node_match(rib, &match)) != NULL) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); if (!prefix_check - || rn->p.prefixlen == match.prefixlen) { + || rn_p->prefixlen == match.prefixlen) { bgp_show_path_info(NULL, rn, vty, bgp, afi, safi, json, pathtype, &display); @@ -11009,6 +11066,7 @@ static void bgp_table_stats_rn(struct bgp_node *rn, struct bgp_node *top, { struct bgp_node *prn = bgp_node_parent_nolock(rn); struct bgp_path_info *pi; + const struct prefix *rn_p; if (rn == top) return; @@ -11016,14 +11074,15 @@ static void bgp_table_stats_rn(struct bgp_node *rn, struct bgp_node *top, if (!bgp_node_has_bgp_path_info_data(rn)) return; + rn_p = bgp_node_get_prefix(rn); ts->counts[BGP_STATS_PREFIXES]++; - ts->counts[BGP_STATS_TOTPLEN] += rn->p.prefixlen; + ts->counts[BGP_STATS_TOTPLEN] += rn_p->prefixlen; #if 0 ts->counts[BGP_STATS_AVGPLEN] = ravg_tally (ts->counts[BGP_STATS_PREFIXES], ts->counts[BGP_STATS_AVGPLEN], - rn->p.prefixlen); + rn_p->prefixlen); #endif /* check if the prefix is included by any other announcements */ @@ -11034,7 +11093,7 @@ static void bgp_table_stats_rn(struct bgp_node *rn, struct bgp_node *top, ts->counts[BGP_STATS_UNAGGREGATEABLE]++; /* announced address space */ if (space) - ts->total_space += pow(2.0, space - rn->p.prefixlen); + ts->total_space += pow(2.0, space - rn_p->prefixlen); } else if (bgp_node_has_bgp_path_info_data(prn)) ts->counts[BGP_STATS_MAX_AGGREGATEABLE]++; @@ -11544,8 +11603,8 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, struct bgp_table *table; struct bgp_adj_in *ain; struct bgp_adj_out *adj; - unsigned long output_count; - unsigned long filtered_count; + unsigned long output_count = 0; + unsigned long filtered_count = 0; struct bgp_node *rn; int header1 = 1; struct bgp *bgp; @@ -11706,14 +11765,17 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, /* Filter prefix using distribute list, * filter list or prefix list */ - if ((bgp_input_filter(peer, &rn->p, &attr, afi, - safi)) == FILTER_DENY) + const struct prefix *rn_p = + bgp_node_get_prefix(rn); + if ((bgp_input_filter(peer, rn_p, &attr, afi, + safi)) + == FILTER_DENY) route_filtered = true; /* Filter prefix using route-map */ - ret = bgp_input_modifier(peer, &rn->p, &attr, - afi, safi, rmap_name, NULL, 0, - NULL); + ret = bgp_input_modifier(peer, rn_p, &attr, 
afi, + safi, rmap_name, NULL, + 0, NULL); if (type == bgp_show_adj_route_filtered && !route_filtered && ret != RMAP_DENY) { @@ -11725,7 +11787,7 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, (route_filtered || ret == RMAP_DENY)) filtered_count++; - route_vty_out_tmp(vty, &rn->p, &attr, safi, + route_vty_out_tmp(vty, rn_p, &attr, safi, use_json, json_ar); bgp_attr_undup(&attr, ain->attr); output_count++; @@ -11802,16 +11864,18 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, header2 = 0; } + const struct prefix *rn_p = + bgp_node_get_prefix(rn); + attr = *adj->attr; ret = bgp_output_modifier( - peer, &rn->p, &attr, afi, safi, + peer, rn_p, &attr, afi, safi, rmap_name); if (ret != RMAP_DENY) { - route_vty_out_tmp(vty, &rn->p, - &attr, safi, - use_json, - json_ar); + route_vty_out_tmp( + vty, rn_p, &attr, safi, + use_json, json_ar); output_count++; } else { filtered_count++; @@ -11830,6 +11894,12 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, vty_out(vty, "%s\n", json_object_to_json_string_ext( json, JSON_C_TO_STRING_PRETTY)); + + if (!output_count && !filtered_count) { + json_object_free(json_scode); + json_object_free(json_ocode); + } + json_object_free(json); } else if (output_count > 0) { if (filtered_count > 0) @@ -12213,7 +12283,7 @@ static int bgp_distance_set(struct vty *vty, const char *distance_str, distance = atoi(distance_str); /* Get BGP distance node. */ - rn = bgp_node_get(bgp_distance_table[afi][safi], (struct prefix *)&p); + rn = bgp_node_get(bgp_distance_table[afi][safi], &p); bdistance = bgp_node_get_bgp_distance_info(rn); if (bdistance) bgp_unlock_node(rn); @@ -12254,8 +12324,7 @@ static int bgp_distance_unset(struct vty *vty, const char *distance_str, return CMD_WARNING_CONFIG_FAILED; } - rn = bgp_node_lookup(bgp_distance_table[afi][safi], - (struct prefix *)&p); + rn = bgp_node_lookup(bgp_distance_table[afi][safi], &p); if (!rn) { vty_out(vty, "Can't find specified prefix\n"); return CMD_WARNING_CONFIG_FAILED; @@ -12280,7 +12349,7 @@ static int bgp_distance_unset(struct vty *vty, const char *distance_str, } /* Apply BGP information to distance method. 
*/ -uint8_t bgp_distance_apply(struct prefix *p, struct bgp_path_info *pinfo, +uint8_t bgp_distance_apply(const struct prefix *p, struct bgp_path_info *pinfo, afi_t afi, safi_t safi, struct bgp *bgp) { struct bgp_node *rn; @@ -12641,7 +12710,9 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name, || (safi == SAFI_EVPN)) { for (rn = bgp_table_top(bgp->rib[AFI_IP][safi]); rn; rn = bgp_route_next(rn)) { - if (prd && memcmp(rn->p.u.val, prd->val, 8) != 0) + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (prd && memcmp(rn_p->u.val, prd->val, 8) != 0) continue; table = bgp_node_get_bgp_table_info(rn); if (!table) @@ -12649,8 +12720,10 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name, if ((rm = bgp_node_match(table, &match)) == NULL) continue; + const struct prefix *rm_p = bgp_node_get_prefix(rn); + if (!prefix_check - || rm->p.prefixlen == match.prefixlen) { + || rm_p->prefixlen == match.prefixlen) { pi = bgp_node_get_bgp_path_info(rm); while (pi) { if (pi->extra && pi->extra->damp_info) { @@ -12669,8 +12742,10 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name, } else { if ((rn = bgp_node_match(bgp->rib[afi][safi], &match)) != NULL) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + if (!prefix_check - || rn->p.prefixlen == match.prefixlen) { + || rn_p->prefixlen == match.prefixlen) { pi = bgp_node_get_bgp_path_info(rn); while (pi) { if (pi->extra && pi->extra->damp_info) { @@ -12806,8 +12881,8 @@ static void bgp_config_write_network_vpn(struct vty *vty, struct bgp *bgp, struct bgp_node *prn; struct bgp_node *rn; struct bgp_table *table; - struct prefix *p; - struct prefix_rd *prd; + const struct prefix *p; + const struct prefix_rd *prd; struct bgp_static *bgp_static; mpls_label_t label; char buf[SU_ADDRSTRLEN]; @@ -12825,8 +12900,9 @@ static void bgp_config_write_network_vpn(struct vty *vty, struct bgp *bgp, if (bgp_static == NULL) continue; - p = &rn->p; - prd = (struct prefix_rd *)&prn->p; + p = bgp_node_get_prefix(rn); + prd = (const struct prefix_rd *)bgp_node_get_prefix( + prn); /* "network" configuration display. */ prefix_rd2str(prd, rdbuf, sizeof(rdbuf)); @@ -12857,8 +12933,8 @@ static void bgp_config_write_network_evpn(struct vty *vty, struct bgp *bgp, struct bgp_node *prn; struct bgp_node *rn; struct bgp_table *table; - struct prefix *p; - struct prefix_rd *prd; + const struct prefix *p; + const struct prefix_rd *prd; struct bgp_static *bgp_static; char buf[PREFIX_STRLEN * 2]; char buf2[SU_ADDRSTRLEN]; @@ -12884,8 +12960,8 @@ static void bgp_config_write_network_evpn(struct vty *vty, struct bgp *bgp, bgp_static->router_mac, NULL, 0); if (bgp_static->eth_s_id) esi = esi2str(bgp_static->eth_s_id); - p = &rn->p; - prd = (struct prefix_rd *)&prn->p; + p = bgp_node_get_prefix(rn); + prd = (struct prefix_rd *)bgp_node_get_prefix(prn); /* "network" configuration display. 
*/ prefix_rd2str(prd, rdbuf, sizeof(rdbuf)); @@ -12928,7 +13004,7 @@ void bgp_config_write_network(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi) { struct bgp_node *rn; - struct prefix *p; + const struct prefix *p; struct bgp_static *bgp_static; struct bgp_aggregate *bgp_aggregate; char buf[SU_ADDRSTRLEN]; @@ -12950,7 +13026,7 @@ void bgp_config_write_network(struct vty *vty, struct bgp *bgp, afi_t afi, if (bgp_static == NULL) continue; - p = &rn->p; + p = bgp_node_get_prefix(rn); vty_out(vty, " network %s/%d", inet_ntop(p->family, &p->u.prefix, buf, SU_ADDRSTRLEN), @@ -12976,7 +13052,7 @@ void bgp_config_write_network(struct vty *vty, struct bgp *bgp, afi_t afi, if (bgp_aggregate == NULL) continue; - p = &rn->p; + p = bgp_node_get_prefix(rn); vty_out(vty, " aggregate-address %s/%d", inet_ntop(p->family, &p->u.prefix, buf, SU_ADDRSTRLEN), @@ -13021,15 +13097,11 @@ void bgp_config_write_distance(struct vty *vty, struct bgp *bgp, afi_t afi, for (rn = bgp_table_top(bgp_distance_table[afi][safi]); rn; rn = bgp_route_next(rn)) { bdistance = bgp_node_get_bgp_distance_info(rn); - if (bdistance != NULL) { - char buf[PREFIX_STRLEN]; - - vty_out(vty, " distance %d %s %s\n", - bdistance->distance, - prefix2str(&rn->p, buf, sizeof(buf)), + if (bdistance != NULL) + vty_out(vty, " distance %d %pRN %s\n", + bdistance->distance, rn, bdistance->access_list ? bdistance->access_list : ""); - } } } diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 0ad656d133..ad08bbf440 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -125,7 +125,7 @@ struct bgp_path_info_extra { struct in6_addr sid[BGP_MAX_SIDS]; uint32_t num_sids; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC union { struct { @@ -237,6 +237,7 @@ struct bgp_path_info { #define BGP_PATH_MULTIPATH_CHG (1 << 12) #define BGP_PATH_RIB_ATTR_CHG (1 << 13) #define BGP_PATH_ANNC_NH_SELF (1 << 14) +#define BGP_PATH_LINK_BW_CHG (1 << 15) /* BGP route type. This can be static, RIP, OSPF, BGP etc. 
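bgp_config_write_distance() above switches from prefix2str() into a stack buffer to printing the node directly with %pRN, the printfrr extension that bgp_table.h registers for struct bgp_node * later in this diff. A short sketch of the idiom, assuming printfrr-aware vty_out()/zlog_debug() as used elsewhere in this commit; the function name is illustrative:

/* With "%pRN" registered, a bgp_node prints as its prefix without any
 * temporary buffer management. */
static void example_print_node(struct vty *vty, struct bgp_node *rn)
{
	vty_out(vty, "node %pRN\n", rn);
	zlog_debug("processing %pRN", rn);
}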
*/ uint8_t type; @@ -467,6 +468,27 @@ static inline bool is_pi_family_matching(struct bgp_path_info *pi, return false; } +static inline void prep_for_rmap_apply(struct bgp_path_info *dst_pi, + struct bgp_path_info_extra *dst_pie, + struct bgp_node *rn, + struct bgp_path_info *src_pi, + struct peer *peer, struct attr *attr) +{ + memset(dst_pi, 0, sizeof(struct bgp_path_info)); + dst_pi->peer = peer; + dst_pi->attr = attr; + dst_pi->net = rn; + dst_pi->flags = src_pi->flags; + dst_pi->type = src_pi->type; + dst_pi->sub_type = src_pi->sub_type; + dst_pi->mpath = src_pi->mpath; + if (src_pi->extra) { + memcpy(dst_pie, src_pi->extra, + sizeof(struct bgp_path_info_extra)); + dst_pi->extra = dst_pie; + } +} + /* called before bgp_process() */ DECLARE_HOOK(bgp_process, (struct bgp *bgp, afi_t afi, safi_t safi, @@ -489,11 +511,11 @@ extern void bgp_clear_route(struct peer *, afi_t, safi_t); extern void bgp_clear_route_all(struct peer *); extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t); extern void bgp_clear_stale_route(struct peer *, afi_t, safi_t); -extern int bgp_outbound_policy_exists(struct peer *, struct bgp_filter *); -extern int bgp_inbound_policy_exists(struct peer *, struct bgp_filter *); +extern bool bgp_outbound_policy_exists(struct peer *, struct bgp_filter *); +extern bool bgp_inbound_policy_exists(struct peer *, struct bgp_filter *); extern struct bgp_node *bgp_afi_node_get(struct bgp_table *table, afi_t afi, - safi_t safi, struct prefix *p, + safi_t safi, const struct prefix *p, struct prefix_rd *prd); extern struct bgp_path_info *bgp_path_info_lock(struct bgp_path_info *path); extern struct bgp_path_info *bgp_path_info_unlock(struct bgp_path_info *path); @@ -512,7 +534,7 @@ extern void bgp_path_info_path_with_addpath_rx_str(struct bgp_path_info *pi, extern int bgp_nlri_parse_ip(struct peer *, struct attr *, struct bgp_nlri *); -extern int bgp_maximum_prefix_overflow(struct peer *, afi_t, safi_t, int); +extern bool bgp_maximum_prefix_overflow(struct peer *, afi_t, safi_t, int); extern void bgp_redistribute_add(struct bgp *bgp, struct prefix *p, const union g_addr *nexthop, ifindex_t ifindex, @@ -527,9 +549,10 @@ extern void bgp_static_add(struct bgp *); extern void bgp_static_delete(struct bgp *); extern void bgp_static_redo_import_check(struct bgp *); extern void bgp_purge_static_redist_routes(struct bgp *bgp); -extern void bgp_static_update(struct bgp *, struct prefix *, - struct bgp_static *, afi_t, safi_t); -extern void bgp_static_withdraw(struct bgp *, struct prefix *, afi_t, safi_t); +extern void bgp_static_update(struct bgp *bgp, const struct prefix *p, + struct bgp_static *s, afi_t afi, safi_t safi); +extern void bgp_static_withdraw(struct bgp *bgp, const struct prefix *p, + afi_t afi, safi_t safi); extern int bgp_static_set_safi(afi_t afi, safi_t safi, struct vty *vty, const char *, const char *, const char *, @@ -541,12 +564,17 @@ extern int bgp_static_unset_safi(afi_t afi, safi_t safi, struct vty *, const char *, const char *, const char *); /* this is primarily for MPLS-VPN */ -extern int bgp_update(struct peer *, struct prefix *, uint32_t, struct attr *, - afi_t, safi_t, int, int, struct prefix_rd *, - mpls_label_t *, uint32_t, int, struct bgp_route_evpn *); -extern int bgp_withdraw(struct peer *, struct prefix *, uint32_t, struct attr *, - afi_t, safi_t, int, int, struct prefix_rd *, - mpls_label_t *, uint32_t, struct bgp_route_evpn *); +extern int bgp_update(struct peer *peer, const struct prefix *p, + uint32_t addpath_id, struct attr *attr, + afi_t afi, 
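prep_for_rmap_apply() above builds a throw-away bgp_path_info (plus extra) so a route-map can be evaluated against a candidate path without touching the real one. A hedged usage sketch, assuming the usual bgpd and lib/routemap.h declarations are in scope; the dummy_* names and the wrapper itself are illustrative, not part of this change:

/* Apply 'rmap' to src_pi via disposable stack copies; returns false on a
 * route-map deny. */
static bool example_apply_rmap(struct route_map *rmap, struct bgp_node *rn,
			       struct bgp_path_info *src_pi, struct peer *peer)
{
	struct bgp_path_info dummy_pi;
	struct bgp_path_info_extra dummy_pie;
	struct attr dummy_attr = *src_pi->attr;

	prep_for_rmap_apply(&dummy_pi, &dummy_pie, rn, src_pi, peer,
			    &dummy_attr);

	if (route_map_apply(rmap, bgp_node_get_prefix(rn), RMAP_BGP, &dummy_pi)
	    == RMAP_DENYMATCH)
		return false;

	return true;
}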
safi_t safi, int type, int sub_type, + struct prefix_rd *prd, mpls_label_t *label, + uint32_t num_labels, int soft_reconfig, + struct bgp_route_evpn *evpn); +extern int bgp_withdraw(struct peer *peer, const struct prefix *p, + uint32_t addpath_id, struct attr *attr, afi_t afi, + safi_t safi, int type, int sub_type, + struct prefix_rd *prd, mpls_label_t *label, + uint32_t num_labels, struct bgp_route_evpn *evpn); /* for bgp_nexthop and bgp_damp */ extern void bgp_process(struct bgp *, struct bgp_node *, afi_t, safi_t); @@ -562,19 +590,22 @@ extern void bgp_config_write_network(struct vty *, struct bgp *, afi_t, safi_t); extern void bgp_config_write_distance(struct vty *, struct bgp *, afi_t, safi_t); -extern void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi, - safi_t safi, struct bgp_aggregate *aggregate); -extern void bgp_aggregate_route(struct bgp *bgp, struct prefix *p, afi_t afi, - safi_t safi, struct bgp_aggregate *aggregate); -extern void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p, +extern void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, + afi_t afi, safi_t safi, + struct bgp_aggregate *aggregate); +extern void bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, + afi_t afi, safi_t safi, + struct bgp_aggregate *aggregate); +extern void bgp_aggregate_increment(struct bgp *bgp, const struct prefix *p, struct bgp_path_info *path, afi_t afi, safi_t safi); -extern void bgp_aggregate_decrement(struct bgp *bgp, struct prefix *p, +extern void bgp_aggregate_decrement(struct bgp *bgp, const struct prefix *p, struct bgp_path_info *path, afi_t afi, safi_t safi); -extern uint8_t bgp_distance_apply(struct prefix *p, struct bgp_path_info *path, - afi_t afi, safi_t safi, struct bgp *bgp); +extern uint8_t bgp_distance_apply(const struct prefix *p, + struct bgp_path_info *path, afi_t afi, + safi_t safi, struct bgp *bgp); extern afi_t bgp_node_afi(struct vty *); extern safi_t bgp_node_safi(struct vty *); @@ -584,35 +615,35 @@ extern struct bgp_path_info *info_make(int type, int sub_type, struct peer *peer, struct attr *attr, struct bgp_node *rn); -extern void route_vty_out(struct vty *vty, struct prefix *p, +extern void route_vty_out(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, safi_t safi, json_object *json_paths); -extern void route_vty_out_tag(struct vty *vty, struct prefix *p, +extern void route_vty_out_tag(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, safi_t safi, json_object *json); -extern void route_vty_out_tmp(struct vty *vty, struct prefix *p, +extern void route_vty_out_tmp(struct vty *vty, const struct prefix *p, struct attr *attr, safi_t safi, bool use_json, json_object *json_ar); -extern void route_vty_out_overlay(struct vty *vty, struct prefix *p, +extern void route_vty_out_overlay(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, json_object *json); -extern int subgroup_process_announce_selected(struct update_subgroup *subgrp, - struct bgp_path_info *selected, - struct bgp_node *rn, - uint32_t addpath_tx_id); +extern void subgroup_process_announce_selected(struct update_subgroup *subgrp, + struct bgp_path_info *selected, + struct bgp_node *rn, + uint32_t addpath_tx_id); -extern int subgroup_announce_check(struct bgp_node *rn, - struct bgp_path_info *pi, - struct update_subgroup *subgrp, - struct prefix *p, struct attr *attr); +extern bool subgroup_announce_check(struct bgp_node *rn, + struct bgp_path_info *pi, + struct 
update_subgroup *subgrp, + const struct prefix *p, struct attr *attr); extern void bgp_peer_clear_node_queue_drain_immediate(struct peer *peer); extern void bgp_process_queues_drain_immediate(void); /* for encap/vpn */ extern struct bgp_node *bgp_afi_node_lookup(struct bgp_table *table, afi_t afi, - safi_t safi, struct prefix *p, + safi_t safi, const struct prefix *p, struct prefix_rd *prd); extern void bgp_path_info_restore(struct bgp_node *rn, struct bgp_path_info *path); @@ -629,8 +660,8 @@ extern void bgp_best_selection(struct bgp *bgp, struct bgp_node *rn, struct bgp_path_info_pair *result, afi_t afi, safi_t safi); extern void bgp_zebra_clear_route_change_flags(struct bgp_node *rn); -extern int bgp_zebra_has_route_changed(struct bgp_node *rn, - struct bgp_path_info *selected); +extern bool bgp_zebra_has_route_changed(struct bgp_node *rn, + struct bgp_path_info *selected); extern void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, struct bgp_node *rn, diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 52b5402737..f30fcc195f 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -63,8 +63,9 @@ #include "bgpd/bgp_pbr.h" #include "bgpd/bgp_flowspec_util.h" #include "bgpd/bgp_encap_types.h" +#include "bgpd/bgp_mpath.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" #endif @@ -1015,7 +1016,7 @@ route_match_rd(void *rule, const struct prefix *prefix, route_map_object_t type, void *object) { struct prefix_rd *prd_rule = NULL; - struct prefix_rd *prd_route = NULL; + const struct prefix_rd *prd_route = NULL; struct bgp_path_info *path = NULL; if (type == RMAP_BGP) { @@ -1028,7 +1029,8 @@ route_match_rd(void *rule, const struct prefix *prefix, if (path->net == NULL || path->net->prn == NULL) return RMAP_NOMATCH; - prd_route = (struct prefix_rd *)&path->net->prn->p; + prd_route = + (struct prefix_rd *)bgp_node_get_prefix(path->net->prn); if (memcmp(prd_route->val, prd_rule->val, ECOMMUNITY_SIZE) == 0) return RMAP_MATCH; } @@ -2530,6 +2532,146 @@ static const struct route_map_rule_cmd route_set_ecommunity_soo_cmd = { route_set_ecommunity_free, }; +/* `set extcommunity bandwidth' */ + +struct rmap_ecomm_lb_set { + uint8_t lb_type; +#define RMAP_ECOMM_LB_SET_VALUE 1 +#define RMAP_ECOMM_LB_SET_CUMUL 2 +#define RMAP_ECOMM_LB_SET_NUM_MPATH 3 + bool non_trans; + uint32_t bw; +}; + +static enum route_map_cmd_result_t +route_set_ecommunity_lb(void *rule, const struct prefix *prefix, + route_map_object_t type, void *object) +{ + struct rmap_ecomm_lb_set *rels = rule; + struct bgp_path_info *path; + struct peer *peer; + struct ecommunity ecom_lb = {0}; + struct ecommunity_val lb_eval; + uint32_t bw_bytes = 0; + uint16_t mpath_count = 0; + struct ecommunity *new_ecom; + struct ecommunity *old_ecom; + as_t as; + + if (type != RMAP_BGP) + return RMAP_OKAY; + + path = object; + peer = path->peer; + if (!peer || !peer->bgp) + return RMAP_ERROR; + + /* Build link bandwidth extended community */ + as = (peer->bgp->as > BGP_AS_MAX) ? BGP_AS_TRANS : peer->bgp->as; + if (rels->lb_type == RMAP_ECOMM_LB_SET_VALUE) { + bw_bytes = ((uint64_t)rels->bw * 1000 * 1000) / 8; + } else if (rels->lb_type == RMAP_ECOMM_LB_SET_CUMUL) { + /* process this only for the best path. */ + if (!CHECK_FLAG(path->flags, BGP_PATH_SELECTED)) + return RMAP_OKAY; + + bw_bytes = (uint32_t)bgp_path_info_mpath_cumbw(path); + if (!bw_bytes) + return RMAP_OKAY; + + } else if (rels->lb_type == RMAP_ECOMM_LB_SET_NUM_MPATH) { + + /* process this only for the best path. 
*/ + if (!CHECK_FLAG(path->flags, BGP_PATH_SELECTED)) + return RMAP_OKAY; + + bw_bytes = ((uint64_t)peer->bgp->lb_ref_bw * 1000 * 1000) / 8; + mpath_count = bgp_path_info_mpath_count(path) + 1; + bw_bytes *= mpath_count; + } + + encode_lb_extcomm(as, bw_bytes, rels->non_trans, &lb_eval); + + /* add to route or merge with existing */ + old_ecom = path->attr->ecommunity; + if (old_ecom) { + new_ecom = ecommunity_dup(old_ecom); + ecommunity_add_val(new_ecom, &lb_eval, true, true); + if (!old_ecom->refcnt) + ecommunity_free(&old_ecom); + } else { + ecom_lb.size = 1; + ecom_lb.val = (uint8_t *)lb_eval.val; + new_ecom = ecommunity_dup(&ecom_lb); + } + + /* new_ecom will be intern()'d or attr_flush()'d in call stack */ + path->attr->ecommunity = new_ecom; + path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); + + /* Mark that route-map has set link bandwidth; used in attribute + * setting decisions. + */ + SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET); + + return RMAP_OKAY; +} + +static void *route_set_ecommunity_lb_compile(const char *arg) +{ + struct rmap_ecomm_lb_set *rels; + uint8_t lb_type; + uint32_t bw = 0; + char bw_str[40] = {0}; + char *p, *str; + bool non_trans = false; + + str = (char *)arg; + p = strchr(arg, ' '); + if (p) { + int len; + + len = p - arg; + memcpy(bw_str, arg, len); + non_trans = true; + str = bw_str; + } + + if (strcmp(str, "cumulative") == 0) + lb_type = RMAP_ECOMM_LB_SET_CUMUL; + else if (strcmp(str, "num-multipaths") == 0) + lb_type = RMAP_ECOMM_LB_SET_NUM_MPATH; + else { + char *end = NULL; + + bw = strtoul(str, &end, 10); + if (*end != '\0') + return NULL; + lb_type = RMAP_ECOMM_LB_SET_VALUE; + } + + rels = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, + sizeof(struct rmap_ecomm_lb_set)); + rels->lb_type = lb_type; + rels->bw = bw; + rels->non_trans = non_trans; + + return rels; +} + +static void route_set_ecommunity_lb_free(void *rule) +{ + XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); +} + +/* Set community rule structure. */ +struct route_map_rule_cmd route_set_ecommunity_lb_cmd = { + "extcommunity bandwidth", + route_set_ecommunity_lb, + route_set_ecommunity_lb_compile, + route_set_ecommunity_lb_free, +}; + /* `set origin ORIGIN' */ /* For origin set. 
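route_set_ecommunity_lb() above converts the configured value from megabits per second to bytes per second before encoding (and, for num-multipaths, scales the reference bandwidth by the multipath count). A small worked sketch of that conversion using encode_lb_extcomm() exactly as it is called above; the numbers and the wrapper name are illustrative:

/* 1000 Mbps -> (1000 * 1000 * 1000) / 8 = 125,000,000 bytes/sec is the
 * value that ends up inside the link-bandwidth extended community. */
static void example_encode_lb(as_t as, uint32_t bw_mbps, bool non_trans)
{
	struct ecommunity_val lb_eval;
	uint32_t bw_bytes = ((uint64_t)bw_mbps * 1000 * 1000) / 8;

	encode_lb_extcomm(as, bw_bytes, non_trans, &lb_eval);
	/* lb_eval would then be merged into the path's ecommunity, as the
	 * route-map handler above does with ecommunity_add_val(). */
}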
*/ @@ -2830,6 +2972,57 @@ static const struct route_map_rule_cmd route_match_ipv6_next_hop_cmd = { route_match_ipv6_next_hop_free }; +/* `match ip next-hop IP_ADDRESS' */ + +static enum route_map_cmd_result_t +route_match_ipv4_next_hop(void *rule, const struct prefix *prefix, + route_map_object_t type, void *object) +{ + struct in_addr *addr = rule; + struct bgp_path_info *path; + + if (type == RMAP_BGP) { + path = object; + + if (path->attr->nexthop.s_addr == addr->s_addr || + (path->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4 && + IPV4_ADDR_SAME(&path->attr->mp_nexthop_global_in, addr))) + return RMAP_MATCH; + + return RMAP_NOMATCH; + } + + return RMAP_NOMATCH; +} + +static void *route_match_ipv4_next_hop_compile(const char *arg) +{ + struct in_addr *address; + int ret; + + address = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(struct in_addr)); + + ret = inet_pton(AF_INET, arg, address); + if (!ret) { + XFREE(MTYPE_ROUTE_MAP_COMPILED, address); + return NULL; + } + + return address; +} + +static void route_match_ipv4_next_hop_free(void *rule) +{ + XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); +} + +static const struct route_map_rule_cmd route_match_ipv4_next_hop_cmd = { + "ip next-hop address", + route_match_ipv4_next_hop, + route_match_ipv4_next_hop_compile, + route_match_ipv4_next_hop_free +}; + /* `match ipv6 address prefix-list PREFIX_LIST' */ static enum route_map_cmd_result_t @@ -3586,14 +3779,17 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, bgp_static->rmap.map = map; if (route_update && !bgp_static->backdoor) { - if (bgp_debug_zebra(&bn->p)) + const struct prefix *bn_p = + bgp_node_get_prefix(bn); + + if (bgp_debug_zebra(bn_p)) zlog_debug( "Processing route_map %s update on static route %s", rmap_name, - inet_ntop(bn->p.family, - &bn->p.u.prefix, buf, + inet_ntop(bn_p->family, + &bn_p->u.prefix, buf, INET6_ADDRSTRLEN)); - bgp_static_update(bgp, &bn->p, bgp_static, afi, + bgp_static_update(bgp, bn_p, bgp_static, afi, safi); } } @@ -3615,14 +3811,17 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, aggregate->rmap.map = map; if (route_update) { - if (bgp_debug_zebra(&bn->p)) + const struct prefix *bn_p = + bgp_node_get_prefix(bn); + + if (bgp_debug_zebra(bn_p)) zlog_debug( "Processing route_map %s update on aggregate-address route %s", rmap_name, - inet_ntop(bn->p.family, - &bn->p.u.prefix, buf, + inet_ntop(bn_p->family, + &bn_p->u.prefix, buf, INET6_ADDRSTRLEN)); - bgp_aggregate_route(bgp, &bn->p, afi, safi, + bgp_aggregate_route(bgp, bn_p, afi, safi, aggregate); } } @@ -3691,7 +3890,7 @@ static void bgp_route_map_process_update_cb(char *rmap_name) for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { bgp_route_map_process_update(bgp, rmap_name, 1); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* zlog_debug("%s: calling vnc_routemap_update", __func__); */ vnc_routemap_update(bgp, __func__); #endif @@ -3735,7 +3934,7 @@ static void bgp_route_map_mark_update(const char *rmap_name) } else { for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) bgp_route_map_process_update(bgp, rmap_name, 0); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC zlog_debug("%s: calling vnc_routemap_update", __func__); vnc_routemap_update(bgp, __func__); #endif @@ -4946,6 +5145,53 @@ ALIAS (no_set_ecommunity_soo, "GP extended community attribute\n" "Site-of-Origin extended community\n") +DEFUN (set_ecommunity_lb, + set_ecommunity_lb_cmd, + "set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]", + SET_STR + "BGP extended community 
attribute\n" + "Link bandwidth extended community\n" + "Bandwidth value in Mbps\n" + "Cumulative bandwidth of all multipaths (outbound-only)\n" + "Internally computed bandwidth based on number of multipaths (outbound-only)\n" + "Attribute is set as non-transitive\n") +{ + int idx_lb = 3; + int ret; + char *str; + + str = argv_concat(argv, argc, idx_lb); + ret = generic_set_add(vty, VTY_GET_CONTEXT(route_map_index), + "extcommunity bandwidth", str); + XFREE(MTYPE_TMP, str); + return ret; +} + + +DEFUN (no_set_ecommunity_lb, + no_set_ecommunity_lb_cmd, + "no set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]", + NO_STR + SET_STR + "BGP extended community attribute\n" + "Link bandwidth extended community\n" + "Bandwidth value in Mbps\n" + "Cumulative bandwidth of all multipaths (outbound-only)\n" + "Internally computed bandwidth based on number of multipaths (outbound-only)\n" + "Attribute is set as non-transitive\n") +{ + return generic_set_delete(vty, VTY_GET_CONTEXT(route_map_index), + "extcommunity bandwidth", NULL); +} + +ALIAS (no_set_ecommunity_lb, + no_set_ecommunity_lb_short_cmd, + "no set extcommunity bandwidth", + NO_STR + SET_STR + "BGP extended community attribute\n" + "Link bandwidth extended community\n") + DEFUN (set_origin, set_origin_cmd, "set origin <egp|igp|incomplete>", @@ -5110,6 +5356,28 @@ DEFUN (no_match_ipv6_next_hop, RMAP_EVENT_MATCH_DELETED); } +DEFPY (match_ipv4_next_hop, + match_ipv4_next_hop_cmd, + "[no$no] match ip next-hop address [A.B.C.D]", + NO_STR + MATCH_STR + IP_STR + "Match IP next-hop address of route\n" + "IP address\n" + "IP address of next-hop\n") +{ + int idx_ipv4 = 4; + + if (no) + return bgp_route_match_delete(vty, "ip next-hop address", NULL, + RMAP_EVENT_MATCH_DELETED); + + if (argv[idx_ipv4]->arg) + return bgp_route_match_add(vty, "ip next-hop address", + argv[idx_ipv4]->arg, + RMAP_EVENT_MATCH_ADDED); + return CMD_SUCCESS; +} DEFUN (set_ipv6_nexthop_peer, set_ipv6_nexthop_peer_cmd, @@ -5469,6 +5737,7 @@ void bgp_route_map_init(void) route_map_install_set(&route_set_originator_id_cmd); route_map_install_set(&route_set_ecommunity_rt_cmd); route_map_install_set(&route_set_ecommunity_soo_cmd); + route_map_install_set(&route_set_ecommunity_lb_cmd); route_map_install_set(&route_set_tag_cmd); route_map_install_set(&route_set_label_index_cmd); @@ -5552,6 +5821,9 @@ void bgp_route_map_init(void) install_element(RMAP_NODE, &set_ecommunity_soo_cmd); install_element(RMAP_NODE, &no_set_ecommunity_soo_cmd); install_element(RMAP_NODE, &no_set_ecommunity_soo_short_cmd); + install_element(RMAP_NODE, &set_ecommunity_lb_cmd); + install_element(RMAP_NODE, &no_set_ecommunity_lb_cmd); + install_element(RMAP_NODE, &no_set_ecommunity_lb_short_cmd); #ifdef KEEP_OLD_VPN_COMMANDS install_element(RMAP_NODE, &set_vpn_nexthop_cmd); install_element(RMAP_NODE, &no_set_vpn_nexthop_cmd); @@ -5563,6 +5835,7 @@ void bgp_route_map_init(void) route_map_install_match(&route_match_ipv6_address_cmd); route_map_install_match(&route_match_ipv6_next_hop_cmd); + route_map_install_match(&route_match_ipv4_next_hop_cmd); route_map_install_match(&route_match_ipv6_address_prefix_list_cmd); route_map_install_match(&route_match_ipv6_next_hop_type_cmd); route_map_install_set(&route_set_ipv6_nexthop_global_cmd); @@ -5572,6 +5845,7 @@ void bgp_route_map_init(void) install_element(RMAP_NODE, &match_ipv6_next_hop_cmd); install_element(RMAP_NODE, &no_match_ipv6_next_hop_cmd); + install_element(RMAP_NODE, &match_ipv4_next_hop_cmd); install_element(RMAP_NODE, 
&set_ipv6_nexthop_global_cmd); install_element(RMAP_NODE, &no_set_ipv6_nexthop_global_cmd); install_element(RMAP_NODE, &set_ipv6_nexthop_prefer_global_cmd); diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c index 2ca0c7b96d..e40c7231a7 100644 --- a/bgpd/bgp_rpki.c +++ b/bgpd/bgp_rpki.c @@ -94,6 +94,7 @@ enum return_values { SUCCESS = 0, ERROR = -1 }; struct rpki_for_each_record_arg { struct vty *vty; unsigned int *prefix_amount; + as_t as; }; static int start(void); @@ -273,6 +274,17 @@ static void print_record(const struct pfx_record *record, struct vty *vty) record->max_len, record->asn); } +static void print_record_by_asn(const struct pfx_record *record, void *data) +{ + struct rpki_for_each_record_arg *arg = data; + struct vty *vty = arg->vty; + + if (record->asn == arg->as) { + (*arg->prefix_amount)++; + print_record(record, vty); + } +} + static void print_record_cb(const struct pfx_record *record, void *data) { struct rpki_for_each_record_arg *arg = data; @@ -426,10 +438,10 @@ static void revalidate_bgp_node(struct bgp_node *bgp_node, afi_t afi, label = path->extra->label; num_labels = path->extra->num_labels; } - ret = bgp_update(ain->peer, &bgp_node->p, ain->addpath_rx_id, - ain->attr, afi, safi, ZEBRA_ROUTE_BGP, - BGP_ROUTE_NORMAL, NULL, label, num_labels, 1, - NULL); + ret = bgp_update(ain->peer, bgp_node_get_prefix(bgp_node), + ain->addpath_rx_id, ain->attr, afi, safi, + ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL, label, + num_labels, 1, NULL); if (ret < 0) return; @@ -621,6 +633,36 @@ static struct rtr_mgr_group *get_connected_group(void) return rtr_mgr_get_first_group(rtr_config); } +static void print_prefix_table_by_asn(struct vty *vty, as_t as) +{ + unsigned int number_of_ipv4_prefixes = 0; + unsigned int number_of_ipv6_prefixes = 0; + struct rtr_mgr_group *group = get_connected_group(); + struct rpki_for_each_record_arg arg; + + arg.vty = vty; + arg.as = as; + + if (!group) { + vty_out(vty, "Cannot find a connected group.\n"); + return; + } + + struct pfx_table *pfx_table = group->sockets[0]->pfx_table; + + vty_out(vty, "RPKI/RTR prefix table\n"); + vty_out(vty, "%-40s %s %s\n", "Prefix", "Prefix Length", "Origin-AS"); + + arg.prefix_amount = &number_of_ipv4_prefixes; + pfx_table_for_each_ipv4_record(pfx_table, print_record_by_asn, &arg); + + arg.prefix_amount = &number_of_ipv6_prefixes; + pfx_table_for_each_ipv6_record(pfx_table, print_record_by_asn, &arg); + + vty_out(vty, "Number of IPv4 Prefixes: %u\n", number_of_ipv4_prefixes); + vty_out(vty, "Number of IPv6 Prefixes: %u\n", number_of_ipv6_prefixes); +} + static void print_prefix_table(struct vty *vty) { struct rpki_for_each_record_arg arg; @@ -1190,6 +1232,21 @@ DEFUN (show_rpki_prefix_table, return CMD_SUCCESS; } +DEFPY(show_rpki_as_number, show_rpki_as_number_cmd, + "show rpki as-number (1-4294967295)$by_asn", + SHOW_STR RPKI_OUTPUT_STRING + "Lookup by ASN in prefix table\n" + "AS Number\n") +{ + if (!is_synchronized()) { + vty_out(vty, "No Connection to RPKI cache server.\n"); + return CMD_WARNING; + } + + print_prefix_table_by_asn(vty, by_asn); + return CMD_SUCCESS; +} + DEFPY (show_rpki_prefix, show_rpki_prefix_cmd, "show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)$asn]", @@ -1523,6 +1580,7 @@ static void install_cli_commands(void) install_element(VIEW_NODE, &show_rpki_cache_connection_cmd); install_element(VIEW_NODE, &show_rpki_cache_server_cmd); install_element(VIEW_NODE, &show_rpki_prefix_cmd); + install_element(VIEW_NODE, &show_rpki_as_number_cmd); /* Install debug commands */ install_element(CONFIG_NODE, 
&debug_rpki_cmd); diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c index d507161052..e7aa02863c 100644 --- a/bgpd/bgp_snmp.c +++ b/bgpd/bgp_snmp.c @@ -330,7 +330,7 @@ static uint8_t *bgpVersion(struct variable *v, oid name[], size_t *length, /* Return octet string length 1. */ *var_len = 1; - return (uint8_t *)&version; + return &version; } static uint8_t *bgpLocalAs(struct variable *v, oid name[], size_t *length, @@ -761,20 +761,23 @@ static struct bgp_path_info *bgp4PathAttrLookup(struct variable *v, oid name[], } if (min) { + const struct prefix *rn_p = + bgp_node_get_prefix(rn); + *length = v->namelen + BGP_PATHATTR_ENTRY_OFFSET; offset = name + v->namelen; - oid_copy_addr(offset, &rn->p.u.prefix4, + oid_copy_addr(offset, &rn_p->u.prefix4, IN_ADDR_SIZE); offset += IN_ADDR_SIZE; - *offset = rn->p.prefixlen; + *offset = rn_p->prefixlen; offset++; oid_copy_addr(offset, &min->peer->su.sin.sin_addr, IN_ADDR_SIZE); - addr->prefix = rn->p.u.prefix4; - addr->prefixlen = rn->p.prefixlen; + addr->prefix = rn_p->u.prefix4; + addr->prefixlen = rn_p->prefixlen; bgp_unlock_node(rn); @@ -858,7 +861,7 @@ static int bgpTrapEstablished(struct peer *peer) oid index[sizeof(oid) * IN_ADDR_SIZE]; /* Check if this peer just went to Established */ - if ((peer->last_major_event != OpenConfirm) || !(peer_established(peer))) + if ((peer->ostatus != OpenConfirm) || !(peer_established(peer))) return 0; ret = inet_aton(peer->host, &addr); diff --git a/bgpd/bgp_table.c b/bgpd/bgp_table.c index 04181d38be..dcf9852a67 100644 --- a/bgpd/bgp_table.c +++ b/bgpd/bgp_table.c @@ -168,13 +168,22 @@ static struct bgp_node * bgp_route_next_until_maxlen(struct bgp_node *node, const struct bgp_node *limit, const uint8_t maxlen) { - if (node->l_left && node->p.prefixlen < maxlen - && node->l_left->p.prefixlen <= maxlen) { - return bgp_node_from_rnode(node->l_left); + const struct prefix *p = bgp_node_get_prefix(node); + + if (node->l_left) { + const struct prefix *left_p = + bgp_node_get_prefix(bgp_node_from_rnode(node->l_left)); + + if (p->prefixlen < maxlen && left_p->prefixlen <= maxlen) + return bgp_node_from_rnode(node->l_left); } - if (node->l_right && node->p.prefixlen < maxlen - && node->l_right->p.prefixlen <= maxlen) { - return bgp_node_from_rnode(node->l_right); + + if (node->l_right) { + const struct prefix *right_p = + bgp_node_get_prefix(bgp_node_from_rnode(node->l_right)); + + if (p->prefixlen < maxlen && right_p->prefixlen <= maxlen) + return bgp_node_from_rnode(node->l_right); } while (node->parent && node != limit) { @@ -187,7 +196,8 @@ bgp_route_next_until_maxlen(struct bgp_node *node, const struct bgp_node *limit, return NULL; } -void bgp_table_range_lookup(const struct bgp_table *table, struct prefix *p, +void bgp_table_range_lookup(const struct bgp_table *table, + const struct prefix *p, uint8_t maxlen, struct list *matches) { struct bgp_node *node = bgp_node_from_rnode(table->route_table->top); @@ -196,24 +206,29 @@ void bgp_table_range_lookup(const struct bgp_table *table, struct prefix *p, if (node == NULL) return; - while (node && - node->p.prefixlen <= p->prefixlen && prefix_match(&node->p, p)) { + const struct prefix *node_p = bgp_node_get_prefix(node); + + while (node && node_p->prefixlen <= p->prefixlen + && prefix_match(node_p, p)) { if (bgp_node_has_bgp_path_info_data(node) - && node->p.prefixlen == p->prefixlen) { + && node_p->prefixlen == p->prefixlen) { matched = node; break; } node = bgp_node_from_rnode(node->link[prefix_bit( - &p->u.prefix, node->p.prefixlen)]); + &p->u.prefix, 
node_p->prefixlen)]); + node_p = bgp_node_get_prefix(node); } if (!node) return; - if (matched == NULL && node->p.prefixlen <= maxlen - && prefix_match(p, &node->p) && node->parent == NULL) + node_p = bgp_node_get_prefix(node); + if (matched == NULL && node_p->prefixlen <= maxlen + && prefix_match(p, node_p) && node->parent == NULL) matched = node; - else if ((matched == NULL && node->p.prefixlen > maxlen) || !node->parent) + else if ((matched == NULL && node_p->prefixlen > maxlen) + || !node->parent) return; else if (matched == NULL && node->parent) matched = node = bgp_node_from_rnode(node->parent); @@ -227,7 +242,8 @@ void bgp_table_range_lookup(const struct bgp_table *table, struct prefix *p, } while ((node = bgp_route_next_until_maxlen(node, matched, maxlen))) { - if (prefix_match(p, &node->p)) { + node_p = bgp_node_get_prefix(node); + if (prefix_match(p, node_p)) { if (bgp_node_has_bgp_path_info_data(node)) { bgp_lock_node(node); listnode_add(matches, node); diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h index 69cca9eee4..da2ca3181a 100644 --- a/bgpd/bgp_table.h +++ b/bgpd/bgp_table.h @@ -217,7 +217,7 @@ static inline struct bgp_node *bgp_route_next_until(struct bgp_node *node, * bgp_node_get */ static inline struct bgp_node *bgp_node_get(struct bgp_table *const table, - struct prefix *p) + const struct prefix *p) { return bgp_node_from_rnode(route_node_get(table->route_table, p)); } @@ -226,7 +226,7 @@ static inline struct bgp_node *bgp_node_get(struct bgp_table *const table, * bgp_node_lookup */ static inline struct bgp_node * -bgp_node_lookup(const struct bgp_table *const table, struct prefix *p) +bgp_node_lookup(const struct bgp_table *const table, const struct prefix *p) { return bgp_node_from_rnode(route_node_lookup(table->route_table, p)); } @@ -243,7 +243,7 @@ static inline struct bgp_node *bgp_lock_node(struct bgp_node *node) * bgp_node_match */ static inline struct bgp_node *bgp_node_match(const struct bgp_table *table, - struct prefix *p) + const struct prefix *p) { return bgp_node_from_rnode(route_node_match(table->route_table, p)); } @@ -277,7 +277,7 @@ static inline unsigned long bgp_table_count(const struct bgp_table *const table) * bgp_table_get_next */ static inline struct bgp_node *bgp_table_get_next(const struct bgp_table *table, - struct prefix *p) + const struct prefix *p) { return bgp_node_from_rnode(route_table_get_next(table->route_table, p)); } @@ -347,7 +347,8 @@ static inline uint64_t bgp_table_version(struct bgp_table *table) return table->version; } -void bgp_table_range_lookup(const struct bgp_table *table, struct prefix *p, +void bgp_table_range_lookup(const struct bgp_table *table, + const struct prefix *p, uint8_t maxlen, struct list *matches); @@ -442,4 +443,13 @@ static inline bool bgp_node_has_bgp_path_info_data(struct bgp_node *node) return !!node->info; } +static inline const struct prefix *bgp_node_get_prefix(struct bgp_node *node) +{ + return &node->p; +} + +#ifdef _FRR_ATTRIBUTE_PRINTFRR +#pragma FRR printfrr_ext "%pRN" (struct bgp_node *) +#endif + #endif /* _QUAGGA_BGP_TABLE_H */ diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index 50824cd6dd..a29721988e 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -831,17 +831,17 @@ void update_subgroup_inherit_info(struct update_subgroup *to, * * Returns true if the subgroup was deleted. 
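bgp_table_range_lookup() above now takes a const prefix and reads node prefixes through bgp_node_get_prefix(); as before, every node it adds to 'matches' is locked via bgp_lock_node(). A hedged caller sketch, assuming the lib/linklist.h list helpers; the iteration and unlock discipline follow from the locking visible above rather than from any caller shown in this diff:

/* Collect every route under 'p' with prefixlen <= maxlen, then drop the
 * node references taken by the lookup. */
static void example_range_lookup(struct bgp_table *table,
				 const struct prefix *p, uint8_t maxlen)
{
	struct list *matches = list_new();
	struct listnode *node, *nnode;
	struct bgp_node *rn;

	bgp_table_range_lookup(table, p, maxlen, matches);

	for (ALL_LIST_ELEMENTS(matches, node, nnode, rn)) {
		zlog_debug("range match: %pRN", rn);
		bgp_unlock_node(rn);
	}

	list_delete(&matches);
}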
*/ -static int update_subgroup_check_delete(struct update_subgroup *subgrp) +static bool update_subgroup_check_delete(struct update_subgroup *subgrp) { if (!subgrp) - return 0; + return false; if (!LIST_EMPTY(&(subgrp->peers))) - return 0; + return false; update_subgroup_delete(subgrp); - return 1; + return true; } /* @@ -982,7 +982,7 @@ static struct update_subgroup *update_subgroup_find(struct update_group *updgrp, * Returns true if this subgroup is in a state that allows it to be * merged into another subgroup. */ -static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp) +static bool update_subgroup_ready_for_merge(struct update_subgroup *subgrp) { /* @@ -990,13 +990,13 @@ static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp) * out to peers. */ if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp))) - return 0; + return false; /* * Not ready if there enqueued updates waiting to be encoded. */ if (!advertise_list_is_empty(subgrp)) - return 0; + return false; /* * Don't attempt to merge a subgroup that needs a refresh. For one, @@ -1004,9 +1004,9 @@ static int update_subgroup_ready_for_merge(struct update_subgroup *subgrp) * another group. */ if (update_subgroup_needs_refresh(subgrp)) - return 0; + return false; - return 1; + return true; } /* @@ -1095,13 +1095,13 @@ static void update_subgroup_merge(struct update_subgroup *subgrp, * Returns true if the subgroup has been merged. The subgroup pointer * should not be accessed in this case. */ -int update_subgroup_check_merge(struct update_subgroup *subgrp, - const char *reason) +bool update_subgroup_check_merge(struct update_subgroup *subgrp, + const char *reason) { struct update_subgroup *target; if (!update_subgroup_ready_for_merge(subgrp)) - return 0; + return false; /* * Look for a subgroup to merge into. @@ -1112,10 +1112,10 @@ int update_subgroup_check_merge(struct update_subgroup *subgrp, } if (!target) - return 0; + return false; update_subgroup_merge(subgrp, target, reason); - return 1; + return true; } /* @@ -1143,14 +1143,14 @@ static int update_subgroup_merge_check_thread_cb(struct thread *thread) * * Returns true if a merge check will be performed shortly. 
*/ -int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp, - int force) +bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp, + int force) { if (subgrp->t_merge_check) - return 1; + return true; if (!force && !update_subgroup_ready_for_merge(subgrp)) - return 0; + return false; subgrp->t_merge_check = NULL; thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb, @@ -1158,7 +1158,7 @@ int update_subgroup_trigger_merge_check(struct update_subgroup *subgrp, SUBGRP_INCR_STAT(subgrp, merge_checks_triggered); - return 1; + return true; } /* @@ -1212,8 +1212,8 @@ static int update_subgroup_copy_packets(struct update_subgroup *dest, return count; } -static int updgrp_prefix_list_update(struct update_group *updgrp, - const char *name) +static bool updgrp_prefix_list_update(struct update_group *updgrp, + const char *name) { struct peer *peer; struct bgp_filter *filter; @@ -1225,13 +1225,13 @@ static int updgrp_prefix_list_update(struct update_group *updgrp, && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) { PREFIX_LIST_OUT(filter) = prefix_list_lookup( UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter)); - return 1; + return true; } - return 0; + return false; } -static int updgrp_filter_list_update(struct update_group *updgrp, - const char *name) +static bool updgrp_filter_list_update(struct update_group *updgrp, + const char *name) { struct peer *peer; struct bgp_filter *filter; @@ -1243,13 +1243,13 @@ static int updgrp_filter_list_update(struct update_group *updgrp, && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) { FILTER_LIST_OUT(filter) = as_list_lookup(FILTER_LIST_OUT_NAME(filter)); - return 1; + return true; } - return 0; + return false; } -static int updgrp_distribute_list_update(struct update_group *updgrp, - const char *name) +static bool updgrp_distribute_list_update(struct update_group *updgrp, + const char *name) { struct peer *peer; struct bgp_filter *filter; @@ -1261,9 +1261,9 @@ static int updgrp_distribute_list_update(struct update_group *updgrp, && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) { DISTRIBUTE_OUT(filter) = access_list_lookup( UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter)); - return 1; + return true; } - return 0; + return false; } static int updgrp_route_map_update(struct update_group *updgrp, diff --git a/bgpd/bgp_updgrp.h b/bgpd/bgp_updgrp.h index fe654bb3e3..403ca139f6 100644 --- a/bgpd/bgp_updgrp.h +++ b/bgpd/bgp_updgrp.h @@ -373,9 +373,9 @@ extern void update_subgroup_remove_peer(struct update_subgroup *, struct peer_af *); extern struct bgp_table *update_subgroup_rib(struct update_subgroup *); extern void update_subgroup_split_peer(struct peer_af *, struct update_group *); -extern int update_subgroup_check_merge(struct update_subgroup *, const char *); -extern int update_subgroup_trigger_merge_check(struct update_subgroup *, - int force); +extern bool update_subgroup_check_merge(struct update_subgroup *, const char *); +extern bool update_subgroup_trigger_merge_check(struct update_subgroup *, + int force); extern void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype, const char *pname, int route_update, int start_event); @@ -404,13 +404,13 @@ extern struct bpacket *bpacket_queue_first(struct bpacket_queue *q); struct bpacket *bpacket_queue_last(struct bpacket_queue *q); unsigned int bpacket_queue_length(struct bpacket_queue *q); unsigned int bpacket_queue_hwm_length(struct bpacket_queue *q); -int bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q); +bool 
bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q); extern void bpacket_queue_advance_peer(struct peer_af *paf); extern void bpacket_queue_remove_peer(struct peer_af *paf); extern void bpacket_add_peer(struct bpacket *pkt, struct peer_af *paf); unsigned int bpacket_queue_virtual_length(struct peer_af *paf); extern void bpacket_queue_show_vty(struct bpacket_queue *q, struct vty *vty); -int subgroup_packets_to_build(struct update_subgroup *subgrp); +bool subgroup_packets_to_build(struct update_subgroup *subgrp); extern struct bpacket *subgroup_update_packet(struct update_subgroup *s); extern struct bpacket *subgroup_withdraw_packet(struct update_subgroup *s); extern struct stream *bpacket_reformat_for_peer(struct bpacket *pkt, diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c index 34f80def8c..e40b3320ea 100644 --- a/bgpd/bgp_updgrp_adv.c +++ b/bgpd/bgp_updgrp_adv.c @@ -148,12 +148,9 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg) peer = UPDGRP_PEER(updgrp); addpath_capable = bgp_addpath_encode_tx(peer, afi, safi); - if (BGP_DEBUG(update, UPDATE_OUT)) { - char buf_prefix[PREFIX_STRLEN]; - prefix2str(&ctx->rn->p, buf_prefix, sizeof(buf_prefix)); - zlog_debug("%s: afi=%s, safi=%s, p=%s", __func__, afi2str(afi), - safi2str(safi), buf_prefix); - } + if (BGP_DEBUG(update, UPDATE_OUT)) + zlog_debug("%s: afi=%s, safi=%s, p=%pRN", __func__, + afi2str(afi), safi2str(safi), ctx->rn); UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { @@ -242,7 +239,9 @@ static void subgrp_show_adjq_vty(struct update_subgroup *subgrp, output_count = 0; - for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) + for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + RB_FOREACH (adj, bgp_adj_out_rb, &rn->adj_out) if (adj->subgroup == subgrp) { if (header1) { @@ -261,20 +260,20 @@ static void subgrp_show_adjq_vty(struct update_subgroup *subgrp, } if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv && adj->adv->baa) { - route_vty_out_tmp(vty, &rn->p, - adj->adv->baa->attr, - SUBGRP_SAFI(subgrp), - 0, NULL); + route_vty_out_tmp( + vty, rn_p, adj->adv->baa->attr, + SUBGRP_SAFI(subgrp), 0, NULL); output_count++; } if ((flags & UPDWALK_FLAGS_ADVERTISED) && adj->attr) { - route_vty_out_tmp( - vty, &rn->p, adj->attr, - SUBGRP_SAFI(subgrp), 0, NULL); + route_vty_out_tmp(vty, rn_p, adj->attr, + SUBGRP_SAFI(subgrp), + 0, NULL); output_count++; } } + } if (output_count != 0) vty_out(vty, "\nTotal number of prefixes %ld\n", output_count); } @@ -623,7 +622,9 @@ void subgroup_announce_table(struct update_subgroup *subgrp, PEER_FLAG_DEFAULT_ORIGINATE)) subgroup_default_originate(subgrp, 0); - for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) + for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + for (ri = bgp_node_get_bgp_path_info(rn); ri; ri = ri->next) if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED) @@ -632,7 +633,7 @@ void subgroup_announce_table(struct update_subgroup *subgrp, peer->addpath_type[afi][safi], ri))) { if (subgroup_announce_check(rn, ri, subgrp, - &rn->p, &attr)) + rn_p, &attr)) bgp_adj_out_set_subgroup(rn, subgrp, &attr, ri); else @@ -642,6 +643,7 @@ void subgroup_announce_table(struct update_subgroup *subgrp, peer, afi, safi, &ri->tx_addpath)); } + } /* * We walked through the whole table -- make sure our version number @@ -761,7 +763,8 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw) for (rn = 
bgp_table_top(bgp->rib[afi][safi]); rn; rn = bgp_route_next(rn)) { ret = route_map_apply(peer->default_rmap[afi][safi].map, - &rn->p, RMAP_BGP, &bpi_rmap); + bgp_node_get_prefix(rn), RMAP_BGP, + &bpi_rmap); if (ret != RMAP_DENYMATCH) break; diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c index 4dc9dfa39a..8d6fc1f6a2 100644 --- a/bgpd/bgp_updgrp_packet.c +++ b/bgpd/bgp_updgrp_packet.c @@ -226,11 +226,11 @@ unsigned int bpacket_queue_hwm_length(struct bpacket_queue *q) return q->hwm_count - 1; } -int bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q) +bool bpacket_queue_is_full(struct bgp *bgp, struct bpacket_queue *q) { if (q->curr_count >= bgp->default_subgroup_pkt_queue_max) - return 1; - return 0; + return true; + return false; } void bpacket_add_peer(struct bpacket *pkt, struct peer_af *paf) @@ -656,22 +656,22 @@ static void bpacket_attr_vec_arr_update(struct bpacket_attr_vec_arr *vecarr, /* * Return if there are packets to build for this subgroup. */ -int subgroup_packets_to_build(struct update_subgroup *subgrp) +bool subgroup_packets_to_build(struct update_subgroup *subgrp) { struct bgp_advertise *adv; if (!subgrp) - return 0; + return false; adv = bgp_adv_fifo_first(&subgrp->sync->withdraw); if (adv) - return 1; + return true; adv = bgp_adv_fifo_first(&subgrp->sync->update); if (adv) - return 1; + return true; - return 0; + return false; } /* Make BGP update packet. */ @@ -726,8 +726,11 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) adv = bgp_adv_fifo_first(&subgrp->sync->update); while (adv) { + const struct prefix *rn_p; + assert(adv->rn); rn = adv->rn; + rn_p = bgp_node_get_prefix(rn); adj = adv->adj; addpath_tx_id = adj->addpath_tx_id; path = adv->pathi; @@ -750,9 +753,8 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) space_remaining = STREAM_CONCAT_REMAIN(s, snlri, STREAM_SIZE(s)) - BGP_MAX_PACKET_SIZE_OVERFLOW; - space_needed = - BGP_NLRI_LENGTH + addpath_overhead - + bgp_packet_mpattr_prefix_size(afi, safi, &rn->p); + space_needed = BGP_NLRI_LENGTH + addpath_overhead + + bgp_packet_mpattr_prefix_size(afi, safi, rn_p); /* When remaining space can't include NLRI and it's length. 
*/ if (space_remaining < space_needed) @@ -798,7 +800,7 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) - BGP_MAX_PACKET_SIZE_OVERFLOW; space_needed = BGP_NLRI_LENGTH + addpath_overhead + bgp_packet_mpattr_prefix_size( - afi, safi, &rn->p); + afi, safi, rn_p); /* If the attributes alone do not leave any room for * NLRI then @@ -828,12 +830,13 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) if ((afi == AFI_IP && safi == SAFI_UNICAST) && !peer_cap_enhe(peer, afi, safi)) - stream_put_prefix_addpath(s, &rn->p, addpath_encode, + stream_put_prefix_addpath(s, rn_p, addpath_encode, addpath_tx_id); else { /* Encode the prefix in MP_REACH_NLRI attribute */ if (rn->prn) - prd = (struct prefix_rd *)&rn->prn->p; + prd = (struct prefix_rd *)bgp_node_get_prefix( + rn->prn); if (safi == SAFI_LABELED_UNICAST) { label = bgp_adv_label(rn, path, peer, afi, @@ -850,7 +853,7 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) snlri, peer, afi, safi, &vecarr, adv->baa->attr); - bgp_packet_mpattr_prefix(snlri, afi, safi, &rn->p, prd, + bgp_packet_mpattr_prefix(snlri, afi, safi, rn_p, prd, label_pnt, num_labels, addpath_encode, addpath_tx_id, adv->baa->attr); @@ -858,7 +861,7 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) num_pfx++; - if (bgp_debug_update(NULL, &rn->p, subgrp->update_group, 0)) { + if (bgp_debug_update(NULL, rn_p, subgrp->update_group, 0)) { char pfx_buf[BGP_PRD_PATH_STRLEN]; if (!send_attr_printed) { @@ -882,10 +885,10 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp) send_attr_printed = 1; } - bgp_debug_rdpfxpath2str(afi, safi, prd, &rn->p, - label_pnt, num_labels, - addpath_encode, addpath_tx_id, - pfx_buf, sizeof(pfx_buf)); + bgp_debug_rdpfxpath2str(afi, safi, prd, rn_p, label_pnt, + num_labels, addpath_encode, + addpath_tx_id, pfx_buf, + sizeof(pfx_buf)); zlog_debug("u%" PRIu64 ":s%" PRIu64 " send UPDATE %s", subgrp->update_group->id, subgrp->id, pfx_buf); @@ -964,7 +967,7 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp) int addpath_encode = 0; int addpath_overhead = 0; uint32_t addpath_tx_id = 0; - struct prefix_rd *prd = NULL; + const struct prefix_rd *prd = NULL; if (!subgrp) @@ -982,16 +985,19 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp) addpath_overhead = addpath_encode ? 
BGP_ADDPATH_ID_LEN : 0; while ((adv = bgp_adv_fifo_first(&subgrp->sync->withdraw)) != NULL) { + const struct prefix *rn_p; + assert(adv->rn); adj = adv->adj; rn = adv->rn; + rn_p = bgp_node_get_prefix(rn); addpath_tx_id = adj->addpath_tx_id; space_remaining = STREAM_WRITEABLE(s) - BGP_MAX_PACKET_SIZE_OVERFLOW; - space_needed = - BGP_NLRI_LENGTH + addpath_overhead + BGP_TOTAL_ATTR_LEN - + bgp_packet_mpattr_prefix_size(afi, safi, &rn->p); + space_needed = BGP_NLRI_LENGTH + addpath_overhead + + BGP_TOTAL_ATTR_LEN + + bgp_packet_mpattr_prefix_size(afi, safi, rn_p); if (space_remaining < space_needed) break; @@ -1004,13 +1010,15 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp) if (afi == AFI_IP && safi == SAFI_UNICAST && !peer_cap_enhe(peer, afi, safi)) - stream_put_prefix_addpath(s, &rn->p, addpath_encode, + stream_put_prefix_addpath(s, rn_p, addpath_encode, addpath_tx_id); else { if (rn->prn) - prd = (struct prefix_rd *)&rn->prn->p; + prd = (struct prefix_rd *)bgp_node_get_prefix( + rn->prn); - /* If first time, format the MP_UNREACH header */ + /* If first time, format the MP_UNREACH header + */ if (first_time) { iana_afi_t pkt_afi; iana_safi_t pkt_safi; @@ -1019,8 +1027,8 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp) pkt_safi = safi_int2iana(safi); attrlen_pos = stream_get_endp(s); - /* total attr length = 0 for now. reevaluate - * later */ + /* total attr length = 0 for now. + * reevaluate later */ stream_putw(s, 0); mp_start = stream_get_endp(s); mplen_pos = bgp_packet_mpunreach_start(s, afi, @@ -1034,17 +1042,17 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp) subgrp->id, pkt_afi, pkt_safi); } - bgp_packet_mpunreach_prefix(s, &rn->p, afi, safi, prd, + bgp_packet_mpunreach_prefix(s, rn_p, afi, safi, prd, NULL, 0, addpath_encode, addpath_tx_id, NULL); } num_pfx++; - if (bgp_debug_update(NULL, &rn->p, subgrp->update_group, 0)) { + if (bgp_debug_update(NULL, rn_p, subgrp->update_group, 0)) { char pfx_buf[BGP_PRD_PATH_STRLEN]; - bgp_debug_rdpfxpath2str(afi, safi, prd, &rn->p, NULL, 0, + bgp_debug_rdpfxpath2str(afi, safi, prd, rn_p, NULL, 0, addpath_encode, addpath_tx_id, pfx_buf, sizeof(pfx_buf)); zlog_debug("u%" PRIu64 ":s%" PRIu64 diff --git a/bgpd/bgp_vnc_types.h b/bgpd/bgp_vnc_types.h index f4202ff75e..04847ce6c9 100644 --- a/bgpd/bgp_vnc_types.h +++ b/bgpd/bgp_vnc_types.h @@ -19,7 +19,7 @@ #ifndef _QUAGGA_BGP_VNC_TYPES_H #define _QUAGGA_BGP_VNC_TYPES_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC typedef enum { BGP_VNC_SUBTLV_TYPE_LIFETIME = 1, BGP_VNC_SUBTLV_TYPE_RFPOPTION = 2, /* deprecated */ diff --git a/bgpd/bgp_vpn.c b/bgpd/bgp_vpn.c index b67b0c322e..af632a1340 100644 --- a/bgpd/bgp_vpn.c +++ b/bgpd/bgp_vpn.c @@ -78,7 +78,9 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, for (rn = bgp_table_top(bgp->rib[afi][safi]); rn; rn = bgp_route_next(rn)) { - if (prd && memcmp(rn->p.u.val, prd->val, 8) != 0) + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (prd && memcmp(rn_p->u.val, prd->val, 8) != 0) continue; table = bgp_node_get_bgp_table_info(rn); @@ -153,12 +155,12 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, uint16_t type; struct rd_as rd_as = {0}; struct rd_ip rd_ip = {0}; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC struct rd_vnc_eth rd_vnc_eth = {0}; #endif - uint8_t *pnt; + const uint8_t *pnt; - pnt = rn->p.u.val; + pnt = rn_p->u.val; /* Decode RD type. 
*/ type = decode_rd_type(pnt); @@ -169,7 +171,7 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, decode_rd_as4(pnt + 2, &rd_as); else if (type == RD_TYPE_IP) decode_rd_ip(pnt + 2, &rd_ip); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC else if (type == RD_TYPE_VNC_ETH) decode_rd_vnc_eth(pnt, &rd_vnc_eth); #endif @@ -198,7 +200,7 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, vty_out(vty, "%s:%d", inet_ntoa(rd_ip.ip), rd_ip.val); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC else if (type == RD_TYPE_VNC_ETH) vty_out(vty, "%u:%02x:%02x:%02x:%02x:%02x:%02x", @@ -221,9 +223,8 @@ int show_adj_route_vpn(struct vty *vty, struct peer *peer, } rd_header = 0; } - route_vty_out_tmp(vty, &rm->p, attr, - safi, use_json, - json_routes); + route_vty_out_tmp(vty, bgp_node_get_prefix(rm), attr, + safi, use_json, json_routes); output_count++; } diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index fc89881cae..8f06fdf86c 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -68,25 +68,25 @@ #include "bgpd/bgp_addpath.h" #include "bgpd/bgp_mac.h" #include "bgpd/bgp_flowspec.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" #endif FRR_CFG_DEFAULT_BOOL(BGP_IMPORT_CHECK, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_BOOL(BGP_SHOW_HOSTNAME, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_BOOL(BGP_LOG_NEIGHBOR_CHANGES, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_BOOL(BGP_DETERMINISTIC_MED, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY, { .val_ulong = 10, .match_profile = "datacenter", }, @@ -533,7 +533,7 @@ int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty, return *idx; } -static int peer_address_self_check(struct bgp *bgp, union sockunion *su) +static bool peer_address_self_check(struct bgp *bgp, union sockunion *su) { struct interface *ifp = NULL; @@ -545,9 +545,9 @@ static int peer_address_self_check(struct bgp *bgp, union sockunion *su) bgp->vrf_id); if (ifp) - return 1; + return true; - return 0; + return false; } /* Utility function for looking up peer from VTY. */ @@ -2973,6 +2973,49 @@ DEFUN (no_bgp_bestpath_med, return CMD_SUCCESS; } +/* "bgp bestpath bandwidth" configuration. 
*/ +DEFPY (bgp_bestpath_bw, + bgp_bestpath_bw_cmd, + "[no$no] bgp bestpath bandwidth [<ignore|skip-missing|default-weight-for-missing>$bw_cfg]", + NO_STR + "BGP specific commands\n" + "Change the default bestpath selection\n" + "Link Bandwidth attribute\n" + "Ignore link bandwidth (i.e., do regular ECMP, not weighted)\n" + "Ignore paths without link bandwidth for ECMP (if other paths have it)\n" + "Assign a low default weight (value 1) to paths not having link bandwidth\n") +{ + VTY_DECLVAR_CONTEXT(bgp, bgp); + afi_t afi; + safi_t safi; + + if (no) { + bgp->lb_handling = BGP_LINK_BW_ECMP; + } else { + if (!bw_cfg) { + vty_out(vty, "%% Bandwidth configuration must be specified\n"); + return CMD_ERR_INCOMPLETE; + } + if (!strcmp(bw_cfg, "ignore")) + bgp->lb_handling = BGP_LINK_BW_IGNORE_BW; + else if (!strcmp(bw_cfg, "skip-missing")) + bgp->lb_handling = BGP_LINK_BW_SKIP_MISSING; + else if (!strcmp(bw_cfg, "default-weight-for-missing")) + bgp->lb_handling = BGP_LINK_BW_DEFWT_4_MISSING; + else + return CMD_ERR_NO_MATCH; + } + + /* This config is used in route install, so redo that. */ + FOREACH_AFI_SAFI (afi, safi) { + if (!bgp_fibupd_safi(safi)) + continue; + bgp_zebra_announce_table(bgp, afi, safi); + } + + return CMD_SUCCESS; +} + /* "no bgp default ipv4-unicast". */ DEFUN (no_bgp_default_ipv4_unicast, no_bgp_default_ipv4_unicast_cmd, @@ -6590,6 +6633,7 @@ DEFUN(no_neighbor_maximum_prefix_out, if (!peer) return CMD_WARNING_CONFIG_FAILED; + UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT); peer->pmax_out[afi][safi] = 0; return CMD_SUCCESS; @@ -7520,8 +7564,7 @@ ALIAS (af_route_map_vpn_imexport, "For routes leaked from current address-family to vpn\n") DEFPY(af_import_vrf_route_map, af_import_vrf_route_map_cmd, - "[no] import vrf route-map RMAP$rmap_str", - NO_STR + "import vrf route-map RMAP$rmap_str", "Import routes from another VRF\n" "Vrf routes being filtered\n" "Specify route map\n" @@ -7530,13 +7573,8 @@ DEFPY(af_import_vrf_route_map, af_import_vrf_route_map_cmd, VTY_DECLVAR_CONTEXT(bgp, bgp); vpn_policy_direction_t dir = BGP_VPN_POLICY_DIR_FROMVPN; afi_t afi; - int idx = 0; - int yes = 1; struct bgp *bgp_default; - if (argv_find(argv, argc, "no", &idx)) - yes = 0; - afi = vpn_policy_getafi(vty, bgp, true); if (afi == AFI_MAX) return CMD_WARNING_CONFIG_FAILED; @@ -7559,35 +7597,56 @@ DEFPY(af_import_vrf_route_map, af_import_vrf_route_map_cmd, vpn_leak_prechange(dir, afi, bgp_get_default(), bgp); - if (yes) { - if (bgp->vpn_policy[afi].rmap_name[dir]) - XFREE(MTYPE_ROUTE_MAP_NAME, - bgp->vpn_policy[afi].rmap_name[dir]); - bgp->vpn_policy[afi].rmap_name[dir] = - XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap_str); - bgp->vpn_policy[afi].rmap[dir] = - route_map_lookup_warn_noexist(vty, rmap_str); - if (!bgp->vpn_policy[afi].rmap[dir]) - return CMD_SUCCESS; - } else { - if (bgp->vpn_policy[afi].rmap_name[dir]) - XFREE(MTYPE_ROUTE_MAP_NAME, - bgp->vpn_policy[afi].rmap_name[dir]); - bgp->vpn_policy[afi].rmap_name[dir] = NULL; - bgp->vpn_policy[afi].rmap[dir] = NULL; - } + if (bgp->vpn_policy[afi].rmap_name[dir]) + XFREE(MTYPE_ROUTE_MAP_NAME, + bgp->vpn_policy[afi].rmap_name[dir]); + bgp->vpn_policy[afi].rmap_name[dir] = + XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap_str); + bgp->vpn_policy[afi].rmap[dir] = + route_map_lookup_warn_noexist(vty, rmap_str); + if (!bgp->vpn_policy[afi].rmap[dir]) + return CMD_SUCCESS; + + SET_FLAG(bgp->af_flags[afi][SAFI_UNICAST], + BGP_CONFIG_VRF_TO_VRF_IMPORT); vpn_leak_postchange(dir, afi, bgp_get_default(), bgp); return CMD_SUCCESS; } 
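For illustration only (not part of the patch): the weighted-ECMP behaviour selected by the "bgp bestpath bandwidth" command added above is easiest to see in a small standalone sketch. The weight formula and the skip/default-weight handling below mirror the bgp_zebra_use_nhop_weighted() helper introduced further down in this change; the local enum, the sample bandwidth values and the main() driver are invented purely for demonstration and are not FRR code.

/*
 * Standalone sketch of the link-bandwidth to nexthop-weight mapping.
 * Assumptions: simplified handling enum, sample bandwidths in Mbps.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum lb_handling { LB_ECMP, LB_IGNORE, LB_SKIP_MISSING, LB_DEFWT_MISSING };

#define DEFAULT_NHOP_WEIGHT 1

/* Returns 0 if the path should be skipped, 1 if a weight was produced. */
static int nhop_weight(enum lb_handling handling, uint32_t bw,
		       uint64_t tot_bw, uint32_t *weight)
{
	if (!bw) {
		/* Missing/zero bandwidth: either skip the path or give it
		 * the low default weight, depending on configuration. */
		if (handling == LB_SKIP_MISSING)
			return 0;
		*weight = DEFAULT_NHOP_WEIGHT;
		return 1;
	}

	/* Weight is this path's share of the cumulative bandwidth,
	 * scaled to 0..100. */
	*weight = (uint32_t)(((uint64_t)bw * 100) / tot_bw);
	return 1;
}

int main(void)
{
	/* Two multipath nexthops advertising 10 and 30 Mbps of link
	 * bandwidth; cumulative bandwidth is 40. */
	uint32_t bws[2] = {10, 30};
	uint64_t tot = 40;
	uint32_t w;
	int i;

	for (i = 0; i < 2; i++)
		if (nhop_weight(LB_DEFWT_MISSING, bws[i], tot, &w))
			printf("nexthop %d: weight %" PRIu32 "\n", i, w);
	/* prints weights 25 and 75 */
	return 0;
}

With "skip-missing" configured, a path carrying no link bandwidth would be left out of the weighted set entirely (the helper returns 0), while "default-weight-for-missing" keeps it at the low default weight of 1, matching BGP_ZEBRA_DEFAULT_NHOP_WEIGHT in the patch.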
-ALIAS(af_import_vrf_route_map, af_no_import_vrf_route_map_cmd, - "no import vrf route-map", +DEFPY(af_no_import_vrf_route_map, af_no_import_vrf_route_map_cmd, + "no import vrf route-map [RMAP$rmap_str]", NO_STR "Import routes from another VRF\n" "Vrf routes being filtered\n" - "Specify route map\n") + "Specify route map\n" + "name of route-map\n") +{ + VTY_DECLVAR_CONTEXT(bgp, bgp); + vpn_policy_direction_t dir = BGP_VPN_POLICY_DIR_FROMVPN; + afi_t afi; + + afi = vpn_policy_getafi(vty, bgp, true); + if (afi == AFI_MAX) + return CMD_WARNING_CONFIG_FAILED; + + vpn_leak_prechange(dir, afi, bgp_get_default(), bgp); + + if (bgp->vpn_policy[afi].rmap_name[dir]) + XFREE(MTYPE_ROUTE_MAP_NAME, + bgp->vpn_policy[afi].rmap_name[dir]); + bgp->vpn_policy[afi].rmap_name[dir] = NULL; + bgp->vpn_policy[afi].rmap[dir] = NULL; + + if (bgp->vpn_policy[afi].import_vrf->count == 0) + UNSET_FLAG(bgp->af_flags[afi][SAFI_UNICAST], + BGP_CONFIG_VRF_TO_VRF_IMPORT); + + vpn_leak_postchange(dir, afi, bgp_get_default(), bgp); + + return CMD_SUCCESS; +} DEFPY(bgp_imexport_vrf, bgp_imexport_vrf_cmd, "[no] import vrf VIEWVRFNAME$import_name", @@ -7613,6 +7672,11 @@ DEFPY(bgp_imexport_vrf, bgp_imexport_vrf_cmd, return CMD_WARNING; } + if (strcmp(import_name, "route-map") == 0) { + vty_out(vty, "%% Must include route-map name\n"); + return CMD_WARNING; + } + if (argv_find(argv, argc, "no", &idx)) remove = true; @@ -7935,27 +7999,32 @@ static int bgp_clear_prefix(struct vty *vty, const char *view_name, if (safi == SAFI_MPLS_VPN) { for (rn = bgp_table_top(rib); rn; rn = bgp_route_next(rn)) { - if (prd && memcmp(rn->p.u.val, prd->val, 8) != 0) + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (prd && memcmp(rn_p->u.val, prd->val, 8) != 0) continue; table = bgp_node_get_bgp_table_info(rn); - if (table != NULL) { + if (table == NULL) + continue; - if ((rm = bgp_node_match(table, &match)) - != NULL) { - if (rm->p.prefixlen - == match.prefixlen) { - SET_FLAG(rm->flags, - BGP_NODE_USER_CLEAR); - bgp_process(bgp, rm, afi, safi); - } - bgp_unlock_node(rm); + if ((rm = bgp_node_match(table, &match)) != NULL) { + const struct prefix *rm_p = + bgp_node_get_prefix(rm); + + if (rm_p->prefixlen == match.prefixlen) { + SET_FLAG(rm->flags, + BGP_NODE_USER_CLEAR); + bgp_process(bgp, rm, afi, safi); } + bgp_unlock_node(rm); } } } else { if ((rn = bgp_node_match(rib, &match)) != NULL) { - if (rn->p.prefixlen == match.prefixlen) { + const struct prefix *rn_p = bgp_node_get_prefix(rn); + + if (rn_p->prefixlen == match.prefixlen) { SET_FLAG(rn->flags, BGP_NODE_USER_CLEAR); bgp_process(bgp, rn, afi, safi); } @@ -9010,21 +9079,23 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, json_object_int_add(json_peer, "msgSent", PEER_TOTAL_TX(peer)); + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit( + &peer->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit( + &peer->ibuf->count, + memory_order_relaxed); + json_object_int_add(json_peer, "tableVersion", peer->version[afi][safi]); json_object_int_add(json_peer, "outq", - peer->obuf->count); - json_object_int_add(json_peer, "inq", 0); + outq_count); + json_object_int_add(json_peer, "inq", + inq_count); peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN, use_json, json_peer); - /* - * Adding "pfxRcd" field to match with the corresponding - * CLI. "prefixReceivedCount" will be deprecated in - * future. 
- */ - json_object_int_add(json_peer, "prefixReceivedCount", - peer->pcount[afi][pfx_rcd_safi]); json_object_int_add(json_peer, "pfxRcd", peer->pcount[afi][pfx_rcd_safi]); @@ -9096,10 +9167,21 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, vty_out(vty, "%*s", max_neighbor_width - len, " "); - vty_out(vty, "4 %10u %9u %9u %8" PRIu64 " %4d %4zd %8s", + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit( + &peer->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit( + &peer->ibuf->count, + memory_order_relaxed); + + vty_out(vty, + "4 %10u %9u %9u %8" PRIu64 + " %4zu %4zu %8s", peer->as, PEER_TOTAL_RX(peer), - PEER_TOTAL_TX(peer), peer->version[afi][safi], - 0, peer->obuf->count, + PEER_TOTAL_TX(peer), + peer->version[afi][safi], inq_count, + outq_count, peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN, 0, NULL)); @@ -11627,7 +11709,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, json_object_object_add( json_neigh, "gracefulRestartInfo", json_grace); } else { - vty_out(vty, " Graceful restart informations:\n"); + vty_out(vty, " Graceful restart information:\n"); if ((p->status == Established) && CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) { @@ -11683,9 +11765,17 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, json_object *json_stat = NULL; json_stat = json_object_new_object(); /* Packet counts. */ - json_object_int_add(json_stat, "depthInq", 0); + + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit(&p->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit(&p->ibuf->count, + memory_order_relaxed); + + json_object_int_add(json_stat, "depthInq", + (unsigned long)inq_count); json_object_int_add(json_stat, "depthOutq", - (unsigned long)p->obuf->count); + (unsigned long)outq_count); json_object_int_add(json_stat, "opensSent", atomic_load_explicit(&p->open_out, memory_order_relaxed)); @@ -11726,11 +11816,16 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, json_object_int_add(json_stat, "totalRecv", PEER_TOTAL_RX(p)); json_object_object_add(json_neigh, "messageStats", json_stat); } else { + atomic_size_t outq_count, inq_count; + outq_count = atomic_load_explicit(&p->obuf->count, + memory_order_relaxed); + inq_count = atomic_load_explicit(&p->ibuf->count, + memory_order_relaxed); + /* Packet counts. 
*/ vty_out(vty, " Message statistics:\n"); - vty_out(vty, " Inq depth is 0\n"); - vty_out(vty, " Outq depth is %lu\n", - (unsigned long)p->obuf->count); + vty_out(vty, " Inq depth is %zu\n", inq_count); + vty_out(vty, " Outq depth is %zu\n", outq_count); vty_out(vty, " Sent Rcvd\n"); vty_out(vty, " Opens: %10d %10d\n", atomic_load_explicit(&p->open_out, @@ -12097,14 +12192,20 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, enum show_type type, union sockunion *su, const char *conf_if, afi_t afi, - bool use_json, json_object *json) + bool use_json) { struct listnode *node, *nnode; struct peer *peer; int find = 0; safi_t safi = SAFI_UNICAST; + json_object *json = NULL; json_object *json_neighbor = NULL; + if (use_json) { + json = json_object_new_object(); + json_neighbor = json_object_new_object(); + } + for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) @@ -12113,16 +12214,15 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, if ((peer->afc[afi][safi]) == 0) continue; - if (use_json) - json_neighbor = json_object_new_object(); - if (type == show_all) { bgp_show_peer_gr_status(vty, peer, use_json, json_neighbor); - if (use_json) + if (use_json) { json_object_object_add(json, peer->host, json_neighbor); + json_neighbor = NULL; + } } else if (type == show_peer) { if (conf_if) { @@ -12148,8 +12248,10 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, json_neighbor); } - if (find) + if (find) { + json_neighbor = NULL; break; + } } if (type == show_peer && !find) { @@ -12162,6 +12264,10 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp, vty_out(vty, "%s\n", json_object_to_json_string_ext( json, JSON_C_TO_STRING_PRETTY)); + + if (json_neighbor) + json_object_free(json_neighbor); + json_object_free(json); } else { vty_out(vty, "\n"); } @@ -12283,7 +12389,6 @@ static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty, int ret; struct bgp *bgp; union sockunion su; - json_object *json = NULL; bgp = bgp_get_default(); @@ -12294,20 +12399,17 @@ static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty, bgp_show_global_graceful_restart_mode_vty(vty, bgp, use_json, NULL); - json = json_object_new_object(); if (ip_str) { ret = str2sockunion(ip_str, &su); if (ret < 0) - bgp_show_neighbor_graceful_restart(vty, bgp, type, NULL, - ip_str, afi, - use_json, json); - else bgp_show_neighbor_graceful_restart( - vty, bgp, type, &su, NULL, afi, use_json, json); + vty, bgp, type, NULL, ip_str, afi, use_json); + else + bgp_show_neighbor_graceful_restart(vty, bgp, type, &su, + NULL, afi, use_json); } else bgp_show_neighbor_graceful_restart(vty, bgp, type, NULL, NULL, - afi, use_json, json); - json_object_free(json); + afi, use_json); } static void bgp_show_all_instances_neighbors_vty(struct vty *vty, @@ -14086,7 +14188,8 @@ static bool peergroup_filter_check(struct peer *peer, afi_t afi, safi_t safi, /* Return true if the addpath type is set for peer and different from * peer-group. 
*/ -static int peergroup_af_addpath_check(struct peer *peer, afi_t afi, safi_t safi) +static bool peergroup_af_addpath_check(struct peer *peer, afi_t afi, + safi_t safi) { enum bgp_addpath_strat type, g_type; @@ -14097,15 +14200,15 @@ static int peergroup_af_addpath_check(struct peer *peer, afi_t afi, safi_t safi) g_type = peer->group->conf->addpath_type[afi][safi]; if (type != g_type) - return 1; + return true; else - return 0; + return false; } - return 1; + return true; } - return 0; + return false; } /* This is part of the address-family block (unicast only) */ @@ -14115,7 +14218,8 @@ static void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp, int indent = 2; if (bgp->vpn_policy[afi].rmap_name[BGP_VPN_POLICY_DIR_FROMVPN]) { - if (listcount(bgp->vpn_policy[afi].import_vrf)) + if (CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST], + BGP_CONFIG_VRF_TO_VRF_IMPORT)) vty_out(vty, "%*simport vrf route-map %s\n", indent, "", bgp->vpn_policy[afi] .rmap_name[BGP_VPN_POLICY_DIR_FROMVPN]); @@ -15131,6 +15235,14 @@ int bgp_config_write(struct vty *vty) vty_out(vty, "\n"); } + /* Link bandwidth handling. */ + if (bgp->lb_handling == BGP_LINK_BW_IGNORE_BW) + vty_out(vty, " bgp bestpath bandwidth ignore\n"); + else if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING) + vty_out(vty, " bgp bestpath bandwidth skip-missing\n"); + else if (bgp->lb_handling == BGP_LINK_BW_DEFWT_4_MISSING) + vty_out(vty, " bgp bestpath bandwidth default-weight-for-missing\n"); + /* BGP network import check. */ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK) != SAVE_BGP_IMPORT_CHECK) @@ -15211,7 +15323,7 @@ int bgp_config_write(struct vty *vty) hook_call(bgp_inst_config_write, bgp, vty); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC bgp_rfapi_cfg_write(vty, bgp); #endif @@ -15537,6 +15649,9 @@ void bgp_vty_init(void) install_element(BGP_NODE, &bgp_bestpath_med_cmd); install_element(BGP_NODE, &no_bgp_bestpath_med_cmd); + /* "bgp bestpath bandwidth" commands */ + install_element(BGP_NODE, &bgp_bestpath_bw_cmd); + /* "no bgp default ipv4-unicast" commands. */ install_element(BGP_NODE, &no_bgp_default_ipv4_unicast_cmd); install_element(BGP_NODE, &bgp_default_ipv4_unicast_cmd); diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index f3ab608492..4f54bc81fb 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -51,7 +51,7 @@ #include "bgpd/bgp_nht.h" #include "bgpd/bgp_bfd.h" #include "bgpd/bgp_label.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" #include "bgpd/rfapi/vnc_export_bgp.h" #endif @@ -66,19 +66,19 @@ struct zclient *zclient = NULL; /* Can we install into zebra? 
*/ -static inline int bgp_install_info_to_zebra(struct bgp *bgp) +static inline bool bgp_install_info_to_zebra(struct bgp *bgp) { if (zclient->sock <= 0) - return 0; + return false; if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) { zlog_debug( "%s: No zebra instance to talk to, not installing information", __func__); - return 0; + return false; } - return 1; + return true; } int zclient_num_connects; @@ -928,8 +928,8 @@ bgp_path_info_to_ipv6_nexthop(struct bgp_path_info *path, ifindex_t *ifindex) return nexthop; } -static int bgp_table_map_apply(struct route_map *map, struct prefix *p, - struct bgp_path_info *path) +static bool bgp_table_map_apply(struct route_map *map, const struct prefix *p, + struct bgp_path_info *path) { route_map_result_t ret; @@ -937,7 +937,7 @@ static int bgp_table_map_apply(struct route_map *map, struct prefix *p, bgp_attr_flush(path->attr); if (ret != RMAP_DENYMATCH) - return 1; + return true; if (bgp_debug_zebra(p)) { if (p->family == AF_INET) { @@ -965,7 +965,7 @@ static int bgp_table_map_apply(struct route_map *map, struct prefix *p, buf[1], sizeof(buf[1]))); } } - return 0; + return false; } static struct thread *bgp_tm_thread_connect; @@ -1058,12 +1058,10 @@ int bgp_zebra_get_table_range(uint32_t chunk_size, return 0; } -static int update_ipv4nh_for_route_install(int nh_othervrf, - struct bgp *nh_bgp, - struct in_addr *nexthop, - struct attr *attr, - bool is_evpn, - struct zapi_nexthop *api_nh) +static bool update_ipv4nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, + struct in_addr *nexthop, + struct attr *attr, bool is_evpn, + struct zapi_nexthop *api_nh) { api_nh->gate.ipv4 = *nexthop; api_nh->vrf_id = nh_bgp->vrf_id; @@ -1083,15 +1081,16 @@ static int update_ipv4nh_for_route_install(int nh_othervrf, } else api_nh->type = NEXTHOP_TYPE_IPV4; - return 1; + return true; } -static int -update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, - struct in6_addr *nexthop, - ifindex_t ifindex, struct bgp_path_info *pi, - struct bgp_path_info *best_pi, bool is_evpn, - struct zapi_nexthop *api_nh) +static bool update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, + struct in6_addr *nexthop, + ifindex_t ifindex, + struct bgp_path_info *pi, + struct bgp_path_info *best_pi, + bool is_evpn, + struct zapi_nexthop *api_nh) { struct attr *attr; @@ -1108,7 +1107,7 @@ update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, api_nh->ifindex = attr->nh_ifindex; } else if (IN6_IS_ADDR_LINKLOCAL(nexthop)) { if (ifindex == 0) - return 0; + return false; api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX; api_nh->ifindex = ifindex; } else { @@ -1136,7 +1135,7 @@ update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, } if (ifindex == 0) - return 0; + return false; api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX; api_nh->ifindex = ifindex; } else { @@ -1146,10 +1145,35 @@ update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp, } api_nh->gate.ipv6 = *nexthop; - return 1; + return true; } -void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, +static bool bgp_zebra_use_nhop_weighted(struct bgp *bgp, struct attr *attr, + uint64_t tot_bw, uint32_t *nh_weight) +{ + uint32_t bw; + uint64_t tmp; + + bw = attr->link_bw; + /* zero link-bandwidth and link-bandwidth not present are treated + * as the same situation. + */ + if (!bw) { + /* the only situations should be if we're either told + * to skip or use default weight. 
+ */ + if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING) + return false; + *nh_weight = BGP_ZEBRA_DEFAULT_NHOP_WEIGHT; + } else { + tmp = (uint64_t)bw * 100; + *nh_weight = ((uint32_t)(tmp / tot_bw)); + } + + return true; +} + +void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, struct bgp_path_info *info, struct bgp *bgp, afi_t afi, safi_t safi) { @@ -1171,6 +1195,8 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, char buf_prefix[PREFIX_STRLEN]; /* filled in if we are debugging */ bool is_evpn; int nh_updated; + bool do_wt_ecmp; + uint64_t cum_bw = 0; /* Don't try to install if we're not connected to Zebra or Zebra doesn't * know of this instance. @@ -1185,7 +1211,8 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, prefix2str(p, buf_prefix, sizeof(buf_prefix)); if (safi == SAFI_FLOWSPEC) { - bgp_pbr_update_entry(bgp, &rn->p, info, afi, safi, true); + bgp_pbr_update_entry(bgp, bgp_node_get_prefix(rn), + info, afi, safi, true); return; } @@ -1240,11 +1267,20 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, /* Metric is currently based on the best-path only */ metric = info->attr->med; + + /* Determine if we're doing weighted ECMP or not */ + do_wt_ecmp = bgp_path_info_mpath_chkwtd(bgp, info); + if (do_wt_ecmp) + cum_bw = bgp_path_info_mpath_cumbw(info); + for (mpinfo = info; mpinfo; mpinfo = bgp_path_info_mpath_next(mpinfo)) { + uint32_t nh_weight; + if (valid_nh_count >= multipath_num) break; *mpinfo_cp = *mpinfo; + nh_weight = 0; /* Get nexthop address-family */ if (p->family == AF_INET @@ -1257,6 +1293,15 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, else continue; + /* If processing for weighted ECMP, determine the next hop's + * weight. Based on user setting, we may skip the next hop + * in some situations. 
+ */ + if (do_wt_ecmp) { + if (!bgp_zebra_use_nhop_weighted(bgp, mpinfo->attr, + cum_bw, &nh_weight)) + continue; + } api_nh = &api.nexthops[valid_nh_count]; if (nh_family == AF_INET) { if (bgp_debug_zebra(&api.prefix)) { @@ -1356,6 +1401,8 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, } memcpy(&api_nh->rmac, &(mpinfo->attr->rmac), sizeof(struct ethaddr)); + api_nh->weight = nh_weight; + valid_nh_count++; } @@ -1435,9 +1482,10 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, snprintf(eth_buf, sizeof(eth_buf), " RMAC %s", prefix_mac2str(&api_nh->rmac, buf1, sizeof(buf1))); - zlog_debug(" nhop [%d]: %s if %u VRF %u %s %s", + zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s", i + 1, nh_buf, api_nh->ifindex, - api_nh->vrf_id, label_buf, eth_buf); + api_nh->vrf_id, api_nh->weight, + label_buf, eth_buf); } } @@ -1481,11 +1529,11 @@ void bgp_zebra_announce_table(struct bgp *bgp, afi_t afi, safi_t safi) && (pi->sub_type == BGP_ROUTE_NORMAL || pi->sub_type == BGP_ROUTE_IMPORTED))) - bgp_zebra_announce(rn, &rn->p, pi, bgp, afi, - safi); + bgp_zebra_announce(rn, bgp_node_get_prefix(rn), + pi, bgp, afi, safi); } -void bgp_zebra_withdraw(struct prefix *p, struct bgp_path_info *info, +void bgp_zebra_withdraw(const struct prefix *p, struct bgp_path_info *info, struct bgp *bgp, safi_t safi) { struct zapi_route api; @@ -1605,7 +1653,7 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type, if (vrf_bitmap_check(zclient->redist[afi][type], bgp->vrf_id)) return CMD_WARNING; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (EVPN_ENABLED(bgp) && type == ZEBRA_ROUTE_VNC_DIRECT) { vnc_export_bgp_enable( bgp, afi); /* only enables if mode bits cfg'd */ @@ -1660,11 +1708,11 @@ int bgp_redistribute_resend(struct bgp *bgp, afi_t afi, int type, } /* Redistribute with route-map specification. */ -int bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name, - struct route_map *route_map) +bool bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name, + struct route_map *route_map) { if (red->rmap.name && (strcmp(red->rmap.name, name) == 0)) - return 0; + return false; XFREE(MTYPE_ROUTE_MAP_NAME, red->rmap.name); /* Decrement the count for existing routemap and @@ -1675,18 +1723,18 @@ int bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name, red->rmap.map = route_map; route_map_counter_increment(red->rmap.map); - return 1; + return true; } /* Redistribute with metric specification. */ -int bgp_redistribute_metric_set(struct bgp *bgp, struct bgp_redist *red, - afi_t afi, int type, uint32_t metric) +bool bgp_redistribute_metric_set(struct bgp *bgp, struct bgp_redist *red, + afi_t afi, int type, uint32_t metric) { struct bgp_node *rn; struct bgp_path_info *pi; if (red->redist_metric_flag && red->redist_metric == metric) - return 0; + return false; red->redist_metric_flag = 1; red->redist_metric = metric; @@ -1713,7 +1761,7 @@ int bgp_redistribute_metric_set(struct bgp *bgp, struct bgp_redist *red, } } - return 1; + return true; } /* Unset redistribution. */ @@ -1766,7 +1814,7 @@ int bgp_redistribute_unset(struct bgp *bgp, afi_t afi, int type, * they operate within bgpd irrespective of zebra connection * status. red lookup fails if there is no zebra connection. 
*/ -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (EVPN_ENABLED(bgp) && type == ZEBRA_ROUTE_VNC_DIRECT) { vnc_export_bgp_disable(bgp, afi); } diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index 5a02e2fbf9..a069d01503 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -23,6 +23,9 @@ #include "vxlan.h" +/* Default weight for next hop, if doing weighted ECMP. */ +#define BGP_ZEBRA_DEFAULT_NHOP_WEIGHT 1 + extern void bgp_zebra_init(struct thread_master *master, unsigned short instance); extern void bgp_zebra_init_tm_connect(struct bgp *bgp); @@ -32,12 +35,13 @@ extern void bgp_zebra_destroy(void); extern int bgp_zebra_get_table_range(uint32_t chunk_size, uint32_t *start, uint32_t *end); extern int bgp_if_update_all(void); -extern void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p, +extern void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p, struct bgp_path_info *path, struct bgp *bgp, afi_t afi, safi_t safi); extern void bgp_zebra_announce_table(struct bgp *, afi_t, safi_t); -extern void bgp_zebra_withdraw(struct prefix *p, struct bgp_path_info *path, - struct bgp *bgp, safi_t safi); +extern void bgp_zebra_withdraw(const struct prefix *p, + struct bgp_path_info *path, struct bgp *bgp, + safi_t safi); extern void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer); extern void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer); @@ -53,10 +57,10 @@ extern struct bgp_redist *bgp_redist_add(struct bgp *, afi_t, uint8_t, extern int bgp_redistribute_set(struct bgp *, afi_t, int, unsigned short, bool changed); extern int bgp_redistribute_resend(struct bgp *, afi_t, int, unsigned short); -extern int bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name, - struct route_map *route_map); -extern int bgp_redistribute_metric_set(struct bgp *, struct bgp_redist *, afi_t, - int, uint32_t); +extern bool bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name, + struct route_map *route_map); +extern bool bgp_redistribute_metric_set(struct bgp *, struct bgp_redist *, + afi_t, int, uint32_t); extern int bgp_redistribute_unset(struct bgp *, afi_t, int, unsigned short); extern int bgp_redistribute_unreg(struct bgp *, afi_t, int, unsigned short); diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 267d67e46e..dedd0ed3b1 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -65,7 +65,7 @@ #include "bgpd/bgp_nexthop.h" #include "bgpd/bgp_damp.h" #include "bgpd/bgp_mplsvpn.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" #include "bgpd/rfapi/rfapi_backend.h" #endif @@ -320,13 +320,12 @@ void bgp_router_id_zebra_bump(vrf_id_t vrf_id, const struct prefix *router_id) } } -int bgp_router_id_static_set(struct bgp *bgp, struct in_addr id) +void bgp_router_id_static_set(struct bgp *bgp, struct in_addr id) { bgp->router_id_static = id; bgp_router_id_set(bgp, id.s_addr != INADDR_ANY ? &id : &bgp->router_id_zebra, true /* is config */); - return 0; } /* BGP's cluster-id control. */ @@ -393,25 +392,21 @@ time_t bgp_clock(void) } /* BGP timer configuration. */ -int bgp_timers_set(struct bgp *bgp, uint32_t keepalive, uint32_t holdtime, - uint32_t connect_retry) +void bgp_timers_set(struct bgp *bgp, uint32_t keepalive, uint32_t holdtime, + uint32_t connect_retry) { bgp->default_keepalive = (keepalive < holdtime / 3 ? 
keepalive : holdtime / 3); bgp->default_holdtime = holdtime; bgp->default_connect_retry = connect_retry; - - return 0; } /* mostly for completeness - CLI uses its own defaults */ -int bgp_timers_unset(struct bgp *bgp) +void bgp_timers_unset(struct bgp *bgp) { bgp->default_keepalive = BGP_DEFAULT_KEEPALIVE; bgp->default_holdtime = BGP_DEFAULT_HOLDTIME; bgp->default_connect_retry = BGP_DEFAULT_CONNECT_RETRY; - - return 0; } /* BGP confederation configuration. */ @@ -499,18 +494,18 @@ int bgp_confederation_id_unset(struct bgp *bgp) } /* Is an AS part of the confed or not? */ -int bgp_confederation_peers_check(struct bgp *bgp, as_t as) +bool bgp_confederation_peers_check(struct bgp *bgp, as_t as) { int i; if (!bgp) - return 0; + return false; for (i = 0; i < bgp->confed_peers_cnt; i++) if (bgp->confed_peers[i] == as) - return 1; + return true; - return 0; + return false; } /* Add an AS to the confederation set. */ @@ -1415,8 +1410,8 @@ static int bgp_peer_conf_if_to_su_update_v4(struct peer *peer, return 0; } -static int bgp_peer_conf_if_to_su_update_v6(struct peer *peer, - struct interface *ifp) +static bool bgp_peer_conf_if_to_su_update_v6(struct peer *peer, + struct interface *ifp) { struct nbr_connected *ifc_nbr; @@ -1430,10 +1425,10 @@ static int bgp_peer_conf_if_to_su_update_v6(struct peer *peer, peer->su.sin6.sin6_len = sizeof(struct sockaddr_in6); #endif peer->su.sin6.sin6_scope_id = ifp->ifindex; - return 1; + return true; } - return 0; + return false; } /* @@ -2084,18 +2079,18 @@ int peer_activate(struct peer *peer, afi_t afi, safi_t safi) return ret; } -static int non_peergroup_deactivate_af(struct peer *peer, afi_t afi, - safi_t safi) +static bool non_peergroup_deactivate_af(struct peer *peer, afi_t afi, + safi_t safi) { if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { flog_err(EC_BGP_PEER_GROUP, "%s was called for peer-group %s", __func__, peer->host); - return 1; + return true; } /* Nothing to do if we've already deactivated this peer */ if (!peer->afc[afi][safi]) - return 0; + return false; /* De-activate the address family configuration. 
*/ peer->afc[afi][safi] = 0; @@ -2104,7 +2099,7 @@ static int non_peergroup_deactivate_af(struct peer *peer, afi_t afi, flog_err(EC_BGP_PEER_DELETE, "couldn't delete af structure for peer %s(%s, %s)", peer->host, afi2str(afi), safi2str(safi)); - return 1; + return true; } if (peer->status == Established) { @@ -2130,7 +2125,7 @@ static int non_peergroup_deactivate_af(struct peer *peer, afi_t afi, } } - return 0; + return false; } int peer_deactivate(struct peer *peer, afi_t afi, safi_t safi) @@ -2547,15 +2542,14 @@ int peer_group_remote_as(struct bgp *bgp, const char *group_name, as_t *as, return 0; } -int peer_notify_unconfig(struct peer *peer) +void peer_notify_unconfig(struct peer *peer) { if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->status)) bgp_notify_send(peer, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_PEER_UNCONFIG); - return 0; } -int peer_group_notify_unconfig(struct peer_group *group) +void peer_group_notify_unconfig(struct peer_group *group) { struct peer *peer, *other; struct listnode *node, *nnode; @@ -2568,7 +2562,6 @@ int peer_group_notify_unconfig(struct peer_group *group) } else peer_notify_unconfig(peer); } - return 0; } int peer_group_delete(struct peer_group *group) @@ -2977,13 +2970,15 @@ static struct bgp *bgp_create(as_t *as, const char *name, bgp->rib_stale_time = BGP_DEFAULT_RIB_STALE_TIME; bgp->dynamic_neighbors_limit = BGP_DYNAMIC_NEIGHBORS_LIMIT_DEFAULT; bgp->dynamic_neighbors_count = 0; + bgp->lb_ref_bw = BGP_LINK_BW_REF_BW; + bgp->lb_handling = BGP_LINK_BW_ECMP; bgp->ebgp_requires_policy = DEFAULT_EBGP_POLICY_DISABLED; bgp->reject_as_sets = BGP_REJECT_AS_SETS_DISABLED; bgp_addpath_init_bgp_data(&bgp->tx_addpath); bgp->as = *as; -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC if (inst_type != BGP_INSTANCE_TYPE_VRF) { bgp->rfapi = bgp_rfapi_new(bgp); assert(bgp->rfapi); @@ -3382,7 +3377,7 @@ int bgp_delete(struct bgp *bgp) /* TODO - Other memory may need to be freed - e.g., NHT */ -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapi_delete(bgp); #endif bgp_cleanup_routes(bgp); @@ -3771,10 +3766,10 @@ static void peer_drop_dynamic_neighbor(struct peer *peer) } /* If peer is configured at least one address family return 1. */ -int peer_active(struct peer *peer) +bool peer_active(struct peer *peer) { if (BGP_PEER_SU_UNSPEC(peer)) - return 0; + return false; if (peer->afc[AFI_IP][SAFI_UNICAST] || peer->afc[AFI_IP][SAFI_MULTICAST] || peer->afc[AFI_IP][SAFI_LABELED_UNICAST] || peer->afc[AFI_IP][SAFI_MPLS_VPN] || peer->afc[AFI_IP][SAFI_ENCAP] @@ -3786,12 +3781,12 @@ int peer_active(struct peer *peer) || peer->afc[AFI_IP6][SAFI_ENCAP] || peer->afc[AFI_IP6][SAFI_FLOWSPEC] || peer->afc[AFI_L2VPN][SAFI_EVPN]) - return 1; - return 0; + return true; + return false; } /* If peer is negotiated at least one address family return 1. 
*/ -int peer_active_nego(struct peer *peer) +bool peer_active_nego(struct peer *peer) { if (peer->afc_nego[AFI_IP][SAFI_UNICAST] || peer->afc_nego[AFI_IP][SAFI_MULTICAST] @@ -3806,8 +3801,8 @@ int peer_active_nego(struct peer *peer) || peer->afc_nego[AFI_IP6][SAFI_ENCAP] || peer->afc_nego[AFI_IP6][SAFI_FLOWSPEC] || peer->afc_nego[AFI_L2VPN][SAFI_EVPN]) - return 1; - return 0; + return true; + return false; } void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, @@ -4206,6 +4201,19 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi, } } + /* + * If the peer is a route server client let's not + * muck with the nexthop on the way out the door + */ + if (flag & PEER_FLAG_RSERVER_CLIENT) { + if (set) + SET_FLAG(peer->af_flags[afi][safi], + PEER_FLAG_NEXTHOP_UNCHANGED); + else + UNSET_FLAG(peer->af_flags[afi][safi], + PEER_FLAG_NEXTHOP_UNCHANGED); + } + /* Inherit from peer-group or set/unset flags accordingly. */ if (peer_group_active(peer) && set == invert) peer_af_flag_inherit(peer, afi, safi, flag); @@ -4308,18 +4316,16 @@ int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag) } -int peer_tx_shutdown_message_set(struct peer *peer, const char *msg) +void peer_tx_shutdown_message_set(struct peer *peer, const char *msg) { XFREE(MTYPE_PEER_TX_SHUTDOWN_MSG, peer->tx_shutdown_message); peer->tx_shutdown_message = msg ? XSTRDUP(MTYPE_PEER_TX_SHUTDOWN_MSG, msg) : NULL; - return 0; } -int peer_tx_shutdown_message_unset(struct peer *peer) +void peer_tx_shutdown_message_unset(struct peer *peer) { XFREE(MTYPE_PEER_TX_SHUTDOWN_MSG, peer->tx_shutdown_message); - return 0; } @@ -4426,20 +4432,16 @@ int peer_ebgp_multihop_unset(struct peer *peer) } /* Neighbor description. */ -int peer_description_set(struct peer *peer, const char *desc) +void peer_description_set(struct peer *peer, const char *desc) { XFREE(MTYPE_PEER_DESC, peer->desc); peer->desc = XSTRDUP(MTYPE_PEER_DESC, desc); - - return 0; } -int peer_description_unset(struct peer *peer) +void peer_description_unset(struct peer *peer) { XFREE(MTYPE_PEER_DESC, peer->desc); - - return 0; } /* Neighbor update-source. */ @@ -4789,16 +4791,14 @@ int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi) return 0; } -int peer_port_set(struct peer *peer, uint16_t port) +void peer_port_set(struct peer *peer, uint16_t port) { peer->port = port; - return 0; } -int peer_port_unset(struct peer *peer) +void peer_port_unset(struct peer *peer) { peer->port = BGP_PORT_DEFAULT; - return 0; } /* @@ -5802,7 +5802,7 @@ static void peer_distribute_update(struct access_list *access) } } } -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_prefix_list_update(bgp); #endif } @@ -6173,8 +6173,7 @@ static void peer_aslist_update(const char *aslist_name) static void peer_aslist_add(char *aslist_name) { peer_aslist_update(aslist_name); - route_map_notify_dependencies((char *)aslist_name, - RMAP_EVENT_ASLIST_ADDED); + route_map_notify_dependencies(aslist_name, RMAP_EVENT_ASLIST_ADDED); } static void peer_aslist_del(const char *aslist_name) @@ -7038,7 +7037,7 @@ void bgp_init(unsigned short instance) /* Init zebra. 
*/ bgp_zebra_init(bm->master, instance); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC vnc_zebra_init(bm->master); #endif @@ -7053,7 +7052,7 @@ void bgp_init(unsigned short instance) bgp_route_map_init(); bgp_scan_vty_init(); bgp_mplsvpn_init(); -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC rfapi_init(); #endif bgp_ethernetvpn_init(); diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 1ada056a92..f6f9687783 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -284,6 +284,20 @@ enum global_gr_command { #define BGP_GR_SUCCESS 0 #define BGP_GR_FAILURE 1 +/* Handling of BGP link bandwidth (LB) on receiver - whether and how to + * do weighted ECMP. Note: This applies after multipath computation. + */ +enum bgp_link_bw_handling { + /* Do ECMP if some paths don't have LB - default */ + BGP_LINK_BW_ECMP, + /* Completely ignore LB, just do regular ECMP */ + BGP_LINK_BW_IGNORE_BW, + /* Skip paths without LB, do wECMP on others */ + BGP_LINK_BW_SKIP_MISSING, + /* Do wECMP with default weight for paths not having LB */ + BGP_LINK_BW_DEFWT_4_MISSING +}; + /* BGP instance structure. */ struct bgp { /* AS number of this BGP instance. */ @@ -395,6 +409,14 @@ struct bgp { #define BGP_UPDATE_DELAY_MIN 0 #define BGP_UPDATE_DELAY_MAX 3600 + /* Reference bandwidth for BGP link-bandwidth. Used when + * the LB value has to be computed based on some other + * factor (e.g., number of multipaths for the prefix) + * Value is in Mbps + */ + uint32_t lb_ref_bw; +#define BGP_LINK_BW_REF_BW 1 + /* BGP flags. */ uint32_t flags; #define BGP_FLAG_ALWAYS_COMPARE_MED (1 << 0) @@ -650,6 +672,9 @@ struct bgp { /* Count of peers in established state */ uint32_t established_peers; + /* Weighted ECMP related config. */ + enum bgp_link_bw_handling lb_handling; + QOBJ_FIELDS }; DECLARE_QOBJ_TYPE(bgp) @@ -838,6 +863,37 @@ struct bgp_peer_gr { bgp_peer_gr_action_ptr action_fun; }; +/* BGP finite state machine events. */ +enum bgp_fsm_events { + BGP_Start = 1, + BGP_Stop, + TCP_connection_open, + TCP_connection_closed, + TCP_connection_open_failed, + TCP_fatal_error, + ConnectRetry_timer_expired, + Hold_Timer_expired, + KeepAlive_timer_expired, + Receive_OPEN_message, + Receive_KEEPALIVE_message, + Receive_UPDATE_message, + Receive_NOTIFICATION_message, + Clearing_Completed, + BGP_EVENTS_MAX, +}; + +/* BGP finite state machine status. */ +enum bgp_fsm_status { + Idle = 1, + Connect, + Active, + OpenSent, + OpenConfirm, + Established, + Clearing, + Deleted, + BGP_STATUS_MAX, +}; /* BGP neighbor structure. */ struct peer { @@ -896,15 +952,15 @@ struct peer { struct peer *doppelganger; /* Status of the peer. */ - int status; - int ostatus; + enum bgp_fsm_status status; + enum bgp_fsm_status ostatus; /* FSM events, stored for debug purposes. * Note: uchar used for reduced memory usage. */ - unsigned char cur_event; - unsigned char last_event; - unsigned char last_major_event; + enum bgp_fsm_events cur_event; + enum bgp_fsm_events last_event; + enum bgp_fsm_events last_major_event; /* Peer index, used for dumping TABLE_DUMP_V2 format */ uint16_t table_dump_index; @@ -1499,6 +1555,12 @@ struct bgp_nlri { #define BGP_NOTIFY_CEASE 6 #define BGP_NOTIFY_CAPABILITY_ERR 7 +/* Subcodes for BGP Finite State Machine Error */ +#define BGP_NOTIFY_FSM_ERR_SUBCODE_UNSPECIFIC 0 +#define BGP_NOTIFY_FSM_ERR_SUBCODE_OPENSENT 1 +#define BGP_NOTIFY_FSM_ERR_SUBCODE_OPENCONFIRM 2 +#define BGP_NOTIFY_FSM_ERR_SUBCODE_ESTABLISHED 3 + #define BGP_NOTIFY_SUBCODE_UNSPECIFIC 0 /* BGP_NOTIFY_HEADER_ERR sub codes. 
*/ @@ -1544,34 +1606,6 @@ struct bgp_nlri { #define BGP_NOTIFY_CAPABILITY_INVALID_LENGTH 2 #define BGP_NOTIFY_CAPABILITY_MALFORMED_CODE 3 -/* BGP finite state machine status. */ -#define Idle 1 -#define Connect 2 -#define Active 3 -#define OpenSent 4 -#define OpenConfirm 5 -#define Established 6 -#define Clearing 7 -#define Deleted 8 -#define BGP_STATUS_MAX 9 - -/* BGP finite state machine events. */ -#define BGP_Start 1 -#define BGP_Stop 2 -#define TCP_connection_open 3 -#define TCP_connection_closed 4 -#define TCP_connection_open_failed 5 -#define TCP_fatal_error 6 -#define ConnectRetry_timer_expired 7 -#define Hold_Timer_expired 8 -#define KeepAlive_timer_expired 9 -#define Receive_OPEN_message 10 -#define Receive_KEEPALIVE_message 11 -#define Receive_UPDATE_message 12 -#define Receive_NOTIFICATION_message 13 -#define Clearing_Completed 14 -#define BGP_EVENTS_MAX 15 - /* BGP timers default value. */ #define BGP_INIT_START_TIMER 1 /* The following 3 are RFC defaults that are overridden in bgp_vty.c with @@ -1733,8 +1767,8 @@ extern struct peer *peer_unlock_with_caller(const char *, struct peer *); extern bgp_peer_sort_t peer_sort(struct peer *peer); extern bgp_peer_sort_t peer_sort_lookup(struct peer *peer); -extern int peer_active(struct peer *); -extern int peer_active_nego(struct peer *); +extern bool peer_active(struct peer *); +extern bool peer_active_nego(struct peer *); extern void bgp_recalculate_all_bestpaths(struct bgp *bgp); extern struct peer *peer_create(union sockunion *, const char *, struct bgp *, as_t, as_t, int, afi_t, safi_t, @@ -1768,21 +1802,21 @@ extern int bgp_handle_socket(struct bgp *bgp, struct vrf *vrf, vrf_id_t old_vrf_id, bool create); extern void bgp_router_id_zebra_bump(vrf_id_t, const struct prefix *); -extern int bgp_router_id_static_set(struct bgp *, struct in_addr); +extern void bgp_router_id_static_set(struct bgp *, struct in_addr); extern int bgp_cluster_id_set(struct bgp *, struct in_addr *); extern int bgp_cluster_id_unset(struct bgp *); extern int bgp_confederation_id_set(struct bgp *, as_t); extern int bgp_confederation_id_unset(struct bgp *); -extern int bgp_confederation_peers_check(struct bgp *, as_t); +extern bool bgp_confederation_peers_check(struct bgp *, as_t); extern int bgp_confederation_peers_add(struct bgp *, as_t); extern int bgp_confederation_peers_remove(struct bgp *, as_t); -extern int bgp_timers_set(struct bgp *, uint32_t keepalive, uint32_t holdtime, - uint32_t connect_retry); -extern int bgp_timers_unset(struct bgp *); +extern void bgp_timers_set(struct bgp *, uint32_t keepalive, uint32_t holdtime, + uint32_t connect_retry); +extern void bgp_timers_unset(struct bgp *); extern int bgp_default_local_preference_set(struct bgp *, uint32_t); extern int bgp_default_local_preference_unset(struct bgp *); @@ -1793,19 +1827,19 @@ extern int bgp_default_subgroup_pkt_queue_max_unset(struct bgp *bgp); extern int bgp_listen_limit_set(struct bgp *, int); extern int bgp_listen_limit_unset(struct bgp *); -extern int bgp_update_delay_active(struct bgp *); -extern int bgp_update_delay_configured(struct bgp *); +extern bool bgp_update_delay_active(struct bgp *); +extern bool bgp_update_delay_configured(struct bgp *); extern int bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi); extern void peer_as_change(struct peer *, as_t, int); extern int peer_remote_as(struct bgp *, union sockunion *, const char *, as_t *, int, afi_t, safi_t); extern int peer_group_remote_as(struct bgp *, const char *, as_t *, int); extern int peer_delete(struct 
peer *peer); -extern int peer_notify_unconfig(struct peer *peer); +extern void peer_notify_unconfig(struct peer *peer); extern int peer_group_delete(struct peer_group *); extern int peer_group_remote_as_delete(struct peer_group *); extern int peer_group_listen_range_add(struct peer_group *, struct prefix *); -extern int peer_group_notify_unconfig(struct peer_group *group); +extern void peer_group_notify_unconfig(struct peer_group *group); extern int peer_activate(struct peer *, afi_t, safi_t); extern int peer_deactivate(struct peer *, afi_t, safi_t); @@ -1830,8 +1864,8 @@ extern int peer_ebgp_multihop_set(struct peer *, int); extern int peer_ebgp_multihop_unset(struct peer *); extern int is_ebgp_multihop_configured(struct peer *peer); -extern int peer_description_set(struct peer *, const char *); -extern int peer_description_unset(struct peer *); +extern void peer_description_set(struct peer *, const char *); +extern void peer_description_unset(struct peer *); extern int peer_update_source_if_set(struct peer *, const char *); extern int peer_update_source_addr_set(struct peer *, const union sockunion *); @@ -1842,8 +1876,8 @@ extern int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi, struct route_map *route_map); extern int peer_default_originate_unset(struct peer *, afi_t, safi_t); -extern int peer_port_set(struct peer *, uint16_t); -extern int peer_port_unset(struct peer *); +extern void peer_port_set(struct peer *, uint16_t); +extern void peer_port_unset(struct peer *); extern int peer_weight_set(struct peer *, afi_t, safi_t, uint16_t); extern int peer_weight_unset(struct peer *, afi_t, safi_t); @@ -1900,8 +1934,8 @@ extern int peer_clear_soft(struct peer *, afi_t, safi_t, enum bgp_clear_type); extern int peer_ttl_security_hops_set(struct peer *, int); extern int peer_ttl_security_hops_unset(struct peer *); -extern int peer_tx_shutdown_message_set(struct peer *, const char *msg); -extern int peer_tx_shutdown_message_unset(struct peer *); +extern void peer_tx_shutdown_message_set(struct peer *, const char *msg); +extern void peer_tx_shutdown_message_unset(struct peer *); extern int bgp_route_map_update_timer(struct thread *thread); extern void bgp_route_map_terminate(void); diff --git a/bgpd/rfapi/bgp_rfapi_cfg.h b/bgpd/rfapi/bgp_rfapi_cfg.h index b72d38220b..f1548a6173 100644 --- a/bgpd/rfapi/bgp_rfapi_cfg.h +++ b/bgpd/rfapi/bgp_rfapi_cfg.h @@ -24,7 +24,7 @@ #include "lib/table.h" #include "lib/routemap.h" -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "rfapi.h" struct rfapi_l2_group_cfg { diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c index d87292f652..a0d8995a0f 100644 --- a/bgpd/rfapi/rfapi.c +++ b/bgpd/rfapi/rfapi.c @@ -355,7 +355,7 @@ int rfapi_check(void *handle) void del_vnc_route(struct rfapi_descriptor *rfd, struct peer *peer, /* rfd->peer for RFP regs */ - struct bgp *bgp, safi_t safi, struct prefix *p, + struct bgp *bgp, safi_t safi, const struct prefix *p, struct prefix_rd *prd, uint8_t type, uint8_t sub_type, struct rfapi_nexthop *lnh, int kill) { @@ -557,7 +557,7 @@ void rfapi_vn_options_free(struct rfapi_vn_option *p) /* Based on bgp_redistribute_add() */ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */ - struct bgp *bgp, int safi, struct prefix *p, + struct bgp *bgp, int safi, const struct prefix *p, struct prefix_rd *prd, struct rfapi_ip_addr *nexthop, uint32_t *local_pref, uint32_t *lifetime, /* NULL => dont send lifetime */ @@ -838,7 +838,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, 
VPN UN addr, peer */ beec.val[1] = ECOMMUNITY_OPAQUE_SUBTYPE_ENCAP; beec.val[6] = ((TunnelType) >> 8) & 0xff; beec.val[7] = (TunnelType)&0xff; - ecommunity_add_val(attr.ecommunity, &beec); + ecommunity_add_val(attr.ecommunity, &beec, false, false); } /* @@ -2650,7 +2650,8 @@ int rfapi_register(void *handle, struct rfapi_ip_prefix *prefix, ecom_value.val[7] = (l2o->logical_net_id >> 0) & 0xff; rtlist = ecommunity_new(); - ecommunity_add_val(rtlist, &ecom_value); + ecommunity_add_val(rtlist, &ecom_value, + false, false); } if (l2o->tag_id) { as_t as = bgp->as; @@ -2675,7 +2676,8 @@ int rfapi_register(void *handle, struct rfapi_ip_prefix *prefix, ecom_value.val[7] = val & 0xff; if (rtlist == NULL) rtlist = ecommunity_new(); - ecommunity_add_val(rtlist, &ecom_value); + ecommunity_add_val(rtlist, &ecom_value, + false, false); } } diff --git a/bgpd/rfapi/rfapi.h b/bgpd/rfapi/rfapi.h index 6af2ebeeb8..beb44aa780 100644 --- a/bgpd/rfapi/rfapi.h +++ b/bgpd/rfapi/rfapi.h @@ -21,7 +21,7 @@ #ifndef _QUAGGA_BGP_RFAPI_H #define _QUAGGA_BGP_RFAPI_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include <stdint.h> #include <netinet/in.h> diff --git a/bgpd/rfapi/rfapi_ap.c b/bgpd/rfapi/rfapi_ap.c index c5fda15d33..abb18aeb2c 100644 --- a/bgpd/rfapi/rfapi_ap.c +++ b/bgpd/rfapi/rfapi_ap.c @@ -81,10 +81,10 @@ * is used to spread out the sort for adbs with the same lifetime * and thereby make the skip list operations more efficient. */ -static int sl_adb_lifetime_cmp(void *adb1, void *adb2) +static int sl_adb_lifetime_cmp(const void *adb1, const void *adb2) { - struct rfapi_adb *a1 = adb1; - struct rfapi_adb *a2 = adb2; + const struct rfapi_adb *a1 = adb1; + const struct rfapi_adb *a2 = adb2; if (a1->lifetime < a2->lifetime) return -1; diff --git a/bgpd/rfapi/rfapi_backend.h b/bgpd/rfapi/rfapi_backend.h index 96e464d2ae..4d2ae0b02f 100644 --- a/bgpd/rfapi/rfapi_backend.h +++ b/bgpd/rfapi/rfapi_backend.h @@ -21,7 +21,7 @@ #ifndef _QUAGGA_BGP_RFAPI_BACKEND_H #define _QUAGGA_BGP_RFAPI_BACKEND_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC #include "bgpd/bgp_route.h" #include "bgpd/bgp_nexthop.h" @@ -35,16 +35,16 @@ extern void rfapi_delete(struct bgp *); struct rfapi *bgp_rfapi_new(struct bgp *bgp); void bgp_rfapi_destroy(struct bgp *bgp, struct rfapi *h); -extern void rfapiProcessUpdate(struct peer *peer, void *rfd, struct prefix *p, - struct prefix_rd *prd, struct attr *attr, - afi_t afi, safi_t safi, uint8_t type, - uint8_t sub_type, uint32_t *label); +extern void rfapiProcessUpdate(struct peer *peer, void *rfd, + const struct prefix *p, struct prefix_rd *prd, + struct attr *attr, afi_t afi, safi_t safi, + uint8_t type, uint8_t sub_type, uint32_t *label); -extern void rfapiProcessWithdraw(struct peer *peer, void *rfd, struct prefix *p, - struct prefix_rd *prd, struct attr *attr, - afi_t afi, safi_t safi, uint8_t type, - int kill); +extern void rfapiProcessWithdraw(struct peer *peer, void *rfd, + const struct prefix *p, struct prefix_rd *prd, + struct attr *attr, afi_t afi, safi_t safi, + uint8_t type, int kill); extern void rfapiProcessPeerDown(struct peer *peer); @@ -56,7 +56,7 @@ extern void vnc_zebra_withdraw(struct prefix *p, struct bgp_path_info *old_select); -extern void rfapi_vty_out_vncinfo(struct vty *vty, struct prefix *p, +extern void rfapi_vty_out_vncinfo(struct vty *vty, const struct prefix *p, struct bgp_path_info *bpi, safi_t safi); diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c index 2f274015fc..41e6a64a64 100644 --- a/bgpd/rfapi/rfapi_import.c +++ b/bgpd/rfapi/rfapi_import.c @@ 
-573,7 +573,7 @@ struct rfapi_import_table *rfapiMacImportTableGet(struct bgp *bgp, uint32_t lni) eval.val[7] = (lni >> 0) & 0xff; enew = ecommunity_new(); - ecommunity_add_val(enew, &eval); + ecommunity_add_val(enew, &eval, false, false); it->rt_import_list = enew; for (afi = AFI_IP; afi < AFI_MAX; ++afi) { @@ -663,14 +663,17 @@ rfapiMonitorMoveShorter(struct agg_node *original_vpn_node, int lockoffset) * If no less-specific routes, try to use the 0/0 node */ if (!par) { + const struct prefix *p; /* this isn't necessarily 0/0 */ par = agg_route_table_top(original_vpn_node); + if (par) + p = agg_node_get_prefix(par); /* * If we got the top node but it wasn't 0/0, * ignore it */ - if (par && par->p.prefixlen) { + if (par && p->prefixlen) { agg_unlock_node(par); /* maybe free */ par = NULL; } @@ -685,9 +688,10 @@ rfapiMonitorMoveShorter(struct agg_node *original_vpn_node, int lockoffset) */ if (!par) { struct prefix pfx_default; + const struct prefix *p = agg_node_get_prefix(original_vpn_node); memset(&pfx_default, 0, sizeof(pfx_default)); - pfx_default.family = original_vpn_node->p.family; + pfx_default.family = p->family; /* creates default node if none exists */ par = agg_node_get(agg_get_table(original_vpn_node), @@ -768,6 +772,7 @@ static void rfapiMonitorMoveLonger(struct agg_node *new_vpn_node) struct rfapi_monitor_vpn *mlast; struct bgp_path_info *bpi; struct agg_node *par; + const struct prefix *new_vpn_node_p = agg_node_get_prefix(new_vpn_node); RFAPI_CHECK_REFCOUNT(new_vpn_node, SAFI_MPLS_VPN, 0); @@ -808,12 +813,11 @@ static void rfapiMonitorMoveLonger(struct agg_node *new_vpn_node) * specific updated node */ for (mlast = NULL, monitor = RFAPI_MONITOR_VPN(par); monitor;) { - /* * If new longest match for monitor prefix is the new * route's prefix, move monitor to new route's prefix */ - if (prefix_match(&new_vpn_node->p, &monitor->p)) { + if (prefix_match(new_vpn_node_p, &monitor->p)) { /* detach */ if (mlast) { mlast->next = monitor->next; @@ -1040,7 +1044,7 @@ int rfapiEcommunityGetEthernetTag(struct ecommunity *ecom, uint16_t *tag_id) for (i = 0; i < ecom->size; ++i) { as_t as = 0; int encode = 0; - uint8_t *p = ecom->val + (i * ECOMMUNITY_SIZE); + const uint8_t *p = ecom->val + (i * ECOMMUNITY_SIZE); /* High-order octet of type. 
*/ encode = *p++; @@ -1266,6 +1270,7 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix, { struct rfapi_next_hop_entry *new; int have_vnc_tunnel_un = 0; + const struct prefix *p = agg_node_get_prefix(rn); #ifdef DEBUG_ENCAP_MONITOR vnc_zlog_debug_verbose("%s: entry, bpi %p, rn %p", __func__, bpi, rn); @@ -1289,8 +1294,7 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix, vo->type = RFAPI_VN_OPTION_TYPE_L2ADDR; - memcpy(&vo->v.l2addr.macaddr, &rn->p.u.prefix_eth.octet, - ETH_ALEN); + memcpy(&vo->v.l2addr.macaddr, &p->u.prefix_eth.octet, ETH_ALEN); /* only low 3 bytes of this are significant */ (void)rfapiEcommunityGetLNI(bpi->attr->ecommunity, &vo->v.l2addr.logical_net_id); @@ -1493,7 +1497,8 @@ static int rfapiNhlAddNodeRoutes( struct prefix pfx_un; struct skiplist *seen_nexthops; int count = 0; - int is_l2 = (rn->p.family == AF_ETHERNET); + const struct prefix *p = agg_node_get_prefix(rn); + int is_l2 = (p->family == AF_ETHERNET); if (rfd_rib_node) { struct agg_table *atable = agg_get_table(rfd_rib_node); @@ -1626,14 +1631,14 @@ static int rfapiNhlAddSubtree( * hands in node->link */ if (agg_node_left(rn) && agg_node_left(rn) != omit_node) { if (agg_node_left(rn)->info) { + const struct prefix *p = + agg_node_get_prefix(agg_node_left(rn)); int count = 0; struct agg_node *rib_rn = NULL; - rfapiQprefix2Rprefix(&agg_node_left(rn)->p, &rprefix); - if (rfd_rib_table) { - rib_rn = agg_node_get(rfd_rib_table, - &agg_node_left(rn)->p); - } + rfapiQprefix2Rprefix(p, &rprefix); + if (rfd_rib_table) + rib_rn = agg_node_get(rfd_rib_table, p); count = rfapiNhlAddNodeRoutes( agg_node_left(rn), &rprefix, lifetime, 0, head, @@ -1653,14 +1658,15 @@ static int rfapiNhlAddSubtree( if (agg_node_right(rn) && agg_node_right(rn) != omit_node) { if (agg_node_right(rn)->info) { + const struct prefix *p = + agg_node_get_prefix(agg_node_right(rn)); int count = 0; struct agg_node *rib_rn = NULL; - rfapiQprefix2Rprefix(&agg_node_right(rn)->p, &rprefix); - if (rfd_rib_table) { - rib_rn = agg_node_get(rfd_rib_table, - &agg_node_right(rn)->p); - } + rfapiQprefix2Rprefix(p, &rprefix); + if (rfd_rib_table) + rib_rn = agg_node_get(rfd_rib_table, p); + count = rfapiNhlAddNodeRoutes( agg_node_right(rn), &rprefix, lifetime, 0, head, tail, exclude_vnaddr, rib_rn, @@ -1712,23 +1718,18 @@ struct rfapi_next_hop_entry *rfapiRouteNode2NextHopList( struct rfapi_next_hop_entry *answer = NULL; struct rfapi_next_hop_entry *last = NULL; struct agg_node *parent; + const struct prefix *p = agg_node_get_prefix(rn); int count = 0; struct agg_node *rib_rn; #ifdef DEBUG_RETURNED_NHL - { - char buf[PREFIX_STRLEN]; - - prefix2str(&rn->p, buf, sizeof(buf)); - vnc_zlog_debug_verbose("%s: called with node pfx=%s", __func__, - buf); - } + vnc_zlog_debug_verbose("%s: called with node pfx=%rRN", __func__, rn); rfapiDebugBacktrace(); #endif - rfapiQprefix2Rprefix(&rn->p, &rprefix); + rfapiQprefix2Rprefix(p, &rprefix); - rib_rn = rfd_rib_table ? agg_node_get(rfd_rib_table, &rn->p) : NULL; + rib_rn = rfd_rib_table ? agg_node_get(rfd_rib_table, p) : NULL; /* * Add non-withdrawn routes at this node @@ -1780,9 +1781,10 @@ struct rfapi_next_hop_entry *rfapiRouteNode2NextHopList( * Add non-withdrawn routes from less-specific prefix */ if (parent) { - rib_rn = rfd_rib_table ? agg_node_get(rfd_rib_table, &parent->p) - : NULL; - rfapiQprefix2Rprefix(&parent->p, &rprefix); + const struct prefix *p = agg_node_get_prefix(parent); + + rib_rn = rfd_rib_table ? 
agg_node_get(rfd_rib_table, p) : NULL; + rfapiQprefix2Rprefix(p, &rprefix); count += rfapiNhlAddNodeRoutes(parent, &rprefix, lifetime, 0, &answer, &last, exclude_vnaddr, rib_rn, pfx_target_original); @@ -1863,7 +1865,9 @@ struct rfapi_next_hop_entry *rfapiEthRouteNode2NextHopList( struct rfapi_next_hop_entry *last = NULL; struct agg_node *rib_rn; - rib_rn = rfd_rib_table ? agg_node_get(rfd_rib_table, &rn->p) : NULL; + rib_rn = rfd_rib_table + ? agg_node_get(rfd_rib_table, agg_node_get_prefix(rn)) + : NULL; count = rfapiNhlAddNodeRoutes(rn, rprefix, lifetime, 0, &answer, &last, NULL, rib_rn, pfx_target_original); @@ -2006,10 +2010,10 @@ static void rfapiBgpInfoDetach(struct agg_node *rn, struct bgp_path_info *bpi) /* * For L3-indexed import tables */ -static int rfapi_bi_peer_rd_cmp(void *b1, void *b2) +static int rfapi_bi_peer_rd_cmp(const void *b1, const void *b2) { - struct bgp_path_info *bpi1 = b1; - struct bgp_path_info *bpi2 = b2; + const struct bgp_path_info *bpi1 = b1; + const struct bgp_path_info *bpi2 = b2; /* * Compare peers @@ -2022,8 +2026,9 @@ static int rfapi_bi_peer_rd_cmp(void *b1, void *b2) /* * compare RDs */ - return vnc_prefix_cmp((struct prefix *)&bpi1->extra->vnc.import.rd, - (struct prefix *)&bpi2->extra->vnc.import.rd); + return vnc_prefix_cmp( + (const struct prefix *)&bpi1->extra->vnc.import.rd, + (const struct prefix *)&bpi2->extra->vnc.import.rd); } /* @@ -2031,10 +2036,10 @@ static int rfapi_bi_peer_rd_cmp(void *b1, void *b2) * The BPIs in these tables should ALWAYS have an aux_prefix set because * they arrive via IPv4 or IPv6 advertisements. */ -static int rfapi_bi_peer_rd_aux_cmp(void *b1, void *b2) +static int rfapi_bi_peer_rd_aux_cmp(const void *b1, const void *b2) { - struct bgp_path_info *bpi1 = b1; - struct bgp_path_info *bpi2 = b2; + const struct bgp_path_info *bpi1 = b1; + const struct bgp_path_info *bpi2 = b2; int rc; /* @@ -2089,6 +2094,7 @@ static void rfapiItBiIndexAdd(struct agg_node *rn, /* Import table VPN node */ struct bgp_path_info *bpi) /* new BPI */ { struct skiplist *sl; + const struct prefix *p; assert(rn); assert(bpi); @@ -2105,7 +2111,8 @@ static void rfapiItBiIndexAdd(struct agg_node *rn, /* Import table VPN node */ sl = RFAPI_RDINDEX_W_ALLOC(rn); if (!sl) { - if (AF_ETHERNET == rn->p.family) { + p = agg_node_get_prefix(rn); + if (AF_ETHERNET == p->family) { sl = skiplist_new(0, rfapi_bi_peer_rd_aux_cmp, NULL); } else { sl = skiplist_new(0, rfapi_bi_peer_rd_cmp, NULL); @@ -2152,7 +2159,7 @@ static void rfapiItBiIndexDump(struct agg_node *rn) static struct bgp_path_info *rfapiItBiIndexSearch( struct agg_node *rn, /* Import table VPN node */ struct prefix_rd *prd, struct peer *peer, - struct prefix *aux_prefix) /* optional L3 addr for L2 ITs */ + const struct prefix *aux_prefix) /* optional L3 addr for L2 ITs */ { struct skiplist *sl; int rc; @@ -2232,7 +2239,7 @@ static struct bgp_path_info *rfapiItBiIndexSearch( bpi_fake.peer = peer; bpi_fake.extra = &bpi_extra; - bpi_fake.extra->vnc.import.rd = *(struct prefix_rd *)prd; + bpi_fake.extra->vnc.import.rd = *prd; if (aux_prefix) { bpi_fake.extra->vnc.import.aux_prefix = *aux_prefix; } else { @@ -2375,7 +2382,7 @@ static int rfapiWithdrawTimerVPN(struct thread *t) struct rfapi_withdraw *wcb = t->arg; struct bgp_path_info *bpi = wcb->info; struct bgp *bgp = bgp_get_default(); - + const struct prefix *p; struct rfapi_monitor_vpn *moved; afi_t afi; @@ -2398,15 +2405,8 @@ static int rfapiWithdrawTimerVPN(struct thread *t) RFAPI_CHECK_REFCOUNT(wcb->node, SAFI_MPLS_VPN, wcb->lockoffset); - { - 
char buf[BUFSIZ]; - - vnc_zlog_debug_verbose( - "%s: removing bpi %p at prefix %s/%d", __func__, bpi, - rfapi_ntop(wcb->node->p.family, &wcb->node->p.u.prefix, - buf, BUFSIZ), - wcb->node->p.prefixlen); - } + vnc_zlog_debug_verbose("%s: removing bpi %p at prefix %pRN", __func__, + bpi, wcb->node); /* * Remove the route (doubly-linked) @@ -2415,7 +2415,8 @@ static int rfapiWithdrawTimerVPN(struct thread *t) && VALID_INTERIOR_TYPE(bpi->type)) RFAPI_MONITOR_EXTERIOR(wcb->node)->valid_interior_count--; - afi = family2afi(wcb->node->p.family); + p = agg_node_get_prefix(wcb->node); + afi = family2afi(p->family); wcb->import_table->holddown_count[afi] -= 1; /* keep count consistent */ rfapiItBiIndexDel(wcb->node, bpi); rfapiBgpInfoDetach(wcb->node, bpi); /* with removed bpi */ @@ -2846,11 +2847,13 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table, } -typedef void(rfapi_bi_filtered_import_f)(struct rfapi_import_table *, int, - struct peer *, void *, struct prefix *, - struct prefix *, afi_t, - struct prefix_rd *, struct attr *, - uint8_t, uint8_t, uint32_t *); +typedef void(rfapi_bi_filtered_import_f)(struct rfapi_import_table *table, + int action, struct peer *peer, + void *rfd, const struct prefix *prefix, + const struct prefix *aux_prefix, + afi_t afi, struct prefix_rd *prd, + struct attr *attr, uint8_t type, + uint8_t sub_type, uint32_t *label); static void rfapiExpireEncapNow(struct rfapi_import_table *it, @@ -2899,11 +2902,11 @@ static int rfapiGetNexthop(struct attr *attr, struct prefix *prefix) static void rfapiBgpInfoFilteredImportEncap( struct rfapi_import_table *import_table, int action, struct peer *peer, void *rfd, /* set for looped back routes */ - struct prefix *p, - struct prefix *aux_prefix, /* Unused for encap routes */ + const struct prefix *p, + const struct prefix *aux_prefix, /* Unused for encap routes */ afi_t afi, struct prefix_rd *prd, struct attr *attr, /* part of bgp_path_info */ - uint8_t type, /* part of bgp_path_info */ + uint8_t type, /* part of bgp_path_info */ uint8_t sub_type, /* part of bgp_path_info */ uint32_t *label) /* part of bgp_path_info */ { @@ -3074,11 +3077,8 @@ static void rfapiBgpInfoFilteredImportEncap( if (action == FIF_ACTION_WITHDRAW) { vnc_zlog_debug_verbose( - "%s: withdrawing at prefix %s/%d", - __func__, - inet_ntop(rn->p.family, &rn->p.u.prefix, - buf, BUFSIZ), - rn->p.prefixlen); + "%s: withdrawing at prefix %pRN", + __func__, rn); rfapiBiStartWithdrawTimer( import_table, rn, bpi, afi, SAFI_ENCAP, @@ -3086,13 +3086,11 @@ static void rfapiBgpInfoFilteredImportEncap( } else { vnc_zlog_debug_verbose( - "%s: %s at prefix %s/%d", __func__, + "%s: %s at prefix %pRN", __func__, ((action == FIF_ACTION_KILL) ? 
"killing" : "replacing"), - inet_ntop(rn->p.family, &rn->p.u.prefix, - buf, BUFSIZ), - rn->p.prefixlen); + rn); /* * If this route is waiting to be deleted @@ -3160,10 +3158,8 @@ static void rfapiBgpInfoFilteredImportEncap( rn = agg_node_get(rt, p); } - vnc_zlog_debug_verbose( - "%s: (afi=%d, rn=%p) inserting at prefix %s/%d", __func__, afi, - rn, inet_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ), - rn->p.prefixlen); + vnc_zlog_debug_verbose("%s: (afi=%d, rn=%p) inserting at prefix %pRN", + __func__, afi, rn, rn); rfapiBgpInfoAttachSorted(rn, info_new, afi, SAFI_ENCAP); @@ -3246,6 +3242,7 @@ static void rfapiBgpInfoFilteredImportEncap( __func__, rn); #endif for (m = RFAPI_MONITOR_ENCAP(rn); m; m = m->next) { + const struct prefix *p; /* * For each referenced bpi/route, copy the ENCAP route's @@ -3273,9 +3270,9 @@ static void rfapiBgpInfoFilteredImportEncap( * list * per prefix. */ - + p = agg_node_get_prefix(m->node); referenced_vpn_prefix = - agg_node_get(referenced_vpn_table, &m->node->p); + agg_node_get(referenced_vpn_table, p); assert(referenced_vpn_prefix); for (mnext = referenced_vpn_prefix->info; mnext; mnext = mnext->next) { @@ -3360,11 +3357,11 @@ static void rfapiExpireVpnNow(struct rfapi_import_table *it, void rfapiBgpInfoFilteredImportVPN( struct rfapi_import_table *import_table, int action, struct peer *peer, void *rfd, /* set for looped back routes */ - struct prefix *p, - struct prefix *aux_prefix, /* AFI_L2VPN: optional IP */ + const struct prefix *p, + const struct prefix *aux_prefix, /* AFI_L2VPN: optional IP */ afi_t afi, struct prefix_rd *prd, struct attr *attr, /* part of bgp_path_info */ - uint8_t type, /* part of bgp_path_info */ + uint8_t type, /* part of bgp_path_info */ uint8_t sub_type, /* part of bgp_path_info */ uint32_t *label) /* part of bgp_path_info */ { @@ -3525,11 +3522,8 @@ void rfapiBgpInfoFilteredImportVPN( BGP_PATH_REMOVED); vnc_zlog_debug_verbose( - "%s: withdrawing at prefix %s/%d%s", - __func__, rfapi_ntop(rn->p.family, - &rn->p.u.prefix, - buf, BUFSIZ), - rn->p.prefixlen, + "%s: withdrawing at prefix %pRN%s", + __func__, rn, (washolddown ? " (already being withdrawn)" : "")); @@ -3548,14 +3542,11 @@ void rfapiBgpInfoFilteredImportVPN( VNC_ITRCCK; } else { vnc_zlog_debug_verbose( - "%s: %s at prefix %s/%d", __func__, + "%s: %s at prefix %pRN", __func__, ((action == FIF_ACTION_KILL) ? 
"killing" : "replacing"), - rfapi_ntop(rn->p.family, - &rn->p.u.prefix, buf, - BUFSIZ), - rn->p.prefixlen); + rn); /* * If this route is waiting to be deleted @@ -3673,10 +3664,8 @@ void rfapiBgpInfoFilteredImportVPN( info_new->extra->vnc.import.aux_prefix = *aux_prefix; } - vnc_zlog_debug_verbose( - "%s: inserting bpi %p at prefix %s/%d #%d", __func__, info_new, - rfapi_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ), - rn->p.prefixlen, rn->lock); + vnc_zlog_debug_verbose("%s: inserting bpi %p at prefix %pRN #%d", + __func__, info_new, rn, rn->lock); rfapiBgpInfoAttachSorted(rn, info_new, afi, SAFI_MPLS_VPN); rfapiItBiIndexAdd(rn, info_new); @@ -3839,11 +3828,11 @@ void rfapiBgpInfoFilteredImportVPN( static void rfapiBgpInfoFilteredImportBadSafi( struct rfapi_import_table *import_table, int action, struct peer *peer, void *rfd, /* set for looped back routes */ - struct prefix *p, - struct prefix *aux_prefix, /* AFI_L2VPN: optional IP */ + const struct prefix *p, + const struct prefix *aux_prefix, /* AFI_L2VPN: optional IP */ afi_t afi, struct prefix_rd *prd, struct attr *attr, /* part of bgp_path_info */ - uint8_t type, /* part of bgp_path_info */ + uint8_t type, /* part of bgp_path_info */ uint8_t sub_type, /* part of bgp_path_info */ uint32_t *label) /* part of bgp_path_info */ { @@ -3869,7 +3858,7 @@ rfapiBgpInfoFilteredImportFunction(safi_t safi) void rfapiProcessUpdate(struct peer *peer, void *rfd, /* set when looped from RFP/RFAPI */ - struct prefix *p, struct prefix_rd *prd, + const struct prefix *p, struct prefix_rd *prd, struct attr *attr, afi_t afi, safi_t safi, uint8_t type, uint8_t sub_type, uint32_t *label) { @@ -3953,7 +3942,7 @@ void rfapiProcessUpdate(struct peer *peer, } -void rfapiProcessWithdraw(struct peer *peer, void *rfd, struct prefix *p, +void rfapiProcessWithdraw(struct peer *peer, void *rfd, const struct prefix *p, struct prefix_rd *prd, struct attr *attr, afi_t afi, safi_t safi, uint8_t type, int kill) { @@ -4224,9 +4213,11 @@ static void rfapiBgpTableFilteredImport(struct bgp *bgp, safi))( it, /* which import table */ FIF_ACTION_UPDATE, bpi->peer, - NULL, &rn2->p, /* prefix */ + NULL, bgp_node_get_prefix(rn2), NULL, afi, - (struct prefix_rd *)&rn1->p, + (struct prefix_rd *) + bgp_node_get_prefix( + rn1), bpi->attr, bpi->type, bpi->sub_type, &label); } @@ -4444,27 +4435,20 @@ static void rfapiDeleteRemotePrefixesIt( for (rn = agg_route_top(rt); rn; rn = agg_route_next(rn)) { struct bgp_path_info *bpi; struct bgp_path_info *next; + const struct prefix *rn_p = agg_node_get_prefix(rn); if (p && VNC_DEBUG(IMPORT_DEL_REMOTE)) { char p1line[PREFIX_STRLEN]; - char p2line[PREFIX_STRLEN]; prefix2str(p, p1line, sizeof(p1line)); - prefix2str(&rn->p, p2line, sizeof(p2line)); - vnc_zlog_debug_any("%s: want %s, have %s", - __func__, p1line, p2line); + vnc_zlog_debug_any("%s: want %s, have %pRN", + __func__, p1line, rn); } - if (p && prefix_cmp(p, &rn->p)) + if (p && prefix_cmp(p, rn_p)) continue; - { - char buf_pfx[PREFIX_STRLEN]; - - prefix2str(&rn->p, buf_pfx, sizeof(buf_pfx)); - vnc_zlog_debug_verbose("%s: rn pfx=%s", - __func__, buf_pfx); - } + vnc_zlog_debug_verbose("%s: rn pfx=%pRN", __func__, rn); /* TBD is this valid for afi == AFI_L2VPN? 
*/ RFAPI_CHECK_REFCOUNT(rn, SAFI_MPLS_VPN, 1); @@ -4596,7 +4580,7 @@ static void rfapiDeleteRemotePrefixesIt( } } - vnc_direct_bgp_rh_del_route(bgp, afi, &rn->p, + vnc_direct_bgp_rh_del_route(bgp, afi, rn_p, bpi->peer); RFAPI_UPDATE_ITABLE_COUNT(bpi, it, afi, -1); diff --git a/bgpd/rfapi/rfapi_import.h b/bgpd/rfapi/rfapi_import.h index 1ab9cc5193..50093111c2 100644 --- a/bgpd/rfapi/rfapi_import.h +++ b/bgpd/rfapi/rfapi_import.h @@ -143,11 +143,11 @@ extern void rfapiUnicastNexthop2Prefix(afi_t afi, struct attr *attr, extern void rfapiBgpInfoFilteredImportVPN( struct rfapi_import_table *import_table, int action, struct peer *peer, void *rfd, /* set for looped back routes */ - struct prefix *p, - struct prefix *aux_prefix, /* AFI_ETHER: optional IP */ + const struct prefix *p, + const struct prefix *aux_prefix, /* AFI_ETHER: optional IP */ afi_t afi, struct prefix_rd *prd, struct attr *attr, /* part of bgp_path_info */ - uint8_t type, /* part of bgp_path_info */ + uint8_t type, /* part of bgp_path_info */ uint8_t sub_type, /* part of bgp_path_info */ uint32_t *label); /* part of bgp_path_info */ diff --git a/bgpd/rfapi/rfapi_monitor.c b/bgpd/rfapi/rfapi_monitor.c index dc1f7e0fbb..cd26892b84 100644 --- a/bgpd/rfapi/rfapi_monitor.c +++ b/bgpd/rfapi/rfapi_monitor.c @@ -789,7 +789,8 @@ static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m) * been responsible for the response, i.e., any monitors for * the exact prefix or a parent of it. */ -void rfapiMonitorTimersRestart(struct rfapi_descriptor *rfd, struct prefix *p) +void rfapiMonitorTimersRestart(struct rfapi_descriptor *rfd, + const struct prefix *p) { struct agg_node *rn; @@ -818,12 +819,14 @@ void rfapiMonitorTimersRestart(struct rfapi_descriptor *rfd, struct prefix *p) for (rn = agg_route_top(rfd->mon); rn; rn = agg_route_next(rn)) { struct rfapi_monitor_vpn *m; + const struct prefix *p_node; if (!((m = rn->info))) continue; + p_node = agg_node_get_prefix(m->node); /* NB order of test is significant ! 
*/ - if (!m->node || prefix_match(&m->node->p, p)) { + if (!m->node || prefix_match(p_node, p)) { rfapiMonitorTimerRestart(m); } } @@ -841,7 +844,8 @@ void rfapiMonitorItNodeChanged( struct skiplist *nves_seen; struct agg_node *rn = it_node; struct bgp *bgp = bgp_get_default(); - afi_t afi = family2afi(rn->p.family); + const struct prefix *p = agg_node_get_prefix(rn); + afi_t afi = family2afi(p->family); #if DEBUG_L2_EXTRA char buf_prefix[PREFIX_STRLEN]; #endif @@ -866,10 +870,9 @@ void rfapiMonitorItNodeChanged( if ((sl = RFAPI_MONITOR_ETH(rn))) { for (cursor = NULL, - rc = skiplist_next(sl, NULL, (void **)&m, - (void **)&cursor); + rc = skiplist_next(sl, NULL, (void **)&m, &cursor); !rc; rc = skiplist_next(sl, NULL, (void **)&m, - (void **)&cursor)) { + &cursor)) { if (skiplist_search(nves_seen, m->rfd, NULL)) { /* @@ -931,17 +934,14 @@ void rfapiMonitorItNodeChanged( assert(!skiplist_insert(nves_seen, m->rfd, NULL)); - char buf_attach_pfx[PREFIX_STRLEN]; char buf_target_pfx[PREFIX_STRLEN]; - prefix2str(&m->node->p, buf_attach_pfx, - sizeof(buf_attach_pfx)); prefix2str(&m->p, buf_target_pfx, sizeof(buf_target_pfx)); vnc_zlog_debug_verbose( - "%s: update rfd %p attached to pfx %s (targ=%s)", - __func__, m->rfd, - buf_attach_pfx, buf_target_pfx); + "%s: update rfd %p attached to pfx %pRN (targ=%s)", + __func__, m->rfd, m->node, + buf_target_pfx); /* * update its RIB @@ -1103,10 +1103,10 @@ static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m) m->rfd->response_lifetime, &m->timer); } -static int mon_eth_cmp(void *a, void *b) +static int mon_eth_cmp(const void *a, const void *b) { - struct rfapi_monitor_eth *m1; - struct rfapi_monitor_eth *m2; + const struct rfapi_monitor_eth *m1; + const struct rfapi_monitor_eth *m2; int i; @@ -1272,7 +1272,7 @@ static void rfapiMonitorEthDetachImport( #if DEBUG_L2_EXTRA char buf_prefix[PREFIX_STRLEN]; - prefix2str(&rn->p, buf_prefix, sizeof(buf_prefix)); + prefix2str(agg_node_get_prefix(rn), buf_prefix, sizeof(buf_prefix)); #endif /* diff --git a/bgpd/rfapi/rfapi_monitor.h b/bgpd/rfapi/rfapi_monitor.h index b8eec56475..3a2248aa60 100644 --- a/bgpd/rfapi/rfapi_monitor.h +++ b/bgpd/rfapi/rfapi_monitor.h @@ -167,7 +167,7 @@ extern void rfapiMonitorResponseRemovalOn(struct bgp *bgp); extern void rfapiMonitorExtraPrune(safi_t safi, struct agg_node *rn); extern void rfapiMonitorTimersRestart(struct rfapi_descriptor *rfd, - struct prefix *p); + const struct prefix *p); extern void rfapiMonitorItNodeChanged(struct rfapi_import_table *import_table, struct agg_node *it_node, diff --git a/bgpd/rfapi/rfapi_nve_addr.c b/bgpd/rfapi/rfapi_nve_addr.c index ee54d88c3f..b8193f1431 100644 --- a/bgpd/rfapi/rfapi_nve_addr.c +++ b/bgpd/rfapi/rfapi_nve_addr.c @@ -58,10 +58,10 @@ static void logdifferent(const char *tag, struct rfapi_nve_addr *a, #endif -int rfapi_nve_addr_cmp(void *k1, void *k2) +int rfapi_nve_addr_cmp(const void *k1, const void *k2) { - struct rfapi_nve_addr *a = (struct rfapi_nve_addr *)k1; - struct rfapi_nve_addr *b = (struct rfapi_nve_addr *)k2; + const struct rfapi_nve_addr *a = (struct rfapi_nve_addr *)k1; + const struct rfapi_nve_addr *b = (struct rfapi_nve_addr *)k2; int ret = 0; if (!a || !b) { diff --git a/bgpd/rfapi/rfapi_nve_addr.h b/bgpd/rfapi/rfapi_nve_addr.h index 2d54d4a3cc..7bcb3cab69 100644 --- a/bgpd/rfapi/rfapi_nve_addr.h +++ b/bgpd/rfapi/rfapi_nve_addr.h @@ -30,7 +30,7 @@ struct rfapi_nve_addr { }; -extern int rfapi_nve_addr_cmp(void *k1, void *k2); +extern int rfapi_nve_addr_cmp(const void *k1, const void *k2); extern void 
rfapiNveAddr2Str(struct rfapi_nve_addr *na, char *buf, int bufsize); diff --git a/bgpd/rfapi/rfapi_private.h b/bgpd/rfapi/rfapi_private.h index ff1cf7ef42..68caba600a 100644 --- a/bgpd/rfapi/rfapi_private.h +++ b/bgpd/rfapi/rfapi_private.h @@ -272,16 +272,13 @@ struct rfapi { ? ((prefix)->prefixlen == 128) \ : 0)) -extern void rfapiQprefix2Rprefix(struct prefix *qprefix, - struct rfapi_ip_prefix *rprefix); - extern int rfapi_find_rfd(struct bgp *bgp, struct rfapi_ip_addr *vn_addr, struct rfapi_ip_addr *un_addr, struct rfapi_descriptor **rfd); extern void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie + UN addr for VPN */ - struct bgp *bgp, int safi, struct prefix *p, + struct bgp *bgp, int safi, const struct prefix *p, struct prefix_rd *prd, struct rfapi_ip_addr *nexthop, uint32_t *local_pref, /* host byte order */ uint32_t *lifetime, /* host byte order */ @@ -297,7 +294,7 @@ add_vnc_route(struct rfapi_descriptor *rfd, /* cookie + UN addr for VPN */ #endif extern void del_vnc_route(struct rfapi_descriptor *rfd, struct peer *peer, - struct bgp *bgp, safi_t safi, struct prefix *p, + struct bgp *bgp, safi_t safi, const struct prefix *p, struct prefix_rd *prd, uint8_t type, uint8_t sub_type, struct rfapi_nexthop *lnh, int kill); diff --git a/bgpd/rfapi/rfapi_rib.c b/bgpd/rfapi/rfapi_rib.c index 3d4bdef75a..95b8582b95 100644 --- a/bgpd/rfapi/rfapi_rib.c +++ b/bgpd/rfapi/rfapi_rib.c @@ -268,8 +268,8 @@ static void rfapi_info_free(struct rfapi_info *goner) if (goner->timer) { struct rfapi_rib_tcb *tcb; - tcb = ((struct thread *)goner->timer)->arg; - thread_cancel((struct thread *)goner->timer); + tcb = goner->timer->arg; + thread_cancel(goner->timer); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); goner->timer = NULL; } @@ -340,7 +340,6 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd, { struct thread *t = ri->timer; struct rfapi_rib_tcb *tcb = NULL; - char buf_prefix[PREFIX_STRLEN]; if (t) { tcb = t->arg; @@ -361,9 +360,8 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd, UNSET_FLAG(tcb->flags, RFAPI_RIB_TCB_FLAG_DELETED); } - prefix2str(&rn->p, buf_prefix, sizeof(buf_prefix)); - vnc_zlog_debug_verbose("%s: rfd %p pfx %s life %u", __func__, rfd, - buf_prefix, ri->lifetime); + vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn, + ri->lifetime); ri->timer = NULL; thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime, &ri->timer); @@ -388,10 +386,10 @@ extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */ /* * Compares two <struct rfapi_rib_key>s */ -int rfapi_rib_key_cmp(void *k1, void *k2) +int rfapi_rib_key_cmp(const void *k1, const void *k2) { - struct rfapi_rib_key *a = (struct rfapi_rib_key *)k1; - struct rfapi_rib_key *b = (struct rfapi_rib_key *)k2; + const struct rfapi_rib_key *a = (struct rfapi_rib_key *)k1; + const struct rfapi_rib_key *b = (struct rfapi_rib_key *)k2; int ret; if (!a || !b) @@ -741,11 +739,12 @@ int rfapiRibPreloadBi( struct rfapi_rib_key rk; struct agg_node *trn; afi_t afi; + const struct prefix *p = agg_node_get_prefix(rfd_rib_node); if (!rfd_rib_node) return 0; - afi = family2afi(rfd_rib_node->p.family); + afi = family2afi(p->family); rfd = agg_get_table_info(agg_get_table(rfd_rib_node)); @@ -803,8 +802,7 @@ int rfapiRibPreloadBi( /* * Update last sent time for prefix */ - trn = agg_node_get(rfd->rsp_times[afi], - &rfd_rib_node->p); /* locks trn */ + trn = agg_node_get(rfd->rsp_times[afi], p); /* locks trn */ trn->info = (void *)(uintptr_t)bgp_clock(); if (trn->lock > 1) 
agg_unlock_node(trn); @@ -852,10 +850,9 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd, struct list *lPendCost = NULL; struct list *delete_list = NULL; int printedprefix = 0; - char buf_prefix[PREFIX_STRLEN]; int rib_node_started_nonempty = 0; int sendingsomeroutes = 0; - + const struct prefix *p; #if DEBUG_PROCESS_PENDING_NODE unsigned int count_rib_initial = 0; unsigned int count_pend_vn_initial = 0; @@ -863,12 +860,12 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd, #endif assert(pn); - prefix2str(&pn->p, buf_prefix, sizeof(buf_prefix)); - vnc_zlog_debug_verbose("%s: afi=%d, %s pn->info=%p", __func__, afi, - buf_prefix, pn->info); + p = agg_node_get_prefix(pn); + vnc_zlog_debug_verbose("%s: afi=%d, %pRN pn->info=%p", __func__, afi, + pn, pn->info); if (AFI_L2VPN != afi) { - rfapiQprefix2Rprefix(&pn->p, &hp); + rfapiQprefix2Rprefix(p, &hp); } RFAPI_RIB_CHECK_COUNTS(1, 0); @@ -876,7 +873,7 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd, /* * Find corresponding RIB node */ - rn = agg_node_get(rfd->rib[afi], &pn->p); /* locks rn */ + rn = agg_node_get(rfd->rib[afi], p); /* locks rn */ /* * RIB skiplist has key=rfapi_addr={vn,un}, val = rfapi_info, @@ -935,9 +932,9 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd, prefix2str(&ri->rk.vn, buf, sizeof(buf)); prefix2str(&ri->un, buf2, sizeof(buf2)); vnc_zlog_debug_verbose( - "%s: put dl pfx=%s vn=%s un=%s cost=%d life=%d vn_options=%p", - __func__, buf_prefix, buf, buf2, - ri->cost, ri->lifetime, ri->vn_options); + "%s: put dl pfx=%pRN vn=%s un=%s cost=%d life=%d vn_options=%p", + __func__, pn, buf, buf2, ri->cost, + ri->lifetime, ri->vn_options); skiplist_delete_first(slRibPt); } @@ -1186,8 +1183,7 @@ callback: vnc_zlog_debug_verbose("%s: lPendCost->count now %d", __func__, lPendCost->count); - vnc_zlog_debug_verbose("%s: For prefix %s (a)", __func__, - buf_prefix); + vnc_zlog_debug_verbose("%s: For prefix %pRN (a)", __func__, pn); printedprefix = 1; for (ALL_LIST_ELEMENTS(lPendCost, node, nnode, ri)) { @@ -1246,7 +1242,7 @@ callback: * update this NVE's timestamp for this prefix */ trn = agg_node_get(rfd->rsp_times[afi], - &pn->p); /* locks trn */ + p); /* locks trn */ trn->info = (void *)(uintptr_t)bgp_clock(); if (trn->lock > 1) agg_unlock_node(trn); @@ -1268,8 +1264,8 @@ callback: char buf2[BUFSIZ]; if (!printedprefix) { - vnc_zlog_debug_verbose("%s: For prefix %s (d)", - __func__, buf_prefix); + vnc_zlog_debug_verbose("%s: For prefix %pRN (d)", + __func__, pn); } vnc_zlog_debug_verbose("%s: delete_list has %d elements", __func__, delete_list->count); @@ -1465,7 +1461,7 @@ callback: } if (sendingsomeroutes) - rfapiMonitorTimersRestart(rfd, &pn->p); + rfapiMonitorTimersRestart(rfd, p); agg_unlock_node(rn); /* agg_node_get() */ @@ -1589,7 +1585,7 @@ void rfapiRibUpdatePendingNode( struct rfapi_import_table *it, /* needed for L2 */ struct agg_node *it_node, uint32_t lifetime) { - struct prefix *prefix; + const struct prefix *prefix; struct bgp_path_info *bpi; struct agg_node *pn; afi_t afi; @@ -1606,7 +1602,7 @@ void rfapiRibUpdatePendingNode( RFAPI_RIB_CHECK_COUNTS(1, 0); - prefix = &it_node->p; + prefix = agg_node_get_prefix(it_node); afi = family2afi(prefix->family); prefix2str(prefix, buf, sizeof(buf)); vnc_zlog_debug_verbose("%s: prefix=%s", __func__, buf); @@ -1794,7 +1790,8 @@ int rfapiRibFTDFilterRecentPrefix( struct prefix *pfx_target_original) /* query target */ { struct bgp *bgp = rfd->bgp; - afi_t 
afi = family2afi(it_rn->p.family); + const struct prefix *p = agg_node_get_prefix(it_rn); + afi_t afi = family2afi(p->family); time_t prefix_time; struct agg_node *trn; @@ -1809,14 +1806,15 @@ int rfapiRibFTDFilterRecentPrefix( * This matches behavior of now-obsolete rfapiRibFTDFilterRecent(), * but we need to decide if that is correct. */ - if (it_rn->p.family == AF_ETHERNET) + if (p->family == AF_ETHERNET) return 0; #ifdef DEBUG_FTD_FILTER_RECENT { char buf_pfx[PREFIX_STRLEN]; - prefix2str(&it_rn->p, buf_pfx, sizeof(buf_pfx)); + prefix2str(agg_node_get_prefix(it_rn), buf_pfx, + sizeof(buf_pfx)); vnc_zlog_debug_verbose("%s: prefix %s", __func__, buf_pfx); } #endif @@ -1824,7 +1822,7 @@ int rfapiRibFTDFilterRecentPrefix( /* * prefix covers target address, so allow prefix */ - if (prefix_match(&it_rn->p, pfx_target_original)) { + if (prefix_match(p, pfx_target_original)) { #ifdef DEBUG_FTD_FILTER_RECENT vnc_zlog_debug_verbose("%s: prefix covers target, allowed", __func__); @@ -1835,7 +1833,7 @@ int rfapiRibFTDFilterRecentPrefix( /* * check this NVE's timestamp for this prefix */ - trn = agg_node_get(rfd->rsp_times[afi], &it_rn->p); /* locks trn */ + trn = agg_node_get(rfd->rsp_times[afi], p); /* locks trn */ prefix_time = (time_t)trn->info; if (trn->lock > 1) agg_unlock_node(trn); @@ -1997,7 +1995,8 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd, } vnc_zlog_debug_verbose( "%s: RIB skiplist for this prefix follows", __func__); - rfapiRibShowRibSl(NULL, &rn->p, (struct skiplist *)rn->info); + rfapiRibShowRibSl(NULL, agg_node_get_prefix(rn), + (struct skiplist *)rn->info); #endif @@ -2114,11 +2113,10 @@ void rfapiRibPendingDeleteRoute(struct bgp *bgp, struct rfapi_import_table *it, { struct rfapi_descriptor *rfd; struct listnode *node; - char buf[PREFIX_STRLEN]; + const struct prefix *p = agg_node_get_prefix(it_node); - prefix2str(&it_node->p, buf, sizeof(buf)); - vnc_zlog_debug_verbose("%s: entry, it=%p, afi=%d, it_node=%p, pfx=%s", - __func__, it, afi, it_node, buf); + vnc_zlog_debug_verbose("%s: entry, it=%p, afi=%d, it_node=%p, pfx=%pRN", + __func__, it, afi, it_node, it_node); if (AFI_L2VPN == afi) { /* @@ -2157,7 +2155,7 @@ void rfapiRibPendingDeleteRoute(struct bgp *bgp, struct rfapi_import_table *it, * delete */ if ((rn = agg_node_lookup(m->rfd->rib[afi], - &it_node->p))) { + p))) { rfapiRibUpdatePendingNode( bgp, m->rfd, it, it_node, m->rfd->response_lifetime); @@ -2179,8 +2177,7 @@ void rfapiRibPendingDeleteRoute(struct bgp *bgp, struct rfapi_import_table *it, * this * NVE, it's OK to send an update with the delete */ - if ((rn = agg_node_lookup(m->rfd->rib[afi], - &it_node->p))) { + if ((rn = agg_node_lookup(m->rfd->rib[afi], p))) { rfapiRibUpdatePendingNode( bgp, m->rfd, it, it_node, m->rfd->response_lifetime); @@ -2212,8 +2209,7 @@ void rfapiRibPendingDeleteRoute(struct bgp *bgp, struct rfapi_import_table *it, * prefix * previously, we should send an updated response. 
*/ - if ((rn = agg_node_lookup(rfd->rib[afi], - &it_node->p))) { + if ((rn = agg_node_lookup(rfd->rib[afi], p))) { rfapiRibUpdatePendingNode( bgp, rfd, it, it_node, rfd->response_lifetime); @@ -2416,7 +2412,8 @@ void rfapiRibShowResponses(void *stream, struct prefix *pfx_match, for (rn = agg_route_top(rfd->rib[afi]); rn; rn = agg_route_next(rn)) { - + const struct prefix *p = + agg_node_get_prefix(rn); struct skiplist *sl; char str_pfx[PREFIX_STRLEN]; int printedprefix = 0; @@ -2433,9 +2430,8 @@ void rfapiRibShowResponses(void *stream, struct prefix *pfx_match, nhs_total += skiplist_count(sl); ++prefixes_total; - if (pfx_match - && !prefix_match(pfx_match, &rn->p) - && !prefix_match(&rn->p, pfx_match)) + if (pfx_match && !prefix_match(pfx_match, p) + && !prefix_match(p, pfx_match)) continue; ++prefixes_displayed; @@ -2472,7 +2468,7 @@ void rfapiRibShowResponses(void *stream, struct prefix *pfx_match, str_un, BUFSIZ)); } - prefix2str(&rn->p, str_pfx, sizeof(str_pfx)); + prefix2str(p, str_pfx, sizeof(str_pfx)); // fp(out, " %s\n", buf); /* prefix */ routes_displayed++; diff --git a/bgpd/rfapi/rfapi_rib.h b/bgpd/rfapi/rfapi_rib.h index 38a6df9fbf..3ad021b4f4 100644 --- a/bgpd/rfapi/rfapi_rib.h +++ b/bgpd/rfapi/rfapi_rib.h @@ -147,7 +147,7 @@ extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */ struct prefix *aux, /* may be NULL */ struct rfapi_rib_key *rk); -extern int rfapi_rib_key_cmp(void *k1, void *k2); +extern int rfapi_rib_key_cmp(const void *k1, const void *k2); extern void rfapiAdbFree(struct rfapi_adb *adb); diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c index 58fdc7c130..89f6852e2b 100644 --- a/bgpd/rfapi/rfapi_vty.c +++ b/bgpd/rfapi/rfapi_vty.c @@ -194,7 +194,7 @@ int rfapiQprefix2Raddr(struct prefix *qprefix, struct rfapi_ip_addr *raddr) * Translate Quagga prefix to RFAPI prefix */ /* rprefix->cost set to 0 */ -void rfapiQprefix2Rprefix(struct prefix *qprefix, +void rfapiQprefix2Rprefix(const struct prefix *qprefix, struct rfapi_ip_prefix *rprefix) { memset(rprefix, 0, sizeof(struct rfapi_ip_prefix)); @@ -393,7 +393,7 @@ int rfapiStream2Vty(void *stream, /* input */ } /* called from bgpd/bgp_vty.c'route_vty_out() */ -void rfapi_vty_out_vncinfo(struct vty *vty, struct prefix *p, +void rfapi_vty_out_vncinfo(struct vty *vty, const struct prefix *p, struct bgp_path_info *bpi, safi_t safi) { char *s; @@ -743,7 +743,6 @@ static void rfapiDebugPrintMonitorEncap(void *stream, void rfapiShowItNode(void *stream, struct agg_node *rn) { struct bgp_path_info *bpi; - char buf[BUFSIZ]; int (*fp)(void *, const char *, ...); struct vty *vty; @@ -753,9 +752,7 @@ void rfapiShowItNode(void *stream, struct agg_node *rn) if (rfapiStream2Vty(stream, &fp, &vty, &out, &vty_newline) == 0) return; - fp(out, "%s/%d @%p #%d%s", - rfapi_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ), - rn->p.prefixlen, rn, rn->lock, HVTYNL); + fp(out, "%pRN @%p #%d%s", rn, rn, rn->lock, HVTYNL); for (bpi = rn->info; bpi; bpi = bpi->next) { rfapiPrintBi(stream, bpi); @@ -782,14 +779,15 @@ void rfapiShowImportTable(void *stream, const char *label, struct agg_table *rt, for (rn = agg_route_top(rt); rn; rn = agg_route_next(rn)) { struct bgp_path_info *bpi; + const struct prefix *p = agg_node_get_prefix(rn); - if (rn->p.family == AF_ETHERNET) { - rfapiEthAddr2Str(&rn->p.u.prefix_eth, buf, BUFSIZ); + if (p->family == AF_ETHERNET) { + rfapiEthAddr2Str(&p->u.prefix_eth, buf, BUFSIZ); } else { - inet_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ); + inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ); } 
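/*
 * The rfapiShowImportTable hunk here, like most of the rfapi changes in this
 * diff, swaps direct access to the aggregate-table node's embedded prefix
 * (rn->p) for the agg_node_get_prefix() accessor, which returns a
 * const struct prefix *, and moves debug output from prefix2str() scratch
 * buffers to the %pRN printfrr conversion.  A minimal sketch of the accessor
 * idea follows; demo_prefix, demo_node and demo_node_get_prefix() are
 * stand-ins for illustration only, not FRR's real types.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_prefix {
	uint8_t family;			/* e.g. AF_INET */
	uint8_t prefixlen;
};

struct demo_node {
	struct demo_prefix p;		/* embedded key; callers must not modify it */
};

/* Read-only view of the node key, analogous to agg_node_get_prefix(rn). */
static const struct demo_prefix *demo_node_get_prefix(const struct demo_node *rn)
{
	return &rn->p;
}

int main(void)
{
	struct demo_node rn = { .p = { .family = 2, .prefixlen = 24 } };
	const struct demo_prefix *p = demo_node_get_prefix(&rn);

	/* callers read fields through the const pointer instead of touching rn.p */
	printf("family=%d prefixlen=%d\n", p->family, p->prefixlen);
	return 0;
}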
- fp(out, "%s/%d @%p #%d%s", buf, rn->p.prefixlen, rn, + fp(out, "%s/%d @%p #%d%s", buf, p->prefixlen, rn, rn->lock - 1, /* account for loop iterator locking */ HVTYNL); @@ -868,6 +866,8 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match) if (rfd->mon) { for (rn = agg_route_top(rfd->mon); rn; rn = agg_route_next(rn)) { + const struct prefix *p = + agg_node_get_prefix(rn); struct rfapi_monitor_vpn *m; char buf_remain[BUFSIZ]; char buf_pfx[BUFSIZ]; @@ -879,9 +879,8 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match) ++queries_total; - if (pfx_match - && !prefix_match(pfx_match, &rn->p) - && !prefix_match(&rn->p, pfx_match)) + if (pfx_match && !prefix_match(pfx_match, p) + && !prefix_match(p, pfx_match)) continue; ++queries_displayed; @@ -1028,6 +1027,7 @@ static int rfapiPrintRemoteRegBi(struct bgp *bgp, void *stream, char buf_vn[BUFSIZ]; char buf_lifetime[BUFSIZ]; int nlines = 0; + const struct prefix *p = agg_node_get_prefix(rn); if (!stream) return 0; /* for debug log, print into buf & call output once */ @@ -1040,8 +1040,8 @@ static int rfapiPrintRemoteRegBi(struct bgp *bgp, void *stream, */ buf_pfx[0] = 0; snprintf(buf_pfx, BUFSIZ, "%s/%d", - rfapi_ntop(rn->p.family, &rn->p.u.prefix, buf_ntop, BUFSIZ), - rn->p.prefixlen); + rfapi_ntop(p->family, &p->u.prefix, buf_ntop, BUFSIZ), + p->prefixlen); buf_pfx[BUFSIZ - 1] = 0; nlines++; @@ -1155,7 +1155,7 @@ static int rfapiPrintRemoteRegBi(struct bgp *bgp, void *stream, } fp(out, "%s", HVTYNL); - if (rn->p.family == AF_ETHERNET) { + if (p->family == AF_ETHERNET) { /* * If there is a corresponding IP address && != VN address, * print that on the next line @@ -1221,13 +1221,13 @@ static int rfapiShowRemoteRegistrationsIt(struct bgp *bgp, void *stream, for (rn = agg_route_top(it->imported_vpn[afi]); rn; rn = agg_route_next(rn)) { - + const struct prefix *p = agg_node_get_prefix(rn); struct bgp_path_info *bpi; int count_only; /* allow for wider or more narrow mask from user */ - if (prefix_only && !prefix_match(prefix_only, &rn->p) - && !prefix_match(&rn->p, prefix_only)) + if (prefix_only && !prefix_match(prefix_only, p) + && !prefix_match(p, prefix_only)) count_only = 1; else count_only = 0; @@ -2754,10 +2754,10 @@ static void nve_addr_free(void *hap) XFREE(MTYPE_RFAPI_NVE_ADDR, hap); } -static int nve_addr_cmp(void *k1, void *k2) +static int nve_addr_cmp(const void *k1, const void *k2) { - struct nve_addr *a = (struct nve_addr *)k1; - struct nve_addr *b = (struct nve_addr *)k2; + const struct nve_addr *a = (struct nve_addr *)k1; + const struct nve_addr *b = (struct nve_addr *)k2; int ret = 0; if (!a || !b) { @@ -3428,7 +3428,7 @@ static void clear_vnc_nve_closer(struct rfapi_local_reg_delete_arg *cda) &cursor)) { if (pValue->rfd) { - ((struct rfapi_descriptor *)pValue->rfd)->flags |= + pValue->rfd->flags |= RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY; rfapi_close(pValue->rfd); } diff --git a/bgpd/rfapi/rfapi_vty.h b/bgpd/rfapi/rfapi_vty.h index 8b881292ac..b5e1c38b0d 100644 --- a/bgpd/rfapi/rfapi_vty.h +++ b/bgpd/rfapi/rfapi_vty.h @@ -43,7 +43,7 @@ extern void rfapiRprefixApplyMask(struct rfapi_ip_prefix *rprefix); extern int rfapiQprefix2Raddr(struct prefix *qprefix, struct rfapi_ip_addr *raddr); -extern void rfapiQprefix2Rprefix(struct prefix *qprefix, +extern void rfapiQprefix2Rprefix(const struct prefix *qprefix, struct rfapi_ip_prefix *rprefix); extern int rfapiRprefix2Qprefix(struct rfapi_ip_prefix *rprefix, diff --git a/bgpd/rfapi/vnc_debug.h b/bgpd/rfapi/vnc_debug.h index dd49383072..c472b6366e 100644 --- 
a/bgpd/rfapi/vnc_debug.h +++ b/bgpd/rfapi/vnc_debug.h @@ -20,7 +20,7 @@ #ifndef _QUAGGA_BGP_VNC_DEBUG_H #define _QUAGGA_BGP_VNC_DEBUG_H -#if ENABLE_BGP_VNC +#ifdef ENABLE_BGP_VNC /* * debug state storage diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c index 352f5e8328..bd3395b49f 100644 --- a/bgpd/rfapi/vnc_export_bgp.c +++ b/bgpd/rfapi/vnc_export_bgp.c @@ -177,7 +177,7 @@ void vnc_direct_bgp_add_route_ce(struct bgp *bgp, struct agg_node *rn, { struct attr *attr = bpi->attr; struct peer *peer = bpi->peer; - struct prefix *prefix = &rn->p; + const struct prefix *prefix = agg_node_get_prefix(rn); afi_t afi = family2afi(prefix->family); struct bgp_node *urn; struct bgp_path_info *ubpi; @@ -330,7 +330,8 @@ void vnc_direct_bgp_add_route_ce(struct bgp *bgp, struct agg_node *rn, void vnc_direct_bgp_del_route_ce(struct bgp *bgp, struct agg_node *rn, struct bgp_path_info *bpi) { - afi_t afi = family2afi(rn->p.family); + const struct prefix *p = agg_node_get_prefix(rn); + afi_t afi = family2afi(p->family); struct bgp_path_info *vbpi; struct prefix ce_nexthop; @@ -395,8 +396,8 @@ void vnc_direct_bgp_del_route_ce(struct bgp *bgp, struct agg_node *rn, /* * withdraw the route */ - bgp_withdraw(bpi->peer, &rn->p, 0, /* addpath_id */ - NULL, /* attr, ignored */ + bgp_withdraw(bpi->peer, p, 0, /* addpath_id */ + NULL, /* attr, ignored */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ NULL, 0, NULL); /* tag not used for unicast */ @@ -432,17 +433,11 @@ static void vnc_direct_bgp_vpn_enable_ce(struct bgp *bgp, afi_t afi) */ for (rn = agg_route_top(bgp->rfapi->it_ce->imported_vpn[afi]); rn; rn = agg_route_next(rn)) { - if (!rn->info) continue; - { - char prefixstr[PREFIX_STRLEN]; - - prefix2str(&rn->p, prefixstr, sizeof(prefixstr)); - vnc_zlog_debug_verbose("%s: checking prefix %s", - __func__, prefixstr); - } + vnc_zlog_debug_verbose("%s: checking prefix %pRN", __func__, + rn); for (ri = rn->info; ri; ri = ri->next) { @@ -492,9 +487,9 @@ static void vnc_direct_bgp_vpn_disable_ce(struct bgp *bgp, afi_t afi) && ri->sub_type == BGP_ROUTE_REDISTRIBUTE) { bgp_withdraw( - ri->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ - NULL, /* ignored */ + ri->peer, bgp_node_get_prefix(rn), + 0, /* addpath_id */ + NULL, /* ignored */ AFI_IP, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, @@ -537,7 +532,7 @@ static struct ecommunity *vnc_route_origin_ecom(struct agg_node *rn) &bpi->attr->mp_nexthop_global_in.s_addr, 4); roec.val[6] = 0; roec.val[7] = 0; - ecommunity_add_val(new, &roec); + ecommunity_add_val(new, &roec, false, false); break; case AF_INET6: /* No support for IPv6 addresses in extended communities @@ -568,7 +563,7 @@ static struct ecommunity *vnc_route_origin_ecom_single(struct in_addr *origin) new = ecommunity_new(); assert(new); - ecommunity_add_val(new, &roec); + ecommunity_add_val(new, &roec, false, false); if (!new->size) { ecommunity_free(&new); @@ -698,7 +693,8 @@ void vnc_direct_bgp_add_prefix(struct bgp *bgp, struct attr attr = {0}; struct listnode *node, *nnode; struct rfapi_rfg_name *rfgn; - afi_t afi = family2afi(rn->p.family); + const struct prefix *p = agg_node_get_prefix(rn); + afi_t afi = family2afi(p->family); if (!afi) { flog_err(EC_LIB_DEVELOPMENT, "%s: can't get afi of route node", @@ -769,7 +765,7 @@ void vnc_direct_bgp_add_prefix(struct bgp *bgp, */ if (rfgn->rfg->plist_export_bgp[afi]) { if (prefix_list_apply(rfgn->rfg->plist_export_bgp[afi], - &rn->p) + p) == PREFIX_DENY) continue; @@ -808,7 
+804,8 @@ void vnc_direct_bgp_del_prefix(struct bgp *bgp, { struct listnode *node, *nnode; struct rfapi_rfg_name *rfgn; - afi_t afi = family2afi(rn->p.family); + const struct prefix *p = agg_node_get_prefix(rn); + afi_t afi = family2afi(p->family); if (!afi) { flog_err(EC_LIB_DEVELOPMENT, "%s: can't get afi route node", @@ -877,9 +874,9 @@ void vnc_direct_bgp_del_prefix(struct bgp *bgp, if (rfapiRaddr2Qprefix(&irfd->vn_addr, &nhp)) continue; - bgp_withdraw(irfd->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ - NULL, /* attr, ignored */ + bgp_withdraw(irfd->peer, p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ @@ -907,9 +904,9 @@ void vnc_direct_bgp_del_prefix(struct bgp *bgp, if (rfapiRaddr2Qprefix(&irfd->vn_addr, &nhp)) continue; - bgp_withdraw(irfd->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ - NULL, /* attr, ignored */ + bgp_withdraw(irfd->peer, p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ @@ -998,6 +995,8 @@ void vnc_direct_bgp_add_nve(struct bgp *bgp, struct rfapi_descriptor *rfd) struct attr hattr; struct attr *iattr; struct bgp_path_info info; + const struct prefix *p = + agg_node_get_prefix(rn); if (rfapiRaddr2Qprefix(&irfd->vn_addr, &nhp)) @@ -1010,7 +1009,7 @@ void vnc_direct_bgp_add_nve(struct bgp *bgp, struct rfapi_descriptor *rfd) if (prefix_list_apply( rfgn->rfg->plist_export_bgp [afi], - &rn->p) + p) == PREFIX_DENY) continue; @@ -1033,8 +1032,7 @@ void vnc_direct_bgp_add_nve(struct bgp *bgp, struct rfapi_descriptor *rfd) ret = route_map_apply( rfgn->rfg ->routemap_export_bgp, - &rn->p, RMAP_BGP, - &info); + p, RMAP_BGP, &info); if (ret == RMAP_DENYMATCH) { bgp_attr_flush(&hattr); continue; @@ -1044,8 +1042,8 @@ void vnc_direct_bgp_add_nve(struct bgp *bgp, struct rfapi_descriptor *rfd) iattr = bgp_attr_intern(&hattr); bgp_attr_flush(&hattr); bgp_update( - irfd->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ + irfd->peer, p, /* prefix */ + 0, /* addpath_id */ iattr, /* bgp_update copies it */ afi, SAFI_UNICAST, @@ -1134,7 +1132,8 @@ void vnc_direct_bgp_del_nve(struct bgp *bgp, struct rfapi_descriptor *rfd) rn = agg_route_next(rn)) { if (rn->info) { - + const struct prefix *p = + agg_node_get_prefix(rn); struct prefix nhp; struct rfapi_descriptor *irfd = rfd; @@ -1142,10 +1141,9 @@ void vnc_direct_bgp_del_nve(struct bgp *bgp, struct rfapi_descriptor *rfd) &nhp)) continue; - bgp_withdraw(irfd->peer, - &rn->p, /* prefix */ - 0, /* addpath_id */ - NULL, /* attr, ignored */ + bgp_withdraw(irfd->peer, p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, @@ -1169,6 +1167,7 @@ static void vnc_direct_add_rn_group_rd(struct bgp *bgp, struct bgp_path_info info; struct attr hattr; struct attr *iattr; + const struct prefix *p = agg_node_get_prefix(rn); if (irfd == NULL && rfg->type != RFAPI_GROUP_CFG_VRF) { /* need new rfapi_handle, for peer strcture @@ -1189,7 +1188,7 @@ static void vnc_direct_add_rn_group_rd(struct bgp *bgp, } if (rfg->label > MPLS_LABEL_MAX) { vnc_zlog_debug_verbose( - "%s: VRF \"%s\" is missing defaul label configuration.\n", + "%s: VRF \"%s\" is missing default label configuration.\n", __func__, rfg->name); return; } @@ -1242,8 +1241,8 @@ static void vnc_direct_add_rn_group_rd(struct bgp *bgp, info.peer = irfd->peer; info.attr = &hattr; - 
ret = route_map_apply(rfg->routemap_export_bgp, &rn->p, - RMAP_BGP, &info); + ret = route_map_apply(rfg->routemap_export_bgp, p, RMAP_BGP, + &info); if (ret == RMAP_DENYMATCH) { bgp_attr_flush(&hattr); vnc_zlog_debug_verbose( @@ -1261,9 +1260,9 @@ static void vnc_direct_add_rn_group_rd(struct bgp *bgp, iattr = bgp_attr_intern(&hattr); bgp_attr_flush(&hattr); - bgp_update(irfd->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ - iattr, /* bgp_update copies it */ + bgp_update(irfd->peer, p, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies it */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ NULL, /* tag not used for unicast */ @@ -1317,7 +1316,7 @@ static void vnc_direct_bgp_add_group_afi(struct bgp *bgp, for (rn = agg_route_top(rt); rn; rn = agg_route_next(rn)) { if (rn->info) { - + const struct prefix *p = agg_node_get_prefix(rn); struct listnode *ln; /* @@ -1325,7 +1324,7 @@ static void vnc_direct_bgp_add_group_afi(struct bgp *bgp, */ if (rfg->plist_export_bgp[afi]) { if (prefix_list_apply( - rfg->plist_export_bgp[afi], &rn->p) + rfg->plist_export_bgp[afi], p) == PREFIX_DENY) continue; @@ -1374,9 +1373,10 @@ static void vnc_direct_del_rn_group_rd(struct bgp *bgp, { if (irfd == NULL) return; - bgp_withdraw(irfd->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ - NULL, /* attr, ignored */ + + bgp_withdraw(irfd->peer, agg_node_get_prefix(rn), /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ NULL, 0, NULL); /* tag not used for unicast */ @@ -1493,9 +1493,9 @@ static void vnc_direct_bgp_unexport_table(afi_t afi, struct agg_table *rt, irfd)) { bgp_withdraw(irfd->peer, - &rn->p, /* prefix */ - 0, /* addpath_id */ - NULL, /* attr, ignored */ + agg_node_get_prefix(rn), + 0, /* addpath_id */ + NULL, /* attr, ignored */ afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, @@ -1633,7 +1633,7 @@ void vnc_direct_bgp_vpn_disable(struct bgp *bgp, afi_t afi) * caller do it? */ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi, - struct prefix *prefix, struct peer *peer, + const struct prefix *prefix, struct peer *peer, struct attr *attr) { struct vnc_export_info *eti; @@ -1732,13 +1732,14 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi, static int vncExportWithdrawTimer(struct thread *t) { struct vnc_export_info *eti = t->arg; + const struct prefix *p = agg_node_get_prefix(eti->node); /* * withdraw the route */ - bgp_withdraw(eti->peer, &eti->node->p, 0, /* addpath_id */ - NULL, /* attr, ignored */ - family2afi(eti->node->p.family), SAFI_UNICAST, eti->type, + bgp_withdraw(eti->peer, p, 0, /* addpath_id */ + NULL, /* attr, ignored */ + family2afi(p->family), SAFI_UNICAST, eti->type, eti->subtype, NULL, /* RD not used for unicast */ NULL, 0, NULL); /* tag not used for unicast, EVPN neither */ @@ -1757,7 +1758,7 @@ static int vncExportWithdrawTimer(struct thread *t) * caller do it? 
*/ void vnc_direct_bgp_rh_del_route(struct bgp *bgp, afi_t afi, - struct prefix *prefix, struct peer *peer) + const struct prefix *prefix, struct peer *peer) { struct vnc_export_info *eti; @@ -1840,11 +1841,12 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi) struct bgp_table *table; struct bgp_node *rn; struct bgp_path_info *ri; + const struct prefix *prn_p = bgp_node_get_prefix(prn); memset(&prd, 0, sizeof(prd)); prd.family = AF_UNSPEC; prd.prefixlen = 64; - memcpy(prd.val, prn->p.u.val, 8); + memcpy(prd.val, prn_p->u.val, 8); /* This is the per-RD table of prefixes */ table = bgp_node_get_bgp_table_info(prn); @@ -1853,6 +1855,7 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi) continue; for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) { + const struct prefix *rn_p; /* * skip prefix list check if no routes here @@ -1860,21 +1863,17 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi) if (!bgp_node_has_bgp_path_info_data(rn)) continue; - { - char prefixstr[PREFIX_STRLEN]; + vnc_zlog_debug_verbose("%s: checking prefix %pRN", + __func__, rn); - prefix2str(&rn->p, prefixstr, - sizeof(prefixstr)); - vnc_zlog_debug_verbose("%s: checking prefix %s", - __func__, prefixstr); - } + rn_p = bgp_node_get_prefix(rn); /* * prefix list check */ if (hc->plist_export_bgp[afi]) { if (prefix_list_apply(hc->plist_export_bgp[afi], - &rn->p) + rn_p) == PREFIX_DENY) { vnc_zlog_debug_verbose( @@ -1919,8 +1918,7 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi) info.attr = &hattr; ret = route_map_apply( hc->routemap_export_bgp, - &rn->p, RMAP_BGP, - &info); + rn_p, RMAP_BGP, &info); if (ret == RMAP_DENYMATCH) { bgp_attr_flush(&hattr); vnc_zlog_debug_verbose( @@ -1939,7 +1937,7 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi) * this route */ eti = vnc_eti_get( - bgp, EXPORT_TYPE_BGP, &rn->p, + bgp, EXPORT_TYPE_BGP, rn_p, ri->peer, ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE); @@ -1960,19 +1958,19 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi) "%s: calling bgp_update", __func__); - bgp_update( - ri->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ - iattr, /* bgp_update copies - it */ - AFI_IP, SAFI_UNICAST, - ZEBRA_ROUTE_VNC_DIRECT_RH, - BGP_ROUTE_REDISTRIBUTE, NULL, - /* RD not used for unicast */ - NULL, - /* tag not used for unicast, - or EVPN */ - 0, 0, NULL); /* EVPN not used */ + bgp_update(ri->peer, rn_p, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies + it */ + AFI_IP, SAFI_UNICAST, + ZEBRA_ROUTE_VNC_DIRECT_RH, + BGP_ROUTE_REDISTRIBUTE, NULL, + /* RD not used for unicast */ + NULL, + /* tag not used for unicast, + or EVPN */ + 0, 0, + NULL); /* EVPN not used */ bgp_attr_unintern(&iattr); } @@ -2001,7 +1999,7 @@ void vnc_direct_bgp_rh_vpn_disable(struct bgp *bgp, afi_t afi) */ for (rn = bgp_table_top(bgp->rib[afi][SAFI_UNICAST]); rn; rn = bgp_route_next(rn)) { - + const struct prefix *rn_p = bgp_node_get_prefix(rn); struct bgp_path_info *ri; struct bgp_path_info *next; @@ -2018,7 +2016,7 @@ void vnc_direct_bgp_rh_vpn_disable(struct bgp *bgp, afi_t afi) * Delete routes immediately (no timer) */ eti = vnc_eti_checktimer( - bgp, EXPORT_TYPE_BGP, &rn->p, ri->peer, + bgp, EXPORT_TYPE_BGP, rn_p, ri->peer, ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE); if (eti) { @@ -2027,8 +2025,8 @@ void vnc_direct_bgp_rh_vpn_disable(struct bgp *bgp, afi_t afi) vnc_eti_delete(eti); } - bgp_withdraw(ri->peer, &rn->p, /* prefix */ - 0, /* addpath_id */ + bgp_withdraw(ri->peer, rn_p, /* prefix */ + 0, 
/* addpath_id */ NULL, /* ignored */ AFI_IP, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT_RH, diff --git a/bgpd/rfapi/vnc_export_bgp_p.h b/bgpd/rfapi/vnc_export_bgp_p.h index a1cb972740..bf292abb0a 100644 --- a/bgpd/rfapi/vnc_export_bgp_p.h +++ b/bgpd/rfapi/vnc_export_bgp_p.h @@ -61,12 +61,12 @@ extern void vnc_direct_bgp_reexport_group_afi(struct bgp *bgp, extern void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi, - struct prefix *prefix, + const struct prefix *prefix, struct peer *peer, struct attr *attr); extern void vnc_direct_bgp_rh_del_route(struct bgp *bgp, afi_t afi, - struct prefix *prefix, + const struct prefix *prefix, struct peer *peer); extern void vnc_direct_bgp_reexport(struct bgp *bgp, afi_t afi); diff --git a/bgpd/rfapi/vnc_export_table.c b/bgpd/rfapi/vnc_export_table.c index 5e00a1017b..255f868bdf 100644 --- a/bgpd/rfapi/vnc_export_table.c +++ b/bgpd/rfapi/vnc_export_table.c @@ -34,7 +34,7 @@ #include "bgpd/rfapi/vnc_debug.h" struct agg_node *vnc_etn_get(struct bgp *bgp, vnc_export_type_t type, - struct prefix *p) + const struct prefix *p) { struct agg_table *t = NULL; struct agg_node *rn = NULL; @@ -66,7 +66,7 @@ struct agg_node *vnc_etn_get(struct bgp *bgp, vnc_export_type_t type, } struct agg_node *vnc_etn_lookup(struct bgp *bgp, vnc_export_type_t type, - struct prefix *p) + const struct prefix *p) { struct agg_table *t = NULL; struct agg_node *rn = NULL; @@ -98,7 +98,7 @@ struct agg_node *vnc_etn_lookup(struct bgp *bgp, vnc_export_type_t type, } struct vnc_export_info *vnc_eti_get(struct bgp *bgp, vnc_export_type_t etype, - struct prefix *p, struct peer *peer, + const struct prefix *p, struct peer *peer, uint8_t type, uint8_t subtype) { struct agg_node *etn; @@ -165,8 +165,9 @@ void vnc_eti_delete(struct vnc_export_info *goner) struct vnc_export_info *vnc_eti_checktimer(struct bgp *bgp, vnc_export_type_t etype, - struct prefix *p, struct peer *peer, - uint8_t type, uint8_t subtype) + const struct prefix *p, + struct peer *peer, uint8_t type, + uint8_t subtype) { struct agg_node *etn; struct vnc_export_info *eti; diff --git a/bgpd/rfapi/vnc_export_table.h b/bgpd/rfapi/vnc_export_table.h index fdb35e81e1..8a1fc9aaef 100644 --- a/bgpd/rfapi/vnc_export_table.h +++ b/bgpd/rfapi/vnc_export_table.h @@ -46,21 +46,21 @@ struct vnc_export_info { }; extern struct agg_node *vnc_etn_get(struct bgp *bgp, vnc_export_type_t type, - struct prefix *p); + const struct prefix *p); extern struct agg_node *vnc_etn_lookup(struct bgp *bgp, vnc_export_type_t type, - struct prefix *p); + const struct prefix *p); -extern struct vnc_export_info *vnc_eti_get(struct bgp *bgp, - vnc_export_type_t etype, - struct prefix *p, struct peer *peer, - uint8_t type, uint8_t subtype); +extern struct vnc_export_info * +vnc_eti_get(struct bgp *bgp, vnc_export_type_t etype, const struct prefix *p, + struct peer *peer, uint8_t type, uint8_t subtype); extern void vnc_eti_delete(struct vnc_export_info *goner); extern struct vnc_export_info * -vnc_eti_checktimer(struct bgp *bgp, vnc_export_type_t etype, struct prefix *p, - struct peer *peer, uint8_t type, uint8_t subtype); +vnc_eti_checktimer(struct bgp *bgp, vnc_export_type_t etype, + const struct prefix *p, struct peer *peer, uint8_t type, + uint8_t subtype); #endif /* _QUAGGA_VNC_VNC_EXPORT_TABLE_H_ */ diff --git a/bgpd/rfapi/vnc_import_bgp.c b/bgpd/rfapi/vnc_import_bgp.c index ba6ef14257..ac5beed0e3 100644 --- a/bgpd/rfapi/vnc_import_bgp.c +++ b/bgpd/rfapi/vnc_import_bgp.c @@ -104,7 +104,7 @@ uint32_t calc_local_pref(struct attr *attr, struct peer *peer) 
return local_pref; } -static int is_host_prefix(struct prefix *p) +static int is_host_prefix(const struct prefix *p) { switch (p->family) { case AF_INET: @@ -128,14 +128,14 @@ struct prefix_bag { static const uint8_t maskbit[] = {0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff}; -int vnc_prefix_cmp(void *pfx1, void *pfx2) +int vnc_prefix_cmp(const void *pfx1, const void *pfx2) { int offset; int shift; uint8_t mask; - struct prefix *p1 = pfx1; - struct prefix *p2 = pfx2; + const struct prefix *p1 = pfx1; + const struct prefix *p2 = pfx2; if (p1->family < p2->family) return -1; @@ -299,9 +299,9 @@ static void vnc_rhnck(char *tag) */ static int process_unicast_route(struct bgp *bgp, /* in */ afi_t afi, /* in */ - struct prefix *prefix, /* in */ - struct bgp_path_info *info, /* in */ - struct ecommunity **ecom, /* OUT */ + const struct prefix *prefix, /* in */ + struct bgp_path_info *info, /* in */ + struct ecommunity **ecom, /* OUT */ struct prefix *unicast_nexthop) /* OUT */ { struct rfapi_cfg *hc = bgp->rfapi_cfg; @@ -415,7 +415,7 @@ static int process_unicast_route(struct bgp *bgp, /* in */ localadmin = htons(hc->resolve_nve_roo_local_admin); memcpy(vnc_gateway_magic.val + 6, (char *)&localadmin, 2); - ecommunity_add_val(*ecom, &vnc_gateway_magic); + ecommunity_add_val(*ecom, &vnc_gateway_magic, false, false); } return 0; @@ -425,10 +425,10 @@ static int process_unicast_route(struct bgp *bgp, /* in */ static void vnc_import_bgp_add_route_mode_resolve_nve_one_bi( struct bgp *bgp, afi_t afi, struct bgp_path_info *bpi, /* VPN bpi */ struct prefix_rd *prd, /* RD */ - struct prefix *prefix, /* unicast route prefix */ - uint32_t *local_pref, /* NULL = no local_pref */ - uint32_t *med, /* NULL = no med */ - struct ecommunity *ecom) /* generated ecoms */ + const struct prefix *prefix, /* unicast route prefix */ + uint32_t *local_pref, /* NULL = no local_pref */ + uint32_t *med, /* NULL = no med */ + struct ecommunity *ecom) /* generated ecoms */ { struct prefix un; struct prefix nexthop; @@ -509,11 +509,12 @@ static void vnc_import_bgp_add_route_mode_resolve_nve_one_bi( } static void vnc_import_bgp_add_route_mode_resolve_nve_one_rd( - struct prefix_rd *prd, /* RD */ + struct prefix_rd *prd, /* RD */ struct bgp_table *table_rd, /* per-rd VPN route table */ - afi_t afi, struct bgp *bgp, struct prefix *prefix, /* unicast prefix */ - struct ecommunity *ecom, /* generated ecoms */ - uint32_t *local_pref, /* NULL = no local_pref */ + afi_t afi, struct bgp *bgp, + const struct prefix *prefix, /* unicast prefix */ + struct ecommunity *ecom, /* generated ecoms */ + uint32_t *local_pref, /* NULL = no local_pref */ uint32_t *med, /* NULL = no med */ struct prefix *ubpi_nexthop) /* unicast nexthop */ { @@ -552,8 +553,8 @@ static void vnc_import_bgp_add_route_mode_resolve_nve_one_rd( } static void vnc_import_bgp_add_route_mode_resolve_nve( - struct bgp *bgp, struct prefix *prefix, /* unicast prefix */ - struct bgp_path_info *info) /* unicast info */ + struct bgp *bgp, const struct prefix *prefix, /* unicast prefix */ + struct bgp_path_info *info) /* unicast info */ { afi_t afi = family2afi(prefix->family); @@ -673,8 +674,9 @@ static void vnc_import_bgp_add_route_mode_resolve_nve( continue; vnc_import_bgp_add_route_mode_resolve_nve_one_rd( - (struct prefix_rd *)&bnp->p, table, afi, bgp, prefix, - ecom, &local_pref, med, &pfx_unicast_nexthop); + (struct prefix_rd *)bgp_node_get_prefix(bnp), table, + afi, bgp, prefix, ecom, &local_pref, med, + &pfx_unicast_nexthop); } @@ -686,7 +688,7 @@ static void 
vnc_import_bgp_add_route_mode_resolve_nve( static void vnc_import_bgp_add_route_mode_plain(struct bgp *bgp, - struct prefix *prefix, + const struct prefix *prefix, struct bgp_path_info *info) { afi_t afi = family2afi(prefix->family); @@ -874,10 +876,9 @@ static void vnc_import_bgp_add_route_mode_plain(struct bgp *bgp, ecommunity_free(&ecom); } -static void -vnc_import_bgp_add_route_mode_nvegroup(struct bgp *bgp, struct prefix *prefix, - struct bgp_path_info *info, - struct rfapi_nve_group_cfg *rfg) +static void vnc_import_bgp_add_route_mode_nvegroup( + struct bgp *bgp, const struct prefix *prefix, + struct bgp_path_info *info, struct rfapi_nve_group_cfg *rfg) { afi_t afi = family2afi(prefix->family); struct peer *peer = info->peer; @@ -1080,7 +1081,7 @@ vnc_import_bgp_add_route_mode_nvegroup(struct bgp *bgp, struct prefix *prefix, } static void vnc_import_bgp_del_route_mode_plain(struct bgp *bgp, - struct prefix *prefix, + const struct prefix *prefix, struct bgp_path_info *info) { struct prefix_rd prd; @@ -1153,7 +1154,7 @@ static void vnc_import_bgp_del_route_mode_plain(struct bgp *bgp, } static void vnc_import_bgp_del_route_mode_nvegroup(struct bgp *bgp, - struct prefix *prefix, + const struct prefix *prefix, struct bgp_path_info *info) { struct prefix_rd prd; @@ -1236,7 +1237,7 @@ static void vnc_import_bgp_del_route_mode_nvegroup(struct bgp *bgp, static void vnc_import_bgp_del_route_mode_resolve_nve_one_bi( struct bgp *bgp, afi_t afi, struct bgp_path_info *bpi, /* VPN bpi */ struct prefix_rd *prd, /* RD */ - struct prefix *prefix) /* unicast route prefix */ + const struct prefix *prefix) /* unicast route prefix */ { struct prefix un; @@ -1272,8 +1273,9 @@ static void vnc_import_bgp_del_route_mode_resolve_nve_one_bi( static void vnc_import_bgp_del_route_mode_resolve_nve_one_rd( struct prefix_rd *prd, struct bgp_table *table_rd, /* per-rd VPN route table */ - afi_t afi, struct bgp *bgp, struct prefix *prefix, /* unicast prefix */ - struct prefix *ubpi_nexthop) /* unicast bpi's nexthop */ + afi_t afi, struct bgp *bgp, + const struct prefix *prefix, /* unicast prefix */ + const struct prefix *ubpi_nexthop) /* unicast bpi's nexthop */ { struct bgp_node *bn; struct bgp_path_info *bpi; @@ -1312,7 +1314,7 @@ static void vnc_import_bgp_del_route_mode_resolve_nve_one_rd( static void vnc_import_bgp_del_route_mode_resolve_nve(struct bgp *bgp, afi_t afi, - struct prefix *prefix, + const struct prefix *prefix, struct bgp_path_info *info) { struct ecommunity *ecom = NULL; @@ -1376,8 +1378,8 @@ vnc_import_bgp_del_route_mode_resolve_nve(struct bgp *bgp, afi_t afi, continue; vnc_import_bgp_del_route_mode_resolve_nve_one_rd( - (struct prefix_rd *)&bnp->p, table, afi, bgp, prefix, - &pfx_unicast_nexthop); /* TBD how is this set? 
*/ + (struct prefix_rd *)bgp_node_get_prefix(bnp), table, + afi, bgp, prefix, &pfx_unicast_nexthop); } if (ecom) @@ -1396,7 +1398,7 @@ vnc_import_bgp_del_route_mode_resolve_nve(struct bgp *bgp, afi_t afi, void vnc_import_bgp_add_vnc_host_route_mode_resolve_nve( struct bgp *bgp, struct prefix_rd *prd, /* RD */ struct bgp_table *table_rd, /* per-rd VPN route table */ - struct prefix *prefix, /* VPN prefix */ + const struct prefix *prefix, /* VPN prefix */ struct bgp_path_info *bpi) /* new VPN host route */ { afi_t afi = family2afi(prefix->family); @@ -1533,7 +1535,7 @@ void vnc_import_bgp_add_vnc_host_route_mode_resolve_nve( void vnc_import_bgp_del_vnc_host_route_mode_resolve_nve( struct bgp *bgp, struct prefix_rd *prd, /* RD */ struct bgp_table *table_rd, /* per-rd VPN route table */ - struct prefix *prefix, /* VPN prefix */ + const struct prefix *prefix, /* VPN prefix */ struct bgp_path_info *bpi) /* old VPN host route */ { afi_t afi = family2afi(prefix->family); @@ -1675,8 +1677,8 @@ static int is_usable_interior_route(struct bgp_path_info *bpi_interior) */ static void vnc_import_bgp_exterior_add_route_it( struct bgp *bgp, /* exterior instance, we hope */ - struct prefix *prefix, /* unicast prefix */ - struct bgp_path_info *info, /* unicast info */ + const struct prefix *prefix, /* unicast prefix */ + struct bgp_path_info *info, /* unicast info */ struct rfapi_import_table *it_only) /* NULL, or limit to this IT */ { struct rfapi *h; @@ -1844,9 +1846,9 @@ static void vnc_import_bgp_exterior_add_route_it( } void vnc_import_bgp_exterior_add_route( - struct bgp *bgp, /* exterior instance, we hope */ - struct prefix *prefix, /* unicast prefix */ - struct bgp_path_info *info) /* unicast info */ + struct bgp *bgp, /* exterior instance, we hope */ + const struct prefix *prefix, /* unicast prefix */ + struct bgp_path_info *info) /* unicast info */ { vnc_import_bgp_exterior_add_route_it(bgp, prefix, info, NULL); } @@ -1861,8 +1863,8 @@ void vnc_import_bgp_exterior_add_route( * right routes. 
*/ void vnc_import_bgp_exterior_del_route( - struct bgp *bgp, struct prefix *prefix, /* unicast prefix */ - struct bgp_path_info *info) /* unicast info */ + struct bgp *bgp, const struct prefix *prefix, /* unicast prefix */ + struct bgp_path_info *info) /* unicast info */ { struct rfapi *h; struct rfapi_cfg *hc; @@ -2027,7 +2029,8 @@ void vnc_import_bgp_exterior_add_route_interior( struct agg_node *rn_interior, /* VPN IT node */ struct bgp_path_info *bpi_interior) /* VPN IT route */ { - afi_t afi = family2afi(rn_interior->p.family); + const struct prefix *p = agg_node_get_prefix(rn_interior); + afi_t afi = family2afi(p->family); struct agg_node *par; struct bgp_path_info *bpi_exterior; struct prefix *pfx_exterior; /* exterior pfx */ @@ -2057,13 +2060,8 @@ void vnc_import_bgp_exterior_add_route_interior( } /*debugging */ - { - char str_pfx[PREFIX_STRLEN]; - - prefix2str(&rn_interior->p, str_pfx, sizeof(str_pfx)); - vnc_zlog_debug_verbose("%s: interior prefix=%s, bpi type=%d", - __func__, str_pfx, bpi_interior->type); - } + vnc_zlog_debug_verbose("%s: interior prefix=%pRN, bpi type=%d", + __func__, rn_interior, bpi_interior->type); if (RFAPI_HAS_MONITOR_EXTERIOR(rn_interior)) { @@ -2178,7 +2176,7 @@ void vnc_import_bgp_exterior_add_route_interior( rfapiUnicastNexthop2Prefix(afi, bpi_exterior->attr, &pfx_nexthop); - if (prefix_match(&rn_interior->p, &pfx_nexthop)) { + if (prefix_match(p, &pfx_nexthop)) { struct bgp_path_info *bpi; struct prefix_rd *prd; @@ -2321,7 +2319,7 @@ void vnc_import_bgp_exterior_add_route_interior( rfapiUnicastNexthop2Prefix(afi, bpi_exterior->attr, &pfx_nexthop); - if (prefix_match(&rn_interior->p, &pfx_nexthop)) { + if (prefix_match(p, &pfx_nexthop)) { struct prefix_rd *prd; struct attr new_attr; @@ -2409,7 +2407,8 @@ void vnc_import_bgp_exterior_del_route_interior( struct agg_node *rn_interior, /* VPN IT node */ struct bgp_path_info *bpi_interior) /* VPN IT route */ { - afi_t afi = family2afi(rn_interior->p.family); + const struct prefix *p = agg_node_get_prefix(rn_interior); + afi_t afi = family2afi(p->family); struct agg_node *par; struct bgp_path_info *bpi_exterior; struct prefix *pfx_exterior; /* exterior pfx */ @@ -2443,14 +2442,8 @@ void vnc_import_bgp_exterior_del_route_interior( } /*debugging */ - { - char str_pfx[PREFIX_STRLEN]; - - prefix2str(&rn_interior->p, str_pfx, sizeof(str_pfx)); - - vnc_zlog_debug_verbose("%s: interior prefix=%s, bpi type=%d", - __func__, str_pfx, bpi_interior->type); - } + vnc_zlog_debug_verbose("%s: interior prefix=%pRN, bpi type=%d", + __func__, rn_interior, bpi_interior->type); /* * Remove constructed routes based on the deleted interior route @@ -2597,7 +2590,7 @@ void vnc_import_bgp_exterior_del_route_interior( * Generic add/delete unicast routes ***********************************************************************/ -void vnc_import_bgp_add_route(struct bgp *bgp, struct prefix *prefix, +void vnc_import_bgp_add_route(struct bgp *bgp, const struct prefix *prefix, struct bgp_path_info *info) { afi_t afi = family2afi(prefix->family); @@ -2666,7 +2659,7 @@ void vnc_import_bgp_add_route(struct bgp *bgp, struct prefix *prefix, /* * "Withdrawing a Route" import process */ -void vnc_import_bgp_del_route(struct bgp *bgp, struct prefix *prefix, +void vnc_import_bgp_del_route(struct bgp *bgp, const struct prefix *prefix, struct bgp_path_info *info) /* unicast info */ { afi_t afi = family2afi(prefix->family); @@ -2762,7 +2755,8 @@ void vnc_import_bgp_redist_enable(struct bgp *bgp, afi_t afi) if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)) 
continue; - vnc_import_bgp_add_route(bgp, &rn->p, bpi); + vnc_import_bgp_add_route(bgp, bgp_node_get_prefix(rn), + bpi); } } vnc_zlog_debug_verbose( @@ -2803,8 +2797,8 @@ void vnc_import_bgp_exterior_redist_enable(struct bgp *bgp, afi_t afi) if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)) continue; - vnc_import_bgp_exterior_add_route(bgp_exterior, &rn->p, - bpi); + vnc_import_bgp_exterior_add_route( + bgp_exterior, bgp_node_get_prefix(rn), bpi); } } vnc_zlog_debug_verbose( @@ -2850,7 +2844,8 @@ void vnc_import_bgp_exterior_redist_enable_it( continue; vnc_import_bgp_exterior_add_route_it( - bgp_exterior, &rn->p, bpi, it_only); + bgp_exterior, bgp_node_get_prefix(rn), bpi, + it_only); } } } @@ -2880,58 +2875,49 @@ void vnc_import_bgp_redist_disable(struct bgp *bgp, afi_t afi) */ for (rn1 = bgp_table_top(bgp->rib[afi][SAFI_MPLS_VPN]); rn1; rn1 = bgp_route_next(rn1)) { + const struct prefix *rn1_p; - if (bgp_node_has_bgp_path_info_data(rn1)) { - - for (rn2 = bgp_table_top( - bgp_node_get_bgp_table_info(rn1)); - rn2; rn2 = bgp_route_next(rn2)) { - - struct bgp_path_info *bpi; - struct bgp_path_info *nextbpi; - - for (bpi = bgp_node_get_bgp_path_info(rn2); bpi; - bpi = nextbpi) { - - nextbpi = bpi->next; + if (!bgp_node_has_bgp_path_info_data(rn1)) + continue; - if (bpi->type - == ZEBRA_ROUTE_BGP_DIRECT) { + rn1_p = bgp_node_get_prefix(rn1); + for (rn2 = bgp_table_top(bgp_node_get_bgp_table_info(rn1)); rn2; + rn2 = bgp_route_next(rn2)) { + const struct prefix *rn2_p = bgp_node_get_prefix(rn2); + struct bgp_path_info *bpi; + struct bgp_path_info *nextbpi; - struct rfapi_descriptor *rfd; - vncHDBgpDirect.peer = bpi->peer; + for (bpi = bgp_node_get_bgp_path_info(rn2); bpi; + bpi = nextbpi) { - assert(bpi->extra); + nextbpi = bpi->next; - rfd = bpi->extra->vnc.export - .rfapi_handle; + if (bpi->type != ZEBRA_ROUTE_BGP_DIRECT) + continue; - vnc_zlog_debug_verbose( - "%s: deleting bpi=%p, bpi->peer=%p, bpi->type=%d, bpi->sub_type=%d, bpi->extra->vnc.export.rfapi_handle=%p [passing rfd=%p]", - __func__, bpi, - bpi->peer, bpi->type, - bpi->sub_type, - (bpi->extra - ? bpi->extra - ->vnc - .export - .rfapi_handle - : NULL), - rfd); + struct rfapi_descriptor *rfd; + vncHDBgpDirect.peer = bpi->peer; + assert(bpi->extra); - del_vnc_route( - rfd, bpi->peer, bgp, - SAFI_MPLS_VPN, &rn2->p, - (struct prefix_rd *)&rn1 - ->p, - bpi->type, - bpi->sub_type, NULL, - 1); /* kill */ + rfd = bpi->extra->vnc.export.rfapi_handle; - vncHDBgpDirect.peer = NULL; - } - } + vnc_zlog_debug_verbose( + "%s: deleting bpi=%p, bpi->peer=%p, bpi->type=%d, bpi->sub_type=%d, bpi->extra->vnc.export.rfapi_handle=%p [passing rfd=%p]", + __func__, bpi, bpi->peer, bpi->type, + bpi->sub_type, + (bpi->extra ? 
bpi->extra->vnc.export + .rfapi_handle + : NULL), + rfd); + + del_vnc_route(rfd, bpi->peer, bgp, + SAFI_MPLS_VPN, rn2_p, + (struct prefix_rd *)rn1_p, + bpi->type, bpi->sub_type, NULL, + 1); /* kill */ + + vncHDBgpDirect.peer = NULL; } } } @@ -2987,8 +2973,9 @@ void vnc_import_bgp_exterior_redist_disable(struct bgp *bgp, afi_t afi) if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)) continue; - vnc_import_bgp_exterior_del_route(bgp_exterior, - &rn->p, bpi); + vnc_import_bgp_exterior_del_route( + bgp_exterior, bgp_node_get_prefix(rn), + bpi); } } #if DEBUG_RHN_LIST diff --git a/bgpd/rfapi/vnc_import_bgp.h b/bgpd/rfapi/vnc_import_bgp.h index 3db6f4010a..ab2ec1a748 100644 --- a/bgpd/rfapi/vnc_import_bgp.h +++ b/bgpd/rfapi/vnc_import_bgp.h @@ -32,12 +32,14 @@ extern uint32_t calc_local_pref(struct attr *attr, struct peer *peer); -extern int vnc_prefix_cmp(void *pfx1, void *pfx2); +extern int vnc_prefix_cmp(const void *pfx1, const void *pfx2); -extern void vnc_import_bgp_add_route(struct bgp *bgp, struct prefix *prefix, +extern void vnc_import_bgp_add_route(struct bgp *bgp, + const struct prefix *prefix, struct bgp_path_info *info); -extern void vnc_import_bgp_del_route(struct bgp *bgp, struct prefix *prefix, +extern void vnc_import_bgp_del_route(struct bgp *bgp, + const struct prefix *prefix, struct bgp_path_info *info); extern void vnc_import_bgp_redist_enable(struct bgp *bgp, afi_t afi); @@ -51,23 +53,23 @@ extern void vnc_import_bgp_exterior_redist_disable(struct bgp *bgp, afi_t afi); extern void vnc_import_bgp_exterior_add_route( struct bgp *bgp, /* exterior instance, we hope */ - struct prefix *prefix, /* unicast prefix */ + const struct prefix *prefix, /* unicast prefix */ struct bgp_path_info *info); /* unicast info */ extern void vnc_import_bgp_exterior_del_route( - struct bgp *bgp, struct prefix *prefix, /* unicast prefix */ - struct bgp_path_info *info); /* unicast info */ + struct bgp *bgp, const struct prefix *prefix, /* unicast prefix */ + struct bgp_path_info *info); /* unicast info */ extern void vnc_import_bgp_add_vnc_host_route_mode_resolve_nve( struct bgp *bgp, struct prefix_rd *prd, /* RD */ struct bgp_table *table_rd, /* per-rd VPN route table */ - struct prefix *prefix, /* VPN prefix */ + const struct prefix *prefix, /* VPN prefix */ struct bgp_path_info *bpi); /* new VPN host route */ extern void vnc_import_bgp_del_vnc_host_route_mode_resolve_nve( struct bgp *bgp, struct prefix_rd *prd, /* RD */ struct bgp_table *table_rd, /* per-rd VPN route table */ - struct prefix *prefix, /* VPN prefix */ + const struct prefix *prefix, /* VPN prefix */ struct bgp_path_info *bpi); /* old VPN host route */ #endif /* _QUAGGA_RFAPI_VNC_IMPORT_BGP_H_ */ diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c index 80a590f56a..686dc394a7 100644 --- a/bgpd/rfapi/vnc_zebra.c +++ b/bgpd/rfapi/vnc_zebra.c @@ -304,10 +304,12 @@ static void vnc_redistribute_withdraw(struct bgp *bgp, afi_t afi, uint8_t type) */ for (prn = bgp_table_top(bgp->rib[afi][SAFI_MPLS_VPN]); prn; prn = bgp_route_next(prn)) { + const struct prefix *prn_p = bgp_node_get_prefix(prn); + memset(&prd, 0, sizeof(prd)); prd.family = AF_UNSPEC; prd.prefixlen = 64; - memcpy(prd.val, prn->p.u.val, 8); + memcpy(prd.val, prn_p->u.val, 8); /* This is the per-RD table of prefixes */ table = bgp_node_get_bgp_table_info(prn); @@ -329,7 +331,7 @@ static void vnc_redistribute_withdraw(struct bgp *bgp, afi_t afi, uint8_t type) del_vnc_route( &vncHD1VR, /* use dummy ptr as cookie */ vncHD1VR.peer, bgp, SAFI_MPLS_VPN, - &(rn->p), &prd, type, + 
bgp_node_get_prefix(rn), &prd, type, BGP_ROUTE_REDISTRIBUTE, NULL, 0); } } @@ -380,7 +382,7 @@ static int vnc_zebra_read_route(ZAPI_CALLBACK_ARGS) /* * low-level message builder */ -static void vnc_zebra_route_msg(struct prefix *p, unsigned int nhp_count, +static void vnc_zebra_route_msg(const struct prefix *p, unsigned int nhp_count, void *nhp_ary, int add) /* 1 = add, 0 = del */ { struct zapi_route api; @@ -560,7 +562,7 @@ static void vnc_zebra_add_del_prefix(struct bgp *bgp, int add) /* !0 = add, 0 = del */ { struct list *nves; - + const struct prefix *p = agg_node_get_prefix(rn); unsigned int nexthop_count = 0; void *nh_ary = NULL; void *nhp_ary = NULL; @@ -570,15 +572,15 @@ static void vnc_zebra_add_del_prefix(struct bgp *bgp, if (zclient_vnc->sock < 0) return; - if (rn->p.family != AF_INET && rn->p.family != AF_INET6) { + if (p->family != AF_INET && p->family != AF_INET6) { flog_err(EC_LIB_DEVELOPMENT, "%s: invalid route node addr family", __func__); return; } - if (!vrf_bitmap_check(zclient_vnc->redist[family2afi(rn->p.family)] - [ZEBRA_ROUTE_VNC], - VRF_DEFAULT)) + if (!vrf_bitmap_check( + zclient_vnc->redist[family2afi(p->family)][ZEBRA_ROUTE_VNC], + VRF_DEFAULT)) return; if (!bgp->rfapi_cfg) { @@ -592,17 +594,16 @@ static void vnc_zebra_add_del_prefix(struct bgp *bgp, return; } - import_table_to_nve_list_zebra(bgp, import_table, &nves, rn->p.family); + import_table_to_nve_list_zebra(bgp, import_table, &nves, p->family); if (nves) { - nve_list_to_nh_array(rn->p.family, nves, &nexthop_count, - &nh_ary, &nhp_ary); + nve_list_to_nh_array(p->family, nves, &nexthop_count, &nh_ary, + &nhp_ary); list_delete(&nves); if (nexthop_count) - vnc_zebra_route_msg(&rn->p, nexthop_count, nhp_ary, - add); + vnc_zebra_route_msg(p, nexthop_count, nhp_ary, add); } XFREE(MTYPE_TMP, nhp_ary); @@ -695,15 +696,14 @@ static void vnc_zebra_add_del_nve(struct bgp *bgp, struct rfapi_descriptor *rfd, */ for (rn = agg_route_top(rt); rn; rn = agg_route_next(rn)) { - - if (rn->info) { - - vnc_zlog_debug_verbose( - "%s: sending %s", __func__, - (add ? "add" : "del")); - vnc_zebra_route_msg(&rn->p, 1, &pAddr, - add); - } + if (!rn->info) + continue; + + vnc_zlog_debug_verbose("%s: sending %s", + __func__, + (add ? 
"add" : "del")); + vnc_zebra_route_msg(agg_node_get_prefix(rn), 1, + &pAddr, add); } } } @@ -778,9 +778,9 @@ static void vnc_zebra_add_del_group_afi(struct bgp *bgp, for (rn = agg_route_top(rt); rn; rn = agg_route_next(rn)) { if (rn->info) { - vnc_zebra_route_msg(&rn->p, - nexthop_count, - nhp_ary, add); + vnc_zebra_route_msg( + agg_node_get_prefix(rn), + nexthop_count, nhp_ary, add); } } } diff --git a/configure.ac b/configure.ac index fa332b7dab..35261aced4 100755 --- a/configure.ac +++ b/configure.ac @@ -122,7 +122,7 @@ AC_ARG_ENABLE([pkgsrcrcdir], pkgsrcrcdir="$enableval",) dnl XXX add --pkgsrcrcdir to autoconf standard directory list somehow AC_SUBST([pkgsrcrcdir]) -AM_CONDITIONAL([PKGSRC], [test "x$pkgsrcrcdir" != "x"]) +AM_CONDITIONAL([PKGSRC], [test "$pkgsrcrcdir" != ""]) AC_ARG_WITH([moduledir], [AS_HELP_STRING([--with-moduledir=DIR], [module directory (${libdir}/frr/modules)])], [ moduledir="$withval" @@ -182,13 +182,13 @@ dnl - specifically, options to control warnings AC_USE_SYSTEM_EXTENSIONS AC_DEFUN([AC_C_FLAG], [{ - m4_pushdef([cachename],[m4_translit([frr_cv_$1],[ =-+],[____])]) + m4_pushdef([cachename],[m4_translit([frr_cv_$1],[ =-+/{}$],[________])]) AC_CACHE_CHECK([[whether $CC supports $1]], cachename, [ AC_LANG_PUSH([C]) ac_c_flag_save="$CFLAGS" CFLAGS="$CFLAGS $1" AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM([[]])], + [AC_LANG_PROGRAM([[$4]])], [ cachename=yes ], [ @@ -197,7 +197,7 @@ AC_DEFUN([AC_C_FLAG], [{ CFLAGS="$ac_c_flag_save" AC_LANG_POP([C]) ]) - if test "${cachename}" = yes; then + if test "$cachename" = "yes"; then m4_if([$3], [], [CFLAGS="$CFLAGS $1"], [$3]) else : @@ -242,8 +242,8 @@ CC="${CC% -std=c99}" AC_C_FLAG([-std=gnu11], [CC="$ac_cc"], [CC="$CC -std=gnu11"]) dnl if the user has specified any CFLAGS, override our settings -if test "x${enable_gcov}" = "xyes"; then - if test "z$orig_cflags" = "z"; then +if test "$enable_gcov" = "yes"; then + if test "$orig_cflags" = ""; then AC_C_FLAG([-coverage]) AC_C_FLAG([-O0]) fi @@ -251,7 +251,7 @@ if test "x${enable_gcov}" = "xyes"; then LDFLAGS="${LDFLAGS} -lgcov" fi -if test "x${enable_clang_coverage}" = "xyes"; then +if test "$enable_clang_coverage" = "yes"; then AC_C_FLAG([-fprofile-instr-generate], [ AC_MSG_ERROR([$CC does not support -fprofile-instr-generate.]) ]) @@ -260,13 +260,13 @@ if test "x${enable_clang_coverage}" = "xyes"; then ]) fi -if test "x${enable_dev_build}" = "xyes"; then +if test "$enable_dev_build" = "yes"; then AC_DEFINE([DEV_BUILD], [1], [Build for development]) - if test "z$orig_cflags" = "z"; then + if test "$orig_cflags" = ""; then AC_C_FLAG([-g3]) AC_C_FLAG([-O0]) fi - if test "x${enable_lua}" = "xyes"; then + if test "$enable_lua" = "yes"; then AX_PROG_LUA([5.3]) AX_LUA_HEADERS AX_LUA_LIBS([ @@ -275,15 +275,15 @@ if test "x${enable_dev_build}" = "xyes"; then ]) fi else - if test "x${enable_lua}" = "xyes"; then + if test "$enable_lua" = "yes"; then AC_MSG_ERROR([Lua is not meant to be built/used outside of development at this time]) fi - if test "z$orig_cflags" = "z"; then + if test "$orig_cflags" = ""; then AC_C_FLAG([-g]) AC_C_FLAG([-O2]) fi fi -AM_CONDITIONAL([DEV_BUILD], [test "x$enable_dev_build" = "xyes"]) +AM_CONDITIONAL([DEV_BUILD], [test "$enable_dev_build" = "yes"]) dnl always want these CFLAGS AC_C_FLAG([-fno-omit-frame-pointer]) @@ -295,7 +295,8 @@ AC_C_FLAG([-Wmissing-declarations]) AC_C_FLAG([-Wpointer-arith]) AC_C_FLAG([-Wbad-function-cast]) AC_C_FLAG([-Wwrite-strings]) -if test x"${enable_gcc_ultra_verbose}" = x"yes" ; then +AC_C_FLAG([-Wundef]) +if test 
"$enable_gcc_ultra_verbose" = "yes" ; then AC_C_FLAG([-Wcast-qual]) AC_C_FLAG([-Wstrict-prototypes]) AC_C_FLAG([-Wmissing-noreturn]) @@ -318,7 +319,7 @@ dnl for some reason the string consts get 'promoted' to char *, dnl triggering a const to non-const conversion warning. AC_C_FLAG([-diag-disable 3179]) -if test x"${enable_werror}" = x"yes" ; then +if test "$enable_werror" = "yes" ; then WERROR="-Werror" fi AC_SUBST([WERROR]) @@ -354,6 +355,44 @@ if test "$enable_undefined_sanitizer" = "yes"; then fi AC_SUBST([SAN_FLAGS]) +dnl frr-format.so +if test "$with_frr_format" != "no" -a "$with_frr_format" != "yes" -a -n "$with_frr_format"; then + AC_C_FLAG([-fplugin=${with_frr_format}], [ + AC_MSG_ERROR([specified frr-format plugin ($with_frr_format) does not work]) + ],,[ +#ifndef _FRR_ATTRIBUTE_PRINTFRR +#error plugin not loaded +#endif +#if _FRR_ATTRIBUTE_PRINTFRR < 0x10000 +#error plugin too old +#endif + ]) +elif test "$with_frr_format" = "no"; then + : #nothing +else + AC_C_FLAG([-fplugin=tools/gcc-plugins/frr-format.so],[ + AC_C_FLAG([-fplugin=frr-format],[ + if test "$with_frr_format" = "yes"; then + AC_MSG_ERROR([frr-format plugin requested but not found]) + fi + ],,[ +#ifndef _FRR_ATTRIBUTE_PRINTFRR +#error plugin not loaded +#endif +#if _FRR_ATTRIBUTE_PRINTFRR < 0x10000 +#error plugin too old +#endif + ]) + ],,[ +#ifndef _FRR_ATTRIBUTE_PRINTFRR +#error plugin not loaded +#endif +#if _FRR_ATTRIBUTE_PRINTFRR < 0x10000 +#error plugin too old +#endif + ]) +fi + dnl ---------- dnl Essentials dnl ---------- @@ -369,7 +408,7 @@ AX_PTHREAD([ AC_SEARCH_LIBS([pthread_condattr_setclock], [], [frr_cv_pthread_condattr_setclock=yes], [frr_cv_pthread_condattr_setclock=no]) -if test "$frr_cv_pthread_condattr_setclock" = yes; then +if test "$frr_cv_pthread_condattr_setclock" = "yes"; then AC_DEFINE([HAVE_PTHREAD_CONDATTR_SETCLOCK], [1], [Have pthread.h pthread_condattr_setclock]) fi @@ -400,7 +439,7 @@ if test "$enable_shared" != "yes"; then AC_MSG_ERROR([FRR cannot be built with --disable-shared. 
If you want statically linked daemons, use --enable-shared --enable-static --enable-static-bin]) fi AC_SUBST([AC_LDFLAGS]) -AM_CONDITIONAL([STATIC_BIN], [test "x$enable_static_bin" = "xyes"]) +AM_CONDITIONAL([STATIC_BIN], [test "$enable_static_bin" = "yes"]) dnl $AR and $RANLIB are set by LT_INIT above AC_MSG_CHECKING([whether $AR supports D option]) @@ -451,7 +490,7 @@ AC_ARG_WITH([pkg-extra-version], ], []) AC_ARG_WITH([pkg-git-version], AS_HELP_STRING([--with-pkg-git-version], [add git information to MOTD and build version string]), - [ test "x$withval" != "xno" && with_pkg_git_version="yes" ]) + [ test "$withval" != "no" && with_pkg_git_version="yes" ]) AC_ARG_WITH([clippy], AS_HELP_STRING([--with-clippy=PATH], [use external clippy helper program])) AC_ARG_WITH([vtysh_pager], @@ -600,25 +639,27 @@ AC_ARG_ENABLE([undefined-sanitizer], AS_HELP_STRING([--undefined-sanitizer], [enable UndefinedBehaviorSanitizer support for detecting undefined behavior])) AC_ARG_WITH([crypto], AS_HELP_STRING([--with-crypto=<internal|openssl>], [choose between different implementations of cryptographic functions(default value is --with-crypto=internal)])) +AC_ARG_WITH([frr-format], + AS_HELP_STRING([--with-frr-format[=<.../frr-format.so>]], [use frr-format GCC plugin])) #if openssl, else use the internal -AS_IF([test x"${with_crypto}" = x"openssl"], [ +AS_IF([test "$with_crypto" = "openssl"], [ AC_CHECK_LIB([crypto], [EVP_DigestInit], [LIBS="$LIBS -lcrypto"], [], []) -if test $ac_cv_lib_crypto_EVP_DigestInit = no; then +if test "$ac_cv_lib_crypto_EVP_DigestInit" = "no"; then AC_MSG_ERROR([build with openssl has been specified but openssl library was not found on your system]) else AC_DEFINE([CRYPTO_OPENSSL], [1], [Compile with openssl support]) fi -], [test x"${with_crypto}" = x"internal" || test x"${with_crypto}" = x"" ], [AC_DEFINE([CRYPTO_INTERNAL], [1], [Compile with internal cryptographic implementation]) +], [test "$with_crypto" = "internal" || test "$with_crypto" = "" ], [AC_DEFINE([CRYPTO_INTERNAL], [1], [Compile with internal cryptographic implementation]) ], [AC_MSG_ERROR([Unknown value for --with-crypto])] ) -AS_IF([test "${enable_clippy_only}" != "yes"], [ +AS_IF([test "$enable_clippy_only" != "yes"], [ AC_CHECK_HEADERS([json-c/json.h]) AC_CHECK_LIB([json-c], [json_object_get], [LIBS="$LIBS -ljson-c"], [], [-lm]) -if test "$ac_cv_lib_json_c_json_object_get" = no; then +if test "$ac_cv_lib_json_c_json_object_get" = "no"; then AC_CHECK_LIB([json], [json_object_get], [LIBS="$LIBS -ljson"]) - if test "$ac_cv_lib_json_json_object_get" = no; then + if test "$ac_cv_lib_json_json_object_get" = "no"; then AC_MSG_ERROR([libjson is needed to compile]) fi fi @@ -630,8 +671,8 @@ AC_ARG_ENABLE([dev_build], AC_ARG_ENABLE([lua], AS_HELP_STRING([--enable-lua], [Build Lua scripting])) -if test x"${enable_time_check}" != x"no" ; then - if test x"${enable_time_check}" = x"yes" -o x"${enable_time_check}" = x ; then +if test "$enable_time_check" != "no" ; then + if test "$enable_time_check" = "yes" -o "$enable_time_check" = "" ; then AC_DEFINE([CONSUMED_TIME_CHECK], [5000000], [Consumed Time Check]) else AC_DEFINE_UNQUOTED([CONSUMED_TIME_CHECK], [$enable_time_check], [Consumed Time Check]) @@ -650,7 +691,7 @@ case "${enable_systemd}" in "no") ;; "yes") AC_CHECK_LIB([systemd], [sd_notify], [LIBS="$LIBS -lsystemd"]) - if test $ac_cv_lib_systemd_sd_notify = no; then + if test "$ac_cv_lib_systemd_sd_notify" = "no"; then AC_MSG_ERROR([enable systemd has been specified but systemd development env not found on your system]) 
else AC_DEFINE([HAVE_SYSTEMD], [1], [Compile systemd support in]) @@ -659,11 +700,11 @@ case "${enable_systemd}" in "*") ;; esac -if test "${enable_rr_semantics}" != "no" ; then +if test "$enable_rr_semantics" != "no" ; then AC_DEFINE([HAVE_V6_RR_SEMANTICS], [1], [Compile in v6 Route Replacement Semantics]) fi -if test "${enable_datacenter}" = "yes" ; then +if test "$enable_datacenter" = "yes" ; then AC_DEFINE([HAVE_DATACENTER], [1], [Compile extensions for a DataCenter]) AC_MSG_WARN([The --enable-datacenter compile time option is deprecated. Please modify the init script to pass -F datacenter to the daemons instead.]) DFLT_NAME="datacenter" @@ -671,22 +712,22 @@ else DFLT_NAME="traditional" fi -if test "${enable_fuzzing}" = "yes" ; then +if test "$enable_fuzzing" = "yes" ; then AC_DEFINE([HANDLE_ZAPI_FUZZING], [1], [Compile extensions to use with a fuzzer]) fi -if test "${enable_netlink_fuzzing}" = "yes" ; then +if test "$enable_netlink_fuzzing" = "yes" ; then AC_DEFINE([HANDLE_NETLINK_FUZZING], [1], [Compile extensions to use with a fuzzer for netlink]) fi -if test "${enable_cumulus}" = "yes" ; then +if test "$enable_cumulus" = "yes" ; then AC_DEFINE([HAVE_CUMULUS], [1], [Compile Special Cumulus Code in]) fi AC_SUBST([DFLT_NAME]) AC_DEFINE_UNQUOTED([DFLT_NAME], ["$DFLT_NAME"], [Name of the configuration default set]) -if test "${enable_shell_access}" = "yes"; then +if test "$enable_shell_access" = "yes"; then AC_DEFINE([HAVE_SHELL_ACCESS], [1], [Allow user to use ssh/telnet/bash, be aware this is considered insecure]) fi @@ -702,15 +743,15 @@ AS_IF([test "$host" = "$build"], [ FRR_PYTHON_MODULES([pytest]) -if test "${enable_doc}" != "no"; then +if test "$enable_doc" != "no"; then FRR_PYTHON_MODULES([sphinx], , [ - if test "${enable_doc}" = "yes"; then + if test "$enable_doc" = "yes"; then AC_MSG_ERROR([Documentation was explicitly requested with --enable-doc but sphinx is not available for $PYTHON. 
Please disable docs or install sphinx.]) fi ]) fi -AM_CONDITIONAL([DOC], [test "${enable_doc}" != "no" -a "$frr_py_mod_sphinx" != "false"]) -AM_CONDITIONAL([DOC_HTML], [test "${enable_doc_html}" = "yes"]) +AM_CONDITIONAL([DOC], [test "$enable_doc" != "no" -a "$frr_py_mod_sphinx" != "false"]) +AM_CONDITIONAL([DOC_HTML], [test "$enable_doc_html" = "yes"]) FRR_PYTHON_MOD_EXEC([sphinx], [--version], [ PYSPHINX="-m sphinx" @@ -731,35 +772,35 @@ fi # AC_MSG_CHECKING([if zebra should be configurable to send Route Advertisements]) -if test "${enable_rtadv}" != "no"; then +if test "$enable_rtadv" != "no"; then AC_MSG_RESULT([yes]) AC_DEFINE([HAVE_RTADV], [1], [Enable IPv6 Routing Advertisement support]) else AC_MSG_RESULT([no]) fi -if test x"${enable_user}" = x"no"; then +if test "$enable_user" = "no"; then enable_user="" else - if test x"${enable_user}" = x"yes" || test x"${enable_user}" = x""; then + if test "$enable_user" = "yes" || test "$enable_user" = ""; then enable_user="frr" fi AC_DEFINE_UNQUOTED([FRR_USER], ["${enable_user}"], [frr User]) fi -if test x"${enable_group}" = x"no"; then +if test "$enable_group" = "no"; then enable_group="" else - if test x"${enable_group}" = x"yes" || test x"${enable_group}" = x""; then + if test "$enable_group" = "yes" || test "$enable_group" = ""; then enable_group="frr" fi AC_DEFINE_UNQUOTED([FRR_GROUP], ["${enable_group}"], [frr Group]) fi -if test x"${enable_vty_group}" = x"yes" ; then +if test "$enable_vty_group" = "yes" ; then AC_MSG_ERROR([--enable-vty-group requires a group as argument, not yes]) -elif test x"${enable_vty_group}" != x""; then - if test x"${enable_vty_group}" != x"no"; then +elif test "$enable_vty_group" != ""; then + if test "$enable_vty_group" != "no"; then AC_DEFINE_UNQUOTED([VTY_GROUP], ["${enable_vty_group}"], [VTY Sockets Group]) fi fi @@ -784,7 +825,7 @@ case "${enable_multipath}" in ;; "") ;; - *) + *) AC_MSG_FAILURE([Please specify digit to enable multipath ARG]) ;; esac @@ -796,12 +837,12 @@ AC_DEFINE_UNQUOTED([VTYSH_PAGER], ["$VTYSH_PAGER"], [What pager to use]) dnl -------------------- dnl Enable code coverage dnl -------------------- -AM_CONDITIONAL([HAVE_GCOV], [test '!' "$enable_gcov" = no]) +AM_CONDITIONAL([HAVE_GCOV], [test "$enable_gcov" != "no"]) dnl ------------------------------------ dnl Alpine only accepts numeric versions dnl ------------------------------------ -if test "x${enable_numeric_version}" != "x" ; then +if test "$enable_numeric_version" != "" ; then VERSION="`echo ${VERSION} | tr -c -d '[[.0-9]]'`" PACKAGE_VERSION="`echo ${PACKAGE_VERSION} | tr -c -d '[[.0-9]]'`" fi @@ -810,7 +851,7 @@ dnl ----------------------------------- dnl Add extra version string to package dnl name, string and version fields. 
dnl ----------------------------------- -if test "x${EXTRAVERSION}" != "x" ; then +if test "$EXTRAVERSION" != "" ; then VERSION="${VERSION}${EXTRAVERSION}" PACKAGE_VERSION="${PACKAGE_VERSION}${EXTRAVERSION}" AC_SUBST(PACKAGE_EXTRAVERSION, ["${EXTRAVERSION}"]) @@ -818,17 +859,17 @@ if test "x${EXTRAVERSION}" != "x" ; then fi AC_SUBST([EXTRAVERSION]) -if test "x$with_pkg_git_version" = "xyes"; then - if test -d "${srcdir}/.git"; then +if test "$with_pkg_git_version" = "yes"; then + if test -e "${srcdir}/.git"; then AC_DEFINE([GIT_VERSION], [1], [include git version info]) else with_pkg_git_version="no" AC_MSG_WARN([--with-pkg-git-version given, but this is not a git checkout]) fi fi -AM_CONDITIONAL([GIT_VERSION], [test "x$with_pkg_git_version" = "xyes"]) +AM_CONDITIONAL([GIT_VERSION], [test "$with_pkg_git_version" = "yes"]) AC_CHECK_TOOL([OBJCOPY], [objcopy], [:]) -if test "x${OBJCOPY}" != "x:"; then +if test "$OBJCOPY" != ":"; then AC_CACHE_CHECK([for .interp value to use], [frr_cv_interp], [ frr_cv_interp="" AC_LINK_IFELSE([AC_LANG_SOURCE([[int main() { return 0; }]])], [ @@ -1122,16 +1163,6 @@ case "$host_os" in AC_DEFINE([OPEN_BSD], [1], [OpenBSD]) AC_DEFINE([KAME], [1], [KAME IPv6]) AC_DEFINE([BSD_V6_SYSCTL], [1], [BSD v6 sysctl to turn on and off forwarding]) - - if test "x${enable_pimd}" != "xno"; then - case "$host_os" in - openbsd6.0) - ;; - openbsd[6-9]*) - AC_MSG_FAILURE([pimd cannot be enabled as PIM support has been removed from OpenBSD 6.1]) - ;; - esac - fi ;; *) AC_MSG_RESULT([BSD]) @@ -1141,7 +1172,7 @@ case "$host_os" in AC_DEFINE([BSD_V6_SYSCTL], [1], [BSD v6 sysctl to turn on and off forwarding]) ;; esac -AM_CONDITIONAL([SOLARIS], [test "${SOLARIS}" = "solaris"]) +AM_CONDITIONAL([SOLARIS], [test "$SOLARIS" = "solaris"]) AM_CONDITIONAL([LINUX], [${is_linux}]) AC_SYS_LARGEFILE @@ -1149,7 +1180,7 @@ AC_SYS_LARGEFILE dnl ------------------------ dnl Integrated REALMS option dnl ------------------------ -if test "${enable_realms}" = "yes"; then +if test "$enable_realms" = "yes"; then case "$host_os" in linux*) AC_DEFINE([SUPPORT_REALMS], [1], [Realms support]) @@ -1175,7 +1206,7 @@ AC_CHECK_FUNCS([ \ dnl ########################################################################## dnl LARGE if block spans a lot of "configure"! 
-if test "${enable_clippy_only}" != "yes"; then +if test "$enable_clippy_only" != "yes"; then dnl ########################################################################## # @@ -1243,15 +1274,15 @@ case "${enable_vtysh}" in LIBS="$prev_libs" AC_CHECK_HEADER([readline/history.h]) - if test $ac_cv_header_readline_history_h = no;then + if test "$ac_cv_header_readline_history_h" = "no"; then AC_MSG_ERROR([readline is too old to have readline/history.h, please update to the latest readline library.]) fi AC_CHECK_LIB([readline], [rl_completion_matches], [true], [], [$LIBREADLINE]) - if test $ac_cv_lib_readline_rl_completion_matches = no; then + if test "$ac_cv_lib_readline_rl_completion_matches" = "no"; then AC_DEFINE([rl_completion_matches], [completion_matches], [Old readline]) fi AC_CHECK_LIB([readline], [append_history], [frr_cv_append_history=yes], [frr_cv_append_history=no], [$LIBREADLINE]) - if test "$frr_cv_append_history" = yes; then + if test "$frr_cv_append_history" = "yes"; then AC_DEFINE([HAVE_APPEND_HISTORY], [1], [Have history.h append_history]) fi ;; @@ -1366,10 +1397,10 @@ case "$host_os" in ISIS_METHOD_MACRO="ISIS_METHOD_DLPI" ;; *) - if test $ac_cv_header_net_bpf_h = no; then - if test $ac_cv_header_sys_dlpi_h = no; then + if test "$ac_cv_header_net_bpf_h" = "no"; then + if test "$ac_cv_header_sys_dlpi_h" = "no"; then AC_MSG_RESULT([none]) - if test "${enable_isisd}" = yes -o "${enable_fabricd}" = yes; then + if test "$enable_isisd" = "yes" -o "$enable_fabricd" = "yes"; then AC_MSG_FAILURE([IS-IS support requested but no packet backend found]) fi AC_MSG_WARN([*** IS-IS support will not be built ***]) @@ -1473,7 +1504,7 @@ AC_CHECK_HEADER([netinet/tcp.h], AC_CHECK_DECLS([TCP_MD5SIG], [], [], MD5_INCLUDES)], [], FRR_INCLUDES) -if test $ac_cv_have_decl_TCP_MD5SIG = no; then +if test "$ac_cv_have_decl_TCP_MD5SIG" = "no"; then AC_CHECK_HEADER([linux/tcp.h], [m4_define([MD5_INCLUDES], FRR_INCLUDES @@ -1490,7 +1521,7 @@ AC_CHECK_LIB([resolv], [res_init]) dnl --------------------------- dnl check system has PCRE regexp dnl --------------------------- -if test "x$enable_pcreposix" = "xyes"; then +if test "$enable_pcreposix" = "yes"; then AC_CHECK_LIB([pcreposix], [regexec], [], [ AC_MSG_ERROR([--enable-pcreposix given but unable to find libpcreposix]) ]) @@ -1498,7 +1529,7 @@ fi AC_SUBST([HAVE_LIBPCREPOSIX]) dnl ########################################################################## -dnl test "${enable_clippy_only}" != "yes" +dnl test "$enable_clippy_only" != "yes" fi dnl END OF LARGE if block dnl ########################################################################## @@ -1547,7 +1578,7 @@ dnl -------------------- dnl Daemon disable check dnl -------------------- -AS_IF([test "${enable_ldpd}" != "no"], [ +AS_IF([test "$enable_ldpd" != "no"], [ AC_DEFINE([HAVE_LDPD], [1], [ldpd]) ]) @@ -1569,7 +1600,7 @@ else esac fi -if test "$ac_cv_lib_json_c_json_object_get" = no -a "x$BFDD" = "xbfdd"; then +if test "$ac_cv_lib_json_c_json_object_get" = "no" -a "$BFDD" = "bfdd"; then AC_MSG_ERROR(["you must use json-c library to use bfdd"]) fi @@ -1580,7 +1611,7 @@ case "$host_os" in no) ;; yes) - if test "${enable_clippy_only}" != "yes"; then + if test "$enable_clippy_only" != "yes"; then if test "$c_ares_found" != "true" ; then AC_MSG_ERROR([nhrpd requires libcares. 
Please install c-ares and its -dev headers.]) fi @@ -1595,34 +1626,34 @@ case "$host_os" in esac ;; *) - if test "${enable_nhrpd}" = "yes"; then + if test "$enable_nhrpd" = "yes"; then AC_MSG_ERROR([nhrpd requires kernel APIs that are only present on Linux.]) fi ;; esac -if test "${enable_watchfrr}" = "no";then +if test "$enable_watchfrr" = "no";then WATCHFRR="" else WATCHFRR="watchfrr" fi OSPFCLIENT="" -if test "${enable_ospfapi}" != "no";then +if test "$enable_ospfapi" != "no";then AC_DEFINE([SUPPORT_OSPF_API], [1], [OSPFAPI]) - if test "${enable_ospfclient}" != "no";then + if test "$enable_ospfclient" != "no";then OSPFCLIENT="ospfclient" fi fi -if test "${enable_bgp_announce}" = "no";then +if test "$enable_bgp_announce" = "no";then AC_DEFINE([DISABLE_BGP_ANNOUNCE], [1], [Disable BGP installation to zebra]) else AC_DEFINE([DISABLE_BGP_ANNOUNCE], [0], [Disable BGP installation to zebra]) fi -if test "${enable_bgp_vnc}" != "no";then +if test "$enable_bgp_vnc" != "no";then AC_DEFINE([ENABLE_BGP_VNC], [1], [Enable BGP VNC support]) fi @@ -1645,15 +1676,15 @@ esac dnl ########################################################################## dnl LARGE if block -if test "${enable_clippy_only}" != "yes"; then +if test "$enable_clippy_only" != "yes"; then dnl ########################################################################## dnl ------------------ dnl check Net-SNMP library dnl ------------------ -if test "${enable_snmp}" != "" -a "${enable_snmp}" != "no"; then +if test "$enable_snmp" != "" -a "$enable_snmp" != "no"; then AC_PATH_TOOL([NETSNMP_CONFIG], [net-snmp-config], [no]) - if test x"$NETSNMP_CONFIG" = x"no"; then + if test "$NETSNMP_CONFIG" = "no"; then AC_MSG_ERROR([--enable-snmp given but unable to find net-snmp-config]) fi SNMP_LIBS="`${NETSNMP_CONFIG} --agent-libs`" @@ -1726,7 +1757,7 @@ dnl confd dnl --------------- if test "$enable_confd" != "" -a "$enable_confd" != "no"; then AC_CHECK_PROG([CONFD], [confd], [confd], [/bin/false], "${enable_confd}/bin") - if test "x$CONFD" = "x/bin/false"; then + if test "$CONFD" = "/bin/false"; then AC_MSG_ERROR([confd was not found on your system.])] fi CONFD_CFLAGS="-I${enable_confd}/include -L${enable_confd}/lib" @@ -1769,12 +1800,12 @@ fi dnl ------ dnl ZeroMQ dnl ------ -if test "x$enable_zeromq" != "xno"; then +if test "$enable_zeromq" != "no"; then PKG_CHECK_MODULES([ZEROMQ], [libzmq >= 4.0.0], [ AC_DEFINE([HAVE_ZEROMQ], [1], [Enable ZeroMQ support]) ZEROMQ=true ], [ - if test "x$enable_zeromq" = "xyes"; then + if test "$enable_zeromq" = "yes"; then AC_MSG_ERROR([configuration specifies --enable-zeromq but libzmq was not found]) fi ]) @@ -1783,7 +1814,7 @@ fi dnl ------------------------------------ dnl Enable RPKI and add librtr to libs dnl ------------------------------------ -if test "${enable_rpki}" = "yes"; then +if test "$enable_rpki" = "yes"; then PKG_CHECK_MODULES([RTRLIB], [rtrlib >= 0.5.0], [RPKI=true], [RPKI=false @@ -1791,6 +1822,42 @@ if test "${enable_rpki}" = "yes"; then ) fi +dnl ------------------------------------ +dnl pimd is not supported on OpenBSD and MacOS +dnl ------------------------------------ +if test "$enable_pimd" != "no"; then +AC_MSG_CHECKING([for pimd OS support]) +case "$host_os" in + darwin*) + AC_MSG_RESULT([no]) + enable_pimd="no" + ;; + openbsd*) + AC_MSG_RESULT([no]) + enable_pimd="no" + ;; + *) + AC_MSG_RESULT([yes]) + ;; +esac +fi + +dnl ------------------------------------- +dnl VRRP is only supported on linux +dnl ------------------------------------- +if test "$enable_vrrpd" != "no"; then 
+AC_MSG_CHECKING([for VRRP OS support]) +case "$host_os" in + linux*) + AC_MSG_RESULT([yes]) + ;; + *) + AC_MSG_RESULT([no]) + enable_vrrpd="no" + ;; +esac +fi + dnl ------------------------------------------ dnl Check whether rtrlib was build with ssh support dnl ------------------------------------------ @@ -1827,7 +1894,7 @@ AC_CACHE_CHECK([for dlinfo(RTLD_DI_ORIGIN)], [frr_cv_rtld_di_origin], [ frr_cv_rtld_di_origin=no ]) ]) -if test "$frr_cv_rtld_di_origin" = yes; then +if test "$frr_cv_rtld_di_origin" = "yes"; then AC_DEFINE([HAVE_DLINFO_ORIGIN], [1], [Have dlinfo RTLD_DI_ORIGIN]) fi @@ -1847,12 +1914,12 @@ AC_CACHE_CHECK([for dlinfo(RTLD_DI_LINKMAP)], [frr_cv_rtld_di_linkmap], [ frr_cv_rtld_di_linkmap=no ]) ]) -if test "$frr_cv_rtld_di_linkmap" = yes; then +if test "$frr_cv_rtld_di_linkmap" = "yes"; then AC_DEFINE([HAVE_DLINFO_LINKMAP], [1], [Have dlinfo RTLD_DI_LINKMAP]) fi dnl ########################################################################## -dnl test "${enable_clippy_only}" != "yes" +dnl test "$enable_clippy_only" != "yes" fi dnl END OF LARGE if block dnl ########################################################################## @@ -1980,6 +2047,10 @@ AC_CHECK_DECL([CLOCK_MONOTONIC], AC_DEFINE([HAVE_CLOCK_MONOTONIC], [1], [Have monotonic clock]) ], [AC_MSG_RESULT([no])], [FRR_INCLUDES]) +AC_SEARCH_LIBS([clock_nanosleep], [rt], [ + AC_DEFINE([HAVE_CLOCK_NANOSLEEP], [1], [Have clock_nanosleep()]) +]) + dnl -------------------------------------- dnl checking for flex and bison dnl -------------------------------------- @@ -2052,7 +2123,7 @@ fi dnl ------------------- dnl capabilities checks dnl ------------------- -if test "${enable_capabilities}" != "no"; then +if test "$enable_capabilities" != "no"; then AC_MSG_CHECKING([whether prctl PR_SET_KEEPCAPS is available]) AC_TRY_COMPILE([#include <sys/prctl.h>], [prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);], [AC_MSG_RESULT([yes]) @@ -2060,11 +2131,11 @@ if test "${enable_capabilities}" != "no"; then frr_ac_keepcaps="yes"], AC_MSG_RESULT([no]) ) - if test x"${frr_ac_keepcaps}" = x"yes"; then + if test "$frr_ac_keepcaps" = "yes"; then AC_CHECK_HEADERS([sys/capability.h]) fi - if test x"${ac_cv_header_sys_capability_h}" = x"yes"; then - AC_CHECK_LIB([cap], [cap_init], + if test "$ac_cv_header_sys_capability_h" = "yes"; then + AC_CHECK_LIB([cap], [cap_init], [AC_DEFINE([HAVE_LCAPS], [1], [Capabilities]) LIBCAP="-lcap" frr_ac_lcaps="yes"] @@ -2081,14 +2152,14 @@ if test "${enable_capabilities}" != "no"; then ] ) fi - if test x"${frr_ac_scaps}" = x"yes" \ - -o x"${frr_ac_lcaps}" = x"yes"; then + if test "$frr_ac_scaps" = "yes" \ + -o "$frr_ac_lcaps" = "yes"; then AC_DEFINE([HAVE_CAPABILITIES], [1], [capabilities]) fi case "$host_os" in linux*) - if test "${enable_clippy_only}" != "yes"; then + if test "$enable_clippy_only" != "yes"; then if test "$frr_ac_lcaps" != "yes"; then AC_MSG_ERROR([libcap and/or its headers were not found. 
Running FRR without libcap support built in causes a huge performance penalty.]) fi @@ -2106,8 +2177,8 @@ AC_SUBST([LIBCAP]) dnl --------------------------- dnl check for glibc 'backtrace' -dnl --------------------------- -if test x"${enable_backtrace}" != x"no" ; then +dnl --------------------------- +if test "$enable_backtrace" != "no" ; then backtrace_ok=no PKG_CHECK_MODULES([UNWIND], [libunwind], [ AC_DEFINE([HAVE_LIBUNWIND], [1], [libunwind]) @@ -2134,7 +2205,7 @@ if test x"${enable_backtrace}" != x"no" ; then ]) ;; esac - if test "$backtrace_ok" = no; then + if test "$backtrace_ok" = "no"; then AC_CHECK_HEADER([execinfo.h], [ AC_SEARCH_LIBS([backtrace], [execinfo], [ AC_DEFINE([HAVE_GLIBC_BACKTRACE], [1], [Glibc backtrace]) @@ -2144,7 +2215,7 @@ if test x"${enable_backtrace}" != x"no" ; then fi fi - if test x"${enable_backtrace}" = x"yes" -a x"${backtrace_ok}" = x"no"; then + if test "$enable_backtrace" = "yes" -a "$backtrace_ok" = "no"; then dnl user explicitly requested backtrace but we failed to find support AC_MSG_FAILURE([failed to find backtrace or libunwind support]) fi @@ -2178,7 +2249,7 @@ struct mallinfo ac_x; ac_x = mallinfo (); frr_cv_mallinfo=no ]) ]) -if test "$frr_cv_mallinfo" = yes; then +if test "$frr_cv_mallinfo" = "yes"; then AC_DEFINE([HAVE_MALLINFO], [1], [mallinfo]) fi @@ -2221,7 +2292,7 @@ dnl configure date dnl ---------- dev_version=`echo $VERSION | grep dev` #don't expire deprecated code in non 'dev' branch -if test "${dev_version}" = ""; then +if test "$dev_version" = ""; then CONFDATE=0 else CONFDATE=`date '+%Y%m%d'` @@ -2232,12 +2303,12 @@ dnl ------------------------------ dnl set paths for state directory dnl ------------------------------ AC_MSG_CHECKING([directory to use for state file]) -if test "${prefix}" = "NONE"; then +if test "$prefix" = "NONE"; then frr_statedir_prefix=""; else frr_statedir_prefix=${prefix} fi -if test "${localstatedir}" = '${prefix}/var'; then +if test "$localstatedir" = '${prefix}/var'; then for FRR_STATE_DIR in ${frr_statedir_prefix}/var/run dnl ${frr_statedir_prefix}/var/adm dnl ${frr_statedir_prefix}/etc dnl @@ -2252,7 +2323,7 @@ if test "${localstatedir}" = '${prefix}/var'; then else frr_statedir=${localstatedir} fi -if test $frr_statedir = "/dev/null"; then +if test "$frr_statedir" = "/dev/null"; then AC_MSG_ERROR([STATE DIRECTORY NOT FOUND! FIX OR SPECIFY --localstatedir!]) fi AC_MSG_RESULT([${frr_statedir}]) @@ -2265,8 +2336,8 @@ AC_DEFINE_UNQUOTED([DAEMON_VTY_DIR], ["$frr_statedir%s%s"], [daemon vty director AC_DEFINE_UNQUOTED([DAEMON_DB_DIR], ["$frr_statedir"], [daemon database directory]) dnl autoconf does this, but it does it too late... -test "x$prefix" = xNONE && prefix=$ac_default_prefix -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' +test "$prefix" = "NONE" && prefix=$ac_default_prefix +test "$exec_prefix" = "NONE" && exec_prefix='${prefix}' dnl get the full path, recursing through variables... 
vtysh_bin="$bindir/vtysh" @@ -2298,44 +2369,44 @@ AC_DEFINE_UNQUOTED([YANG_MODELS_PATH], ["$CFG_YANGMODELS"], [path to YANG data m AC_DEFINE_UNQUOTED([WATCHFRR_SH_PATH], ["${CFG_SBIN%/}/watchfrr.sh"], [path to watchfrr.sh]) dnl various features -AM_CONDITIONAL([SUPPORT_REALMS], [test "${enable_realms}" = "yes"]) -AM_CONDITIONAL([ENABLE_BGP_VNC], [test x${enable_bgp_vnc} != xno]) +AM_CONDITIONAL([SUPPORT_REALMS], [test "$enable_realms" = "yes"]) +AM_CONDITIONAL([ENABLE_BGP_VNC], [test "$enable_bgp_vnc" != "no"]) AM_CONDITIONAL([BGP_BMP], [$bgpd_bmp]) dnl northbound AM_CONDITIONAL([SQLITE3], [$SQLITE3]) -AM_CONDITIONAL([CONFD], [test "x$enable_confd" != "x"]) -AM_CONDITIONAL([SYSREPO], [test "x$enable_sysrepo" = "xyes"]) -AM_CONDITIONAL([GRPC], [test "x$enable_grpc" = "xyes"]) -AM_CONDITIONAL([ZEROMQ], [test "x$ZEROMQ" = "xtrue"]) +AM_CONDITIONAL([CONFD], [test "$enable_confd" != ""]) +AM_CONDITIONAL([SYSREPO], [test "$enable_sysrepo" = "yes"]) +AM_CONDITIONAL([GRPC], [test "$enable_grpc" = "yes"]) +AM_CONDITIONAL([ZEROMQ], [test "$ZEROMQ" = "true"]) dnl plugins -AM_CONDITIONAL([RPKI], [test "x$RPKI" = "xtrue"]) -AM_CONDITIONAL([SNMP], [test "x$SNMP_METHOD" = "xagentx"]) +AM_CONDITIONAL([RPKI], [test "$RPKI" = "true"]) +AM_CONDITIONAL([SNMP], [test "$SNMP_METHOD" = "agentx"]) AM_CONDITIONAL([IRDP], [$IRDP]) -AM_CONDITIONAL([FPM], [test "x$enable_fpm" = "xyes"]) -AM_CONDITIONAL([HAVE_PROTOBUF], [test "x$enable_protobuf" = "xyes"]) +AM_CONDITIONAL([FPM], [test "$enable_fpm" = "yes"]) +AM_CONDITIONAL([HAVE_PROTOBUF], [test "$enable_protobuf" = "yes"]) AM_CONDITIONAL([HAVE_PROTOBUF3], [$PROTO3]) dnl daemons -AM_CONDITIONAL([VTYSH], [test "x$VTYSH" = "xvtysh"]) -AM_CONDITIONAL([ZEBRA], [test "${enable_zebra}" != "no"]) -AM_CONDITIONAL([BGPD], [test "x${enable_bgpd}" != "no"]) -AM_CONDITIONAL([RIPD], [test "${enable_ripd}" != "no"]) -AM_CONDITIONAL([OSPFD], [test "${enable_ospfd}" != "no"]) -AM_CONDITIONAL([LDPD], [test "${enable_ldpd}" != "no"]) -AM_CONDITIONAL([BFDD], [test "x$BFDD" = "xbfdd"]) -AM_CONDITIONAL([NHRPD], [test "x$NHRPD" = "xnhrpd"]) -AM_CONDITIONAL([EIGRPD], [test "${enable_eigrpd}" != "no"]) -AM_CONDITIONAL([WATCHFRR], [test "x$WATCHFRR" = "xwatchfrr"]) -AM_CONDITIONAL([OSPFCLIENT], [test "x$OSPFCLIENT" = "xospfclient"]) -AM_CONDITIONAL([RIPNGD], [test "${enable_ripngd}" != "no"]) -AM_CONDITIONAL([BABELD], [test "${enable_babeld}" != "no"]) -AM_CONDITIONAL([OSPF6D], [test "${enable_ospf6d}" != "no"]) -AM_CONDITIONAL([ISISD], [test "${enable_isisd}" != "no"]) -AM_CONDITIONAL([PIMD], [test "${enable_pimd}" != "no"]) -AM_CONDITIONAL([PBRD], [test "${enable_pbrd}" != "no"]) -AM_CONDITIONAL([SHARPD], [test "${enable_sharpd}" = "yes"]) -AM_CONDITIONAL([STATICD], [test "${enable_staticd}" != "no"]) -AM_CONDITIONAL([FABRICD], [test "${enable_fabricd}" != "no"]) -AM_CONDITIONAL([VRRPD], [test "${enable_vrrpd}" != "no"]) +AM_CONDITIONAL([VTYSH], [test "$VTYSH" = "vtysh"]) +AM_CONDITIONAL([ZEBRA], [test "$enable_zebra" != "no"]) +AM_CONDITIONAL([BGPD], [test "$enable_bgpd" != "no"]) +AM_CONDITIONAL([RIPD], [test "$enable_ripd" != "no"]) +AM_CONDITIONAL([OSPFD], [test "$enable_ospfd" != "no"]) +AM_CONDITIONAL([LDPD], [test "$enable_ldpd" != "no"]) +AM_CONDITIONAL([BFDD], [test "$BFDD" = "bfdd"]) +AM_CONDITIONAL([NHRPD], [test "$NHRPD" = "nhrpd"]) +AM_CONDITIONAL([EIGRPD], [test "$enable_eigrpd" != "no"]) +AM_CONDITIONAL([WATCHFRR], [test "$WATCHFRR" = "watchfrr"]) +AM_CONDITIONAL([OSPFCLIENT], [test "$OSPFCLIENT" = "ospfclient"]) +AM_CONDITIONAL([RIPNGD], [test "$enable_ripngd" != "no"]) 
+AM_CONDITIONAL([BABELD], [test "$enable_babeld" != "no"]) +AM_CONDITIONAL([OSPF6D], [test "$enable_ospf6d" != "no"]) +AM_CONDITIONAL([ISISD], [test "$enable_isisd" != "no"]) +AM_CONDITIONAL([PIMD], [test "$enable_pimd" != "no"]) +AM_CONDITIONAL([PBRD], [test "$enable_pbrd" != "no"]) +AM_CONDITIONAL([SHARPD], [test "$enable_sharpd" = "yes"]) +AM_CONDITIONAL([STATICD], [test "$enable_staticd" != "no"]) +AM_CONDITIONAL([FABRICD], [test "$enable_fabricd" != "no"]) +AM_CONDITIONAL([VRRPD], [test "$enable_vrrpd" != "no"]) AC_CONFIG_FILES([Makefile],[sed -e 's/^#AUTODERP# //' -i Makefile]) @@ -2363,7 +2434,7 @@ AC_CONFIG_COMMANDS([lib/route_types.h], [ ${PERL} "${ac_abs_top_srcdir}/lib/route_types.pl" \ < "${ac_abs_top_srcdir}/lib/route_types.txt" \ > "${dst}.tmp" - test -f "${dst}" \ + test -f "$dst" \ && diff "${dst}.tmp" "${dst}" >/dev/null 2>/dev/null \ && rm "${dst}.tmp" \ || mv "${dst}.tmp" "${dst}" @@ -2371,13 +2442,13 @@ AC_CONFIG_COMMANDS([lib/route_types.h], [ PERL="$PERL" ]) -AS_IF([test "x$with_pkg_git_version" = "xyes"], [ +AS_IF([test "$with_pkg_git_version" = "yes"], [ AC_CONFIG_COMMANDS([lib/gitversion.h], [ dst="${ac_abs_top_builddir}/lib/gitversion.h" ${PERL} "${ac_abs_top_srcdir}/lib/gitversion.pl" \ "${ac_abs_top_srcdir}" \ > "${dst}.tmp" - test -f "${dst}" \ + test -f "$dst" \ && diff "${dst}.tmp" "${dst}" >/dev/null 2>/dev/null \ && rm "${dst}.tmp" \ || mv "${dst}.tmp" "${dst}" @@ -2414,9 +2485,9 @@ zebra protobuf enabled : ${enable_protobuf:-no} The above user and group must have read/write access to the state file directory and to the config files in the config file directory." -if test "${enable_doc}" != "no" -a "$frr_py_mod_sphinx" = false; then +if test "$enable_doc" != "no" -a "$frr_py_mod_sphinx" = "false"; then AC_MSG_WARN([sphinx is missing but required to build documentation]) fi -if test "$frr_py_mod_pytest" = false; then +if test "$frr_py_mod_pytest" = "false"; then AC_MSG_WARN([pytest is missing, unit tests cannot be performed]) fi diff --git a/debian/README.Debian b/debian/README.Debian index cbd70f82f6..01b9213ae4 100644 --- a/debian/README.Debian +++ b/debian/README.Debian @@ -52,31 +52,6 @@ used. This option should only be used for systems that do not have systemd, e.g. Ubuntu 14.04. -* Why has SNMP support been disabled? -===================================== -FRR used to link against the NetSNMP libraries to provide SNMP -support. Those libraries sadly link against the OpenSSL libraries -to provide crypto support for SNMPv3 among others. -OpenSSL now is not compatible with the GNU GENERAL PUBLIC LICENSE (GPL) -licence that FRR is distributed under. For more explanation read: - http://www.gnome.org/~markmc/openssl-and-the-gpl.html - http://www.gnu.org/licenses/gpl-faq.html#GPLIncompatibleLibs -Updating the licence to explicitly allow linking against OpenSSL -would requite the affirmation of all people that ever contributed -a significant part to Zebra / Quagga or FRR and thus are the collective -"copyright holder". That's too much work. Using a shrinked down -version of NetSNMP without OpenSSL or convincing the NetSNMP people -to change to GnuTLS are maybe good solutions but not reachable -during the last days before the Sarge release :-( - - *BUT* - -It is allowed by the used licence mix that you fetch the sources and -build FRR yourself with SNMP with - # apt-get -b source -Ppkg.frr.snmp frr -Just distributing it in binary form, linked against OpenSSL, is forbidden. 
- - * Debian Policy compliance notes ================================ diff --git a/debian/control b/debian/control index ab2df20432..f4275471d5 100644 --- a/debian/control +++ b/debian/control @@ -31,10 +31,10 @@ Build-Depends: python3-sphinx, python3-pytest <!nocheck>, texinfo (>= 4.7) -Standards-Version: 4.2.1 +Standards-Version: 4.4.1 Homepage: https://www.frrouting.org/ -Vcs-Browser: https://github.com/FRRouting/frr/ -Vcs-Git: https://github.com/FRRouting/frr.git +Vcs-Browser: https://github.com/FRRouting/frr/tree/debian/master +Vcs-Git: https://github.com/FRRouting/frr.git -b debian/master Package: frr Architecture: linux-any @@ -104,6 +104,7 @@ Build-Profiles: <!pkg.frr.nortrlib> Package: frr-doc Section: doc Architecture: all +Multi-Arch: foreign Depends: ${misc:Depends}, libjs-jquery, diff --git a/debian/frr.lintian-overrides b/debian/frr.lintian-overrides index a3e6fcdc25..616f265e01 100644 --- a/debian/frr.lintian-overrides +++ b/debian/frr.lintian-overrides @@ -6,5 +6,8 @@ frr binary: spelling-error-in-binary usr/lib/frr/zebra writen written frr binary: spelling-error-in-binary usr/lib/frr/pimd writen written frr binary: spelling-error-in-binary usr/lib/frr/pimd iif if +# prefixed man pages for off-PATH daemons +manpage-without-executable + # personal name spelling-error-in-copyright Ang And diff --git a/doc/developer/building-frr-for-archlinux.rst b/doc/developer/building-frr-for-archlinux.rst new file mode 100644 index 0000000000..7ede35ad9c --- /dev/null +++ b/doc/developer/building-frr-for-archlinux.rst @@ -0,0 +1,129 @@ +Arch Linux +================ + +Installing Dependencies +----------------------- + +.. code-block:: console + + sudo pacman -Syu + sudo pacman -S \ + git autoconf automake libtool make cmake pcre readline texinfo \ + pkg-config pam json-c bison flex python-pytest \ + c-ares python systemd python2-ipaddress python-sphinx \ + systemd-libs net-snmp perl libcap + +.. include:: building-libyang.rst + +Protobuf +^^^^^^^^ + +.. code-block:: console + + sudo pacman -S protobuf-c + +ZeroMQ +^^^^^^ + +.. code-block:: console + + sudo pacman -S zeromq + +Building & Installing FRR +------------------------- + +Add FRR user and groups +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + sudo groupadd -r -g 92 frr + sudo groupadd -r -g 85 frrvty + sudo useradd --system -g frr --home-dir /var/run/frr/ \ + -c "FRR suite" --shell /sbin/nologin frr + sudo usermod -a -G frrvty frr + +Compile +^^^^^^^ + +.. include:: include-compile.rst + +Install FRR configuration files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + sudo install -m 775 -o frr -g frr -d /var/log/frr + sudo install -m 775 -o frr -g frrvty -d /etc/frr + sudo install -m 640 -o frr -g frrvty tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf + sudo install -m 640 -o frr -g frr tools/etc/frr/frr.conf /etc/frr/frr.conf + sudo install -m 640 -o frr -g frr tools/etc/frr/daemons.conf /etc/frr/daemons.conf + sudo install -m 640 -o frr -g frr tools/etc/frr/daemons /etc/frr/daemons + +Tweak sysctls +^^^^^^^^^^^^^ + +Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and +MPLS (if supported by your platform). If your platform does not support MPLS, +skip the MPLS related configuration in this section. 
+ +Edit :file:`/etc/sysctl.conf` (create the file if it doesn't exist) and +append the following values (ignore the other settings): + +:: + + # Enable packet forwarding for IPv4 + net.ipv4.ip_forward=1 + + # Enable packet forwarding for IPv6 + net.ipv6.conf.all.forwarding=1 + +Reboot or use ``sysctl -p`` to apply the same config to the running system. + +Add MPLS kernel modules +""""""""""""""""""""""" + +To enable MPLS support, add the following lines to :file:`/etc/modules-load.d/modules.conf`: + +:: + + # Load MPLS Kernel Modules + mpls_router + mpls_iptunnel + + +And load the kernel modules on the running system: + +.. code-block:: console + + sudo modprobe mpls-router mpls-iptunnel + +Enable MPLS Forwarding +"""""""""""""""""""""" + +Edit :file:`/etc/sysctl.conf` and add the following lines. Make sure to add a line +equal to :file:`net.mpls.conf.eth0.input` for each interface used with MPLS. + +:: + + # Enable MPLS Label processing on all interfaces + net.mpls.conf.eth0.input=1 + net.mpls.conf.eth1.input=1 + net.mpls.conf.eth2.input=1 + net.mpls.platform_labels=100000 + +Install service files +^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service + sudo systemctl enable frr + +Start FRR +^^^^^^^^^ + +.. code-block:: shell + + systemctl start frr diff --git a/doc/developer/building-frr-for-centos6.rst b/doc/developer/building-frr-for-centos6.rst index 04c6b922ce..b730a5ee32 100644 --- a/doc/developer/building-frr-for-centos6.rst +++ b/doc/developer/building-frr-for-centos6.rst @@ -116,7 +116,19 @@ Update rpm database & Install newer sphinx sudo yum update sudo yum install python27-sphinx -.. include:: building-libyang.rst +Install libyang and its dependencies: + +.. code-block:: shell + + sudo yum install pcre-devel doxygen cmake + git clone https://github.com/CESNET/libyang.git + cd libyang + git checkout 090926a89d59a3c4000719505d563aaf6ac60f2 + mkdir build ; cd build + cmake -DENABLE_LYD_PRIV=ON -DCMAKE_INSTALL_PREFIX:PATH=/usr -D CMAKE_BUILD_TYPE:String="Release" .. + make build-rpm + sudo yum install ./rpms/RPMS/x86_64/libyang-0.16.111-0.x86_64.rpm ./rpms/RPMS/x86_64/libyang-devel-0.16.111-0.x86_64.rpm + cd ../..
Get FRR, compile it and install it (from Git) --------------------------------------------- diff --git a/doc/developer/building.rst b/doc/developer/building.rst index 859f612313..ef55954ac2 100644 --- a/doc/developer/building.rst +++ b/doc/developer/building.rst @@ -26,3 +26,4 @@ Building FRR building-frr-for-ubuntu1404 building-frr-for-ubuntu1604 building-frr-for-ubuntu1804 + building-frr-for-archlinux diff --git a/doc/developer/ospf-sr.rst b/doc/developer/ospf-sr.rst index d798ba78ef..070465db5b 100644 --- a/doc/developer/ospf-sr.rst +++ b/doc/developer/ospf-sr.rst @@ -22,7 +22,7 @@ Interoperability ---------------- * Tested on various topology including point-to-point and LAN interfaces - in a mix of Free Range Routing instance and Cisco IOS-XR 6.0.x + in a mix of FRRouting instance and Cisco IOS-XR 6.0.x * Check OSPF LSA conformity with latest wireshark release 2.5.0-rc Implementation details diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst index 33ebe06d2f..7e627781e0 100644 --- a/doc/developer/topotests.rst +++ b/doc/developer/topotests.rst @@ -360,6 +360,7 @@ This is the recommended test writing routine: - Write a topology (Graphviz recommended) - Obtain configuration files - Write the test itself +- Format the new code using `black <https://github.com/psf/black>`_ - Create a Pull Request Topotest File Hierarchy @@ -760,6 +761,8 @@ Requirements: inside folders named after the equipment. - Tests must be able to run without any interaction. To make sure your test conforms with this, run it without the :option:`-s` parameter. +- Use `black <https://github.com/psf/black>`_ code formatter before creating + a pull request. This ensures we have a unified code style. Tips: diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst index 8ce3bdeeb2..e36b57a5aa 100644 --- a/doc/developer/workflow.rst +++ b/doc/developer/workflow.rst @@ -992,6 +992,11 @@ Miscellaneous When in doubt, follow the guidelines in the Linux kernel style guide, or ask on the development mailing list / public Slack instance. +JSON Output +^^^^^^^^^^^ + +All JSON keys are to be camelCased, with no spaces. + .. _documentation: diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 85ccc277a8..91ba37991b 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1997,6 +1997,18 @@ BGP Extended Communities in Route Map This command set Site of Origin value. +.. index:: set extcommunity bandwidth <(1-25600) | cumulative | num-multipaths> [non-transitive] +.. clicmd:: set extcommunity bandwidth <(1-25600) | cumulative | num-multipaths> [non-transitive] + + This command sets the BGP link-bandwidth extended community for the prefix + (best path) for which it is applied. The link-bandwidth can be specified as + an ``explicit value`` (specified in Mbps), or the router can be told to use + the ``cumulative bandwidth`` of all multipaths for the prefix or to compute + it based on the ``number of multipaths``. The link bandwidth extended + community is encoded as ``transitive`` unless the set command explicitly + configures it as ``non-transitive``. + +.. seealso:: :ref:`wecmp_linkbw` Note that the extended expanded community is only used for `match` rule, not for `set` actions. @@ -3152,6 +3164,8 @@ Example of how to set up a 6-Bone connection. .. include:: rpki.rst +.. include:: wecmp_linkbw.rst + .. include:: flowspec.rst .. 
[#med-transitivity-rant] For some set of objects to have an order, there *must* be some binary ordering relation that is defined for *every* combination of those objects, and that relation *must* be transitive. I.e.:, if the relation operator is <, and if a < b and b < c then that relation must carry over and it *must* be that a < c for the objects to have an order. The ordering relation may allow for equality, i.e. a < b and b < a may both be true and imply that a and b are equal in the order and not distinguished by it, in which case the set has a partial order. Otherwise, if there is an order, all the objects have a distinct place in the order and the set has a total order) diff --git a/doc/user/conf.py b/doc/user/conf.py index 5582847431..d8a188b152 100644 --- a/doc/user/conf.py +++ b/doc/user/conf.py @@ -132,7 +132,8 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', 'rpki.rst', 'routeserver.rst', - 'ospf_fundamentals.rst', 'flowspec.rst', 'snmptrap.rst'] + 'ospf_fundamentals.rst', 'flowspec.rst', 'snmptrap.rst', + 'wecmp_linkbw.rst'] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/doc/user/isisd.rst b/doc/user/isisd.rst index 6684a83e1f..9a0a0afb0c 100644 --- a/doc/user/isisd.rst +++ b/doc/user/isisd.rst @@ -111,6 +111,12 @@ writing, *isisd* does not support multiple ISIS processes. Enable or disable :rfc:`6232` purge originator identification. +.. index:: [no] lsp-mtu (128-4352) +.. clicmd:: [no] lsp-mtu (128-4352) + + Configure the maximum size of generated LSPs, in bytes. + + .. _isis-timer: ISIS Timer diff --git a/doc/user/ldpd.rst b/doc/user/ldpd.rst index 977195d6a7..2df4ba3005 100644 --- a/doc/user/ldpd.rst +++ b/doc/user/ldpd.rst @@ -108,6 +108,11 @@ LDP Configuration The following command located under MPLS router node configures the MPLS router-id of the local device. +.. index:: [no] ordered-control +.. clicmd:: [no] ordered-control + + Configure LDP Ordered Label Distribution Control. + .. index:: [no] address-family [ipv4 | ipv6] .. clicmd:: [no] address-family [ipv4 | ipv6] diff --git a/doc/user/nhrpd.rst b/doc/user/nhrpd.rst index 95ef9cb7ee..8d3bea7c94 100644 --- a/doc/user/nhrpd.rst +++ b/doc/user/nhrpd.rst @@ -199,6 +199,31 @@ NHRP Events Configure the Unix path for the event socket. +.. _show-nhrp: + +Show NHRP +========== + +.. index:: show [ip|ipv6] nhrp cache [json] +.. clicmd:: show [ip|ipv6] nhrp cache [json] + + Dump the cache entries. + +.. index:: show [ip|ipv6] nhrp opennhrp [json] +.. clicmd:: show [ip|ipv6] nhrp opennhrp [json] + + Dump the cache entries with opennhrp format. + +.. index:: show [ip|ipv6] nhrp nhs [json] +.. clicmd:: show [ip|ipv6] nhrp nhs [json] + + Dump the hub context. + +.. index:: show dmvpn [json] +.. clicmd:: show dmvpn [json] + + Dump the security contexts. + Configuration Example ===================== diff --git a/doc/user/overview.rst b/doc/user/overview.rst index b72ceb8d38..cf8cc44097 100644 --- a/doc/user/overview.rst +++ b/doc/user/overview.rst @@ -300,10 +300,18 @@ BGP :t:`The Generalized TTL Security Mechanism (GTSM). V. Gill, J. Heasley, D. Meyer, P. Savola, C. Pingnataro. October 2007.` - :rfc:`5575` :t:`Dissemination of Flow Specification Rules. P. Marques, N. Sheth, R. Raszuk, B. Greene, J. Mauch, D. McPherson. August 2009` +- :rfc:`6286` + :t:`Autonomous-System-Wide Unique BGP Identifier for BGP-4. E. Chen, J. 
Yuan, June 2011.` +- :rfc:`6608` + :t:`Subcodes for BGP Finite State Machine Error. J. Dong, M. Chen, Huawei Technologies, A. Suryanarayana, Cisco Systems. May 2012.` - :rfc:`6810` :t:`The Resource Public Key Infrastructure (RPKI) to Router Protocol. R. Bush, R. Austein. January 2013.` - :rfc:`6811` :t:`BGP Prefix Origin Validation. P. Mohapatra, J. Scudder, D. Ward, R. Bush, R. Austein. January 2013.` +- :rfc:`7606` + :t:`Revised Error Handling for BGP UPDATE Messages. E. Chen, J. Scudder, P. Mohapatra, K. Patel. August 2015.` +- :rfc:`7607` + :t:`Codification of AS 0 Processing. W. Kumari, R. Bush, H. Schiller, K. Patel. August 2015.` - :rfc:`7611` :t:`BGP ACCEPT_OWN Community Attribute. J. Uttaro, P. Mohapatra, D. Smith, R. Raszuk, J. Scudder. August 2015.` - :rfc:`7999` diff --git a/doc/user/pim.rst b/doc/user/pim.rst index 36c8b44aa4..f480c6bdc4 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -66,6 +66,14 @@ Certain signals have special meanings to *pimd*. prefix of group ranges covered. This command is vrf aware, to configure for a vrf, enter the vrf submode. +.. index:: ip pim register-accept-list PLIST +.. clicmd:: ip pim register-accept-list PLIST + + When pim receives a register packet the source of the packet will be compared + to the prefix-list specified, PLIST, and if a permit is received normal + processing continues. If a deny is returned for the source address of the + register packet a register stop message is sent to the source. + .. index:: ip pim spt-switchover infinity-and-beyond .. clicmd:: ip pim spt-switchover infinity-and-beyond diff --git a/doc/user/routemap.rst b/doc/user/routemap.rst index 472e2c53ff..f557cbe022 100644 --- a/doc/user/routemap.rst +++ b/doc/user/routemap.rst @@ -151,10 +151,15 @@ Route Map Match Command Matches the specified `prefix-len`. This is a Zebra specific command. -.. index:: match ip next-hop IPV4_ADDR -.. clicmd:: match ip next-hop IPV4_ADDR +.. index:: match ip next-hop address IPV4_ADDR +.. clicmd:: match ip next-hop address IPV4_ADDR - Matches the specified `ipv4_addr`. + This is a BGP specific match command. Matches the specified `ipv4_addr`. + +.. index:: match ipv6 next-hop IPV6_ADDR +.. clicmd:: match ipv6 next-hop IPV6_ADDR + + This is a BGP specific match command. Matches the specified `ipv6_addr`. .. index:: match as-path AS_PATH .. clicmd:: match as-path AS_PATH diff --git a/doc/user/sharp.rst b/doc/user/sharp.rst index 111e9dc9e8..199685cdfb 100644 --- a/doc/user/sharp.rst +++ b/doc/user/sharp.rst @@ -86,3 +86,20 @@ keyword. At present, no sharp commands will be preserved in the config. Allow end user to dump associated data with the nexthop tracking that may have been turned on. + +.. index:: sharp lsp +.. clicmd:: sharp lsp (0-100000) nexthop-group NAME [prefix A.B.C.D/M TYPE [instance (0-255)]] + + Install an LSP using the specified in-label, with nexthops as + listed in nexthop-group ``NAME``. The LSP is installed as type + ZEBRA_LSP_SHARP. If ``prefix`` is specified, an existing route with + type ``TYPE`` (and optional ``instance`` id) will be updated to use + the LSP. + +.. index:: sharp remove lsp +.. clicmd:: sharp remove lsp (0-100000) nexthop-group NAME [prefix A.B.C.D/M TYPE [instance (0-255)]] + + Remove a SHARPD LSP that uses the specified in-label, where the + nexthops are specified in nexthop-group ``NAME``. If ``prefix`` is + specified, remove label bindings from the route of type ``TYPE`` + also. 
diff --git a/doc/user/subdir.am b/doc/user/subdir.am index ce519fbfbf..0b64232f3d 100644 --- a/doc/user/subdir.am +++ b/doc/user/subdir.am @@ -44,6 +44,7 @@ user_RSTFILES = \ doc/user/bfd.rst \ doc/user/flowspec.rst \ doc/user/watchfrr.rst \ + doc/user/wecmp_linkbw.rst \ # end EXTRA_DIST += \ diff --git a/doc/user/wecmp_linkbw.rst b/doc/user/wecmp_linkbw.rst new file mode 100644 index 0000000000..0d2fe9d756 --- /dev/null +++ b/doc/user/wecmp_linkbw.rst @@ -0,0 +1,298 @@ +.. _wecmp_linkbw: + +Weighted ECMP using BGP link bandwidth +====================================== + +.. _features-of-wecmp-linkbw: + +Overview +-------- + +In normal equal cost multipath (ECMP), the route to a destination has +multiple next hops and traffic is expected to be equally distributed +across these next hops. In practice, flow-based hashing is used so that +all traffic associated with a particular flow uses the same next hop, +and by extension, the same path across the network. + +Weighted ECMP using BGP link bandwidth introduces support for network-wide +unequal cost multipathing (UCMP) to an IP destination. The unequal cost +load balancing is implemented by the forwarding plane based on the weights +associated with the next hops of the IP prefix. These weights are computed +based on the bandwidths of the corresponding multipaths which are encoded +in the ``BGP link bandwidth extended community`` as specified in +[Draft-IETF-idr-link-bandwidth]_. Exchange of an appropriate BGP link +bandwidth value for a prefix across the network results in network-wide +unequal cost multipathing. + +One of the primary use cases of this capability is in the data center when +a service (represented by its anycast IP) has an unequal set of resources +across the regions (e.g., PODs) of the data center and the network itself +provides the load balancing function instead of an external load balancer. +Refer to [Draft-IETF-mohanty-bess-ebgp-dmz]_ and :rfc:`7938` for details +on this use case. This use case is applicable in a pure L3 network as +well as in an EVPN network. + +The traditional use case for BGP link bandwidth to load balance traffic +to the exit routers in the AS based on the bandwidth of their external +eBGP peering links is also supported. + + +Design Principles +----------------- + +Next hop weight computation and usage +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As described, in UCMP, there is a weight associated with each next hop of an +IP prefix, and traffic is expected to be distributed across the next hops in +proportion to their weight. The weight of a next hop is a simple factoring +of the bandwidth of the corresponding path against the total bandwidth of +all multipaths, mapped to the range 1 to 100. What happens if not all the +paths in the multipath set have link bandwidth associated with them? In such +a case, in adherence to [Draft-IETF-idr-link-bandwidth]_, the behavior +reverts to standard ECMP among all the multipaths, with the link bandwidth +being effectively ignored. + +Note that there is no change to either the BGP best path selection algorithm +or to the multipath computation algorithm; the mapping of link bandwidth to +weight happens at the time of installation of the route in the RIB. + +If data forwarding is implemented by means of the Linux kernel, the next hop’s +weight is used in the hash calculation. The kernel uses the Hash threshold +algorithm and use of the next hop weight is built into it; next hops need +not be expanded to achieve UCMP.
UCMP for IPv4 is available in older Linux +kernels too, while UCMP for IPv6 is available from the 4.16 kernel onwards. + +If data forwarding is realized in hardware, common implementations expand +the next hops (i.e., they are repeated) in the ECMP container in proportion +to their weight. For example, if the weights associated with 3 next hops for +a particular route are 50, 25 and 25 and the ECMP container has a size of 16 +next hops, the first next hop will be repeated 8 times and the other 2 next +hops repeated 4 times each. Other implementations are also possible. + +Unequal cost multipath across a network +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For the use cases listed above, it is not sufficient to support UCMP on just +one router (e.g., egress router), or individually, on multiple routers; UCMP +must be deployed across the entire network. This is achieved by employing the +BGP link-bandwidth extended community. + +At the router which originates the BGP link bandwidth, there has to be user +configuration to trigger it, which is described below. Receiving routers +would use the received link bandwidth from their downstream routers to +determine the next hop weight as described in the earlier section. Further, +if the received link bandwidth is a transitive attribute, it would be +propagated to eBGP peers, with the additional change that if the next hop +is set to oneself, the cumulative link bandwidth of all downstream paths +is propagated to other routers. In this manner, the entire network will +know how to distribute traffic to an anycast service across the network. + +The BGP link-bandwidth extended community is encoded in bytes-per-second. +In the use case where UCMP must be based on the number of paths, a reference +bandwidth of 1 Mbps is used. So, for example, if there are 4 equal cost paths +to an anycast IP, the encoded bandwidth in the extended community will be +500,000. The actual value itself doesn’t matter as long as all routers +originating the link-bandwidth are doing it in the same way. + + +Configuration Guide +------------------- + +The configuration for weighted ECMP using BGP link bandwidth requires +one essential step - using a route-map to inject the link bandwidth +extended community. An additional option is provided to control the +processing of received link bandwidth. + +Injecting link bandwidth into the network +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +At the "entry point" router that is injecting the prefix for which weighted +load balancing must be performed, a route-map must be configured to +attach the link bandwidth extended community. + +For the use case of providing weighted load balancing for an anycast service, +this configuration will typically need to be applied at the TOR or Leaf +router that is connected to servers which provide the anycast service and +the bandwidth would be based on the number of multipaths for the destination. + +For the use case of load balancing to the exit router, the exit router should +be configured with the route map specifying a bandwidth value that +corresponds to the bandwidth of the link connecting to its eBGP peer in the +adjoining AS. In addition, the link bandwidth extended community must be +explicitly configured to be non-transitive.
+ +The complete syntax of the route-map set command can be found at +:ref:`bgp-extended-communities-in-route-map` + +This route-map is supported only at two attachment points: +(a) the outbound route-map attached to a peer or peer-group, per address-family +(b) the EVPN advertise route-map used to inject IPv4 or IPv6 unicast routes +into EVPN as type-5 routes. + +Since the link bandwidth origination is done by using a route-map, it can +be constrained to certain prefixes (e.g., only for anycast services) or it +can be generated for all prefixes. Further, when the route-map is used in +the neighbor context, the link bandwidth usage can be constrained to certain +peers only. + +A sample configuration is shown below and illustrates link bandwidth +advertisement towards the "SPINE" peer-group for anycast IPs in the +range 192.168.x.x + +.. code-block:: frr + + ip prefix-list anycast_ip seq 10 permit 192.168.0.0/16 le 32 + route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths + route-map anycast_ip permit 20 + ! + router bgp 65001 + neighbor SPINE peer-group + neighbor SPINE remote-as external + neighbor 172.16.35.1 peer-group SPINE + neighbor 172.16.36.1 peer-group SPINE + ! + address-family ipv4 unicast + network 110.0.0.1/32 + network 192.168.44.1/32 + neighbor SPINE route-map anycast_ip out + exit-address-family + ! + + +Controlling link bandwidth processing on the receiver +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There is no configuration necessary to process received link bandwidth and +translate it into the weight associated with the corresponding next hop; +that happens by default. If some of the multipaths do not have the link +bandwidth extended community, the default behavior is to revert to normal +ECMP as recommended in [Draft-IETF-idr-link-bandwidth]_. + +The operator can change these behaviors with the following configuration: + +.. index:: bgp bestpath bandwidth <ignore | skip-missing | default-weight-for-missing> +.. clicmd:: bgp bestpath bandwidth <ignore | skip-missing | default-weight-for-missing> + +The different options imply behavior as follows: + +- ignore: Ignore link bandwidth completely for route installation + (i.e., do regular ECMP, not weighted) +- skip-missing: Skip paths without link bandwidth and do UCMP among + the others (if at least some paths have link-bandwidth) +- default-weight-for-missing: Assign a low default weight (value 1) + to paths not having link bandwidth + +This configuration is per BGP instance similar to other BGP route-selection +controls; it operates on both IPv4-unicast and IPv6-unicast routes in that +instance. In an EVPN network, this configuration (if required) should be +implemented in the tenant VRF and is again applicable for IPv4-unicast and +IPv6-unicast, including the ones sourced from EVPN type-5 routes. + +A sample snippet of FRR configuration on a receiver to skip paths without +link bandwidth and do weighted ECMP among the other paths (if some of them +have link bandwidth) is as shown below. + +.. code-block:: frr + + router bgp 65021 + bgp bestpath as-path multipath-relax + bgp bestpath bandwidth skip-missing + neighbor LEAF peer-group + neighbor LEAF remote-as external + neighbor 172.16.35.2 peer-group LEAF + neighbor 172.16.36.2 peer-group LEAF + ! + address-family ipv4 unicast + network 130.0.0.1/32 + exit-address-family + ! 
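As a concrete illustration of the weight derivation described above, here is a minimal standalone C sketch (not FRR's actual implementation; all names are illustrative). It scales each path's link bandwidth against the cumulative bandwidth of the multipath set, keeps the result in the 1 to 100 range, and gives a low weight of 1 to paths without link bandwidth, standing in for the ``default-weight-for-missing`` behavior. Fed the 1000 Mbps and 500 Mbps values used in the monitoring example further below, it yields the same 66/33 split.

.. code-block:: c

   /* Editorial sketch only, not FRR source. Scale each path's link
    * bandwidth (bytes/sec) against the cumulative bandwidth and map the
    * result into the 1..100 weight range. A bandwidth of 0 marks a path
    * that carried no link-bandwidth extended community and gets the low
    * default weight; the revert-to-ECMP and "skip-missing" behaviors are
    * omitted for brevity. */
   #include <stdint.h>
   #include <stdio.h>

   static void compute_weights(const uint64_t *bw, unsigned int npaths,
                               uint8_t *weight)
   {
           uint64_t total = 0;

           for (unsigned int i = 0; i < npaths; i++)
                   total += bw[i];

           for (unsigned int i = 0; i < npaths; i++) {
                   if (bw[i] == 0 || total == 0) {
                           /* No usable bandwidth: assign the low default. */
                           weight[i] = 1;
                           continue;
                   }
                   /* Factor this path's bandwidth against the total,
                    * clamping to at least 1. */
                   uint64_t w = bw[i] * 100 / total;
                   weight[i] = (uint8_t)(w ? w : 1);
           }
   }

   int main(void)
   {
           /* 1000 Mbps and 500 Mbps, encoded in bytes per second. */
           uint64_t bw[2] = {125000000, 62500000};
           uint8_t weight[2];

           compute_weights(bw, 2, weight);
           printf("weights: %u %u\n", (unsigned)weight[0],
                  (unsigned)weight[1]); /* prints: weights: 66 33 */
           return 0;
   }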
+ + +Stopping the propagation of the link bandwidth outside a domain +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The link bandwidth extended community will get automatically propagated +with the prefix to EBGP peers, if it is encoded as a transitive attribute +by the originator. If this propagation has to be stopped outside of a +particular domain (e.g., stopped from being propagated to routers outside +of the data center core network), the mechanism available is to disable +the advertisement of all BGP extended communities on the specific peerings. +In other words, the propagation cannot be blocked just for the link bandwidth +extended community. The configuration to disable all extended communities +can be applied to a peer or peer-group (per address-family). + +Of course, the other common way to stop the propagation of the link bandwidth +outside the domain is to block the prefixes themselves from being advertised +and possibly, announce only an aggregate route. This would be quite common +in an EVPN network. + +BGP link bandwidth and UCMP monitoring & troubleshooting +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Existing operational commands to display the BGP routing table for a specific +prefix will show the link bandwidth extended community also, if present. + +An example of an IPv4-unicast route received with the link bandwidth +attribute from two peers is shown below: + +.. code-block:: frr + + CLI# show bgp ipv4 unicast 192.168.10.1/32 + BGP routing table entry for 192.168.10.1/32 + Paths: (2 available, best #2, table default) + Advertised to non peer-group peers: + l1(swp1) l2(swp2) l3(swp3) l4(swp4) + 65002 + fe80::202:ff:fe00:1b from l2(swp2) (110.0.0.2) + (fe80::202:ff:fe00:1b) (used) + Origin IGP, metric 0, valid, external, multipath, bestpath-from-AS 65002 + Extended Community: LB:65002:125000000 (1000.000 Mbps) + Last update: Thu Feb 20 18:34:16 2020 + + 65001 + fe80::202:ff:fe00:15 from l1(swp1) (110.0.0.1) + (fe80::202:ff:fe00:15) (used) + Origin IGP, metric 0, valid, external, multipath, bestpath-from-AS 65001, best (Older Path) + Extended Community: LB:65001:62500000 (500.000 Mbps) + Last update: Thu Feb 20 18:22:34 2020 + +The weights associated with the next hops of a route can be seen by querying +the RIB for a specific route. + +For example, the next hop weights corresponding to the link bandwidths in the +above example are illustrated below: + +.. code-block:: frr + + spine1# show ip route 192.168.10.1/32 + Routing entry for 192.168.10.1/32 + Known via "bgp", distance 20, metric 0, best + Last update 00:00:32 ago + * fe80::202:ff:fe00:1b, via swp2, weight 66 + * fe80::202:ff:fe00:15, via swp1, weight 33 + +For troubleshooting, existing debug logs ``debug bgp updates``, +``debug bgp bestpath <prefix>``, ``debug bgp zebra`` and +``debug zebra kernel`` can be used. + +A debug log snippet when ``debug bgp zebra`` is enabled and a route is +installed by BGP in the RIB with next hop weights is shown below: + +..
code-block:: frr + + 2020-02-29T06:26:19.927754+00:00 leaf1 bgpd[5459]: bgp_zebra_announce: p=192.168.150.1/32, bgp_is_valid_label: 0 + 2020-02-29T06:26:19.928096+00:00 leaf1 bgpd[5459]: Tx route add VRF 33 192.168.150.1/32 metric 0 tag 0 count 2 + 2020-02-29T06:26:19.928289+00:00 leaf1 bgpd[5459]: nhop [1]: 110.0.0.6 if 35 VRF 33 wt 50 RMAC 0a:11:2f:7d:35:20 + 2020-02-29T06:26:19.928479+00:00 leaf1 bgpd[5459]: nhop [2]: 110.0.0.5 if 35 VRF 33 wt 50 RMAC 32:1e:32:a3:6c:bf + 2020-02-29T06:26:19.928668+00:00 leaf1 bgpd[5459]: bgp_zebra_announce: 192.168.150.1/32: announcing to zebra (recursion NOT set) + + +References +---------- + +.. [Draft-IETF-idr-link-bandwidth] <https://tools.ietf.org/html/draft-ietf-idr-link-bandwidth> +.. [Draft-IETF-mohanty-bess-ebgp-dmz] <https://tools.ietf.org/html/draft-mohanty-bess-ebgp-dmz> + diff --git a/eigrpd/eigrp_network.c b/eigrpd/eigrp_network.c index 39008a01c4..072ff29705 100644 --- a/eigrpd/eigrp_network.c +++ b/eigrpd/eigrp_network.c @@ -218,7 +218,7 @@ int eigrp_network_set(struct eigrp *eigrp, struct prefix *p) struct route_node *rn; struct interface *ifp; - rn = route_node_get(eigrp->networks, (struct prefix *)p); + rn = route_node_get(eigrp->networks, p); if (rn->info) { /* There is already same network statement. */ route_unlock_node(rn); diff --git a/isisd/fabricd.c b/isisd/fabricd.c index b9c27d51bd..4a4b25fa1d 100644 --- a/isisd/fabricd.c +++ b/isisd/fabricd.c @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -122,9 +122,9 @@ static bool neighbor_entry_hash_cmp(const void *a, const void *b) return memcmp(na->id, nb->id, sizeof(na->id)) == 0; } -static int neighbor_entry_list_cmp(void *a, void *b) +static int neighbor_entry_list_cmp(const void *a, const void *b) { - struct neighbor_entry *na = a, *nb = b; + const struct neighbor_entry *na = a, *nb = b; return -memcmp(na->id, nb->id, sizeof(na->id)); } diff --git a/isisd/fabricd.h b/isisd/fabricd.h index 6e93440f3a..9455cdb0f0 100644 --- a/isisd/fabricd.h +++ b/isisd/fabricd.h @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c index 1d70521e68..9beed206e8 100644 --- a/isisd/isis_adjacency.c +++ b/isisd/isis_adjacency.c @@ -353,7 +353,7 @@ void isis_adj_print(struct isis_adjacency *adj) if (dyn) zlog_debug("%s", dyn->hostname); - zlog_debug("SystemId %20s SNPA %s, level %d\nHolding Time %d", + zlog_debug("SystemId %20s SNPA %s, level %d; Holding Time %d", sysid_print(adj->sysid), snpa_print(adj->snpa), adj->level, adj->hold_time); if (adj->ipv4_address_count) { diff --git a/isisd/isis_bpf.c b/isisd/isis_bpf.c index e916a50883..19695e7ab4 100644 --- a/isisd/isis_bpf.c +++ b/isisd/isis_bpf.c @@ -73,7 +73,7 @@ static const uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05}; static const uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04}; #endif -static char sock_buff[8192]; +static char sock_buff[16384]; static int open_bpf_dev(struct isis_circuit *circuit) { diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index e4152a8712..7d4f7b355d 100644 --- 
a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -345,8 +345,7 @@ void isis_circuit_del_addr(struct isis_circuit *circuit, } else { prefix2str(connected->address, buf, sizeof(buf)); zlog_warn( - "Nonexistent ip address %s removal attempt from \ - circuit %s", + "Nonexistent ip address %s removal attempt from circuit %s", buf, circuit->interface->name); zlog_warn("Current ip addresses on %s:", circuit->interface->name); @@ -394,8 +393,7 @@ void isis_circuit_del_addr(struct isis_circuit *circuit, if (!found) { prefix2str(connected->address, buf, sizeof(buf)); zlog_warn( - "Nonexistent ip address %s removal attempt from \ - circuit %s", + "Nonexistent ip address %s removal attempt from circuit %s", buf, circuit->interface->name); zlog_warn("Current ip addresses on %s:", circuit->interface->name); diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c index 3144b3c28e..c12c7fa936 100644 --- a/isisd/isis_cli.c +++ b/isisd/isis_cli.c @@ -402,21 +402,7 @@ DEFPY(no_is_type, no_is_type_cmd, "Act as both a station router and an area router\n" "Act as an area router only\n") { - const char *value = NULL; - struct isis_area *area; - - area = nb_running_get_entry(NULL, VTY_CURR_XPATH, false); - - /* - * Put the is-type back to defaults: - * - level-1-2 on first area - * - level-1 for the rest - */ - if (area && listgetdata(listhead(isis->area_list)) == area) - value = "level-1-2"; - else - value = NULL; - nb_cli_enqueue_change(vty, "./is-type", NB_OP_MODIFY, value); + nb_cli_enqueue_change(vty, "./is-type", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } @@ -527,7 +513,7 @@ DEFPY(no_metric_style, no_metric_style_cmd, "Send and accept both styles of TLVs during transition\n" "Use new style of TLVs to carry wider metric\n") { - nb_cli_enqueue_change(vty, "./metric-style", NB_OP_MODIFY, "narrow"); + nb_cli_enqueue_change(vty, "./metric-style", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } @@ -640,7 +626,8 @@ void cli_show_isis_domain_pwd(struct vty *vty, struct lyd_node *dnode, } /* - * XPath: /frr-isisd:isis/instance/lsp/generation-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/generation-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/generation-interval */ DEFPY(lsp_gen_interval, lsp_gen_interval_cmd, "lsp-gen-interval [level-1|level-2]$level (1-120)$val", @@ -650,11 +637,13 @@ DEFPY(lsp_gen_interval, lsp_gen_interval_cmd, "Minimum interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-1", - NB_OP_MODIFY, val_str); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, val_str); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-2", - NB_OP_MODIFY, val_str); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, val_str); return nb_cli_apply_changes(vty, NULL); } @@ -668,31 +657,20 @@ DEFPY(no_lsp_gen_interval, no_lsp_gen_interval_cmd, "Minimum interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-1", - NB_OP_MODIFY, NULL); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, NULL); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-2", - NB_OP_MODIFY, NULL); + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, NULL); return 
nb_cli_apply_changes(vty, NULL); } -void cli_show_isis_lsp_gen_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults) -{ - const char *l1 = yang_dnode_get_string(dnode, "./level-1"); - const char *l2 = yang_dnode_get_string(dnode, "./level-2"); - - if (strmatch(l1, l2)) - vty_out(vty, " lsp-gen-interval %s\n", l1); - else { - vty_out(vty, " lsp-gen-interval level-1 %s\n", l1); - vty_out(vty, " lsp-gen-interval level-2 %s\n", l2); - } -} - /* - * XPath: /frr-isisd:isis/instance/lsp/refresh-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval */ DEFPY(lsp_refresh_interval, lsp_refresh_interval_cmd, "lsp-refresh-interval [level-1|level-2]$level (1-65235)$val", @@ -702,10 +680,12 @@ DEFPY(lsp_refresh_interval, lsp_refresh_interval_cmd, "LSP refresh interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-1", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", NB_OP_MODIFY, val_str); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-2", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", NB_OP_MODIFY, val_str); return nb_cli_apply_changes(vty, NULL); @@ -720,32 +700,22 @@ DEFPY(no_lsp_refresh_interval, no_lsp_refresh_interval_cmd, "LSP refresh interval in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-1", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", NB_OP_MODIFY, NULL); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-2", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } -void cli_show_isis_lsp_ref_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults) -{ - const char *l1 = yang_dnode_get_string(dnode, "./level-1"); - const char *l2 = yang_dnode_get_string(dnode, "./level-2"); - - if (strmatch(l1, l2)) - vty_out(vty, " lsp-refresh-interval %s\n", l1); - else { - vty_out(vty, " lsp-refresh-interval level-1 %s\n", l1); - vty_out(vty, " lsp-refresh-interval level-2 %s\n", l2); - } -} - /* - * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime */ + DEFPY(max_lsp_lifetime, max_lsp_lifetime_cmd, "max-lsp-lifetime [level-1|level-2]$level (350-65535)$val", "Maximum LSP lifetime\n" @@ -754,10 +724,12 @@ DEFPY(max_lsp_lifetime, max_lsp_lifetime_cmd, "LSP lifetime in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-1", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", NB_OP_MODIFY, val_str); if (!level || strmatch(level, "level-2")) - nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-2", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", NB_OP_MODIFY, val_str); return nb_cli_apply_changes(vty, NULL); @@ -772,26 +744,125 @@ DEFPY(no_max_lsp_lifetime, no_max_lsp_lifetime_cmd, "LSP lifetime in seconds\n") { if (!level || strmatch(level, "level-1")) - nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-1", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", NB_OP_MODIFY, NULL); if (!level || strmatch(level, 
"level-2")) - nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-2", + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", NB_OP_MODIFY, NULL); return nb_cli_apply_changes(vty, NULL); } -void cli_show_isis_lsp_max_lifetime(struct vty *vty, struct lyd_node *dnode, - bool show_defaults) +/* unified LSP timers command + * XPath: /frr-isisd:isis/instance/lsp/timers + */ + +DEFPY(lsp_timers, lsp_timers_cmd, + "lsp-timers [level-1|level-2]$level gen-interval (1-120)$gen refresh-interval (1-65235)$refresh max-lifetime (350-65535)$lifetime", + "LSP-related timers\n" + "LSP-related timers for Level 1 only\n" + "LSP-related timers for Level 2 only\n" + "Minimum interval between regenerating same LSP\n" + "Generation interval in seconds\n" + "LSP refresh interval\n" + "LSP refresh interval in seconds\n" + "Maximum LSP lifetime\n" + "Maximum LSP lifetime in seconds\n") { - const char *l1 = yang_dnode_get_string(dnode, "./level-1"); - const char *l2 = yang_dnode_get_string(dnode, "./level-2"); + if (!level || strmatch(level, "level-1")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, gen_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", + NB_OP_MODIFY, refresh_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", + NB_OP_MODIFY, lifetime_str); + } + if (!level || strmatch(level, "level-2")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, gen_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", + NB_OP_MODIFY, refresh_str); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", + NB_OP_MODIFY, lifetime_str); + } - if (strmatch(l1, l2)) - vty_out(vty, " max-lsp-lifetime %s\n", l1); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(no_lsp_timers, no_lsp_timers_cmd, + "no lsp-timers [level-1|level-2]$level [gen-interval (1-120) refresh-interval (1-65235) max-lifetime (350-65535)]", + NO_STR + "LSP-related timers\n" + "LSP-related timers for Level 1 only\n" + "LSP-related timers for Level 2 only\n" + "Minimum interval between regenerating same LSP\n" + "Generation interval in seconds\n" + "LSP refresh interval\n" + "LSP refresh interval in seconds\n" + "Maximum LSP lifetime\n" + "Maximum LSP lifetime in seconds\n") +{ + if (!level || strmatch(level, "level-1")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-1/generation-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/refresh-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-1/maximum-lifetime", + NB_OP_MODIFY, NULL); + } + if (!level || strmatch(level, "level-2")) { + nb_cli_enqueue_change( + vty, "./lsp/timers/level-2/generation-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/refresh-interval", + NB_OP_MODIFY, NULL); + nb_cli_enqueue_change(vty, + "./lsp/timers/level-2/maximum-lifetime", + NB_OP_MODIFY, NULL); + } + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_isis_lsp_timers(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + const char *l1_refresh = + yang_dnode_get_string(dnode, "./level-1/refresh-interval"); + const char *l2_refresh = + yang_dnode_get_string(dnode, "./level-2/refresh-interval"); + const char *l1_lifetime = + yang_dnode_get_string(dnode, "./level-1/maximum-lifetime"); + const char *l2_lifetime = + yang_dnode_get_string(dnode, "./level-2/maximum-lifetime"); + const char *l1_gen 
= + yang_dnode_get_string(dnode, "./level-1/generation-interval"); + const char *l2_gen = + yang_dnode_get_string(dnode, "./level-2/generation-interval"); + if (strmatch(l1_refresh, l2_refresh) + && strmatch(l1_lifetime, l2_lifetime) && strmatch(l1_gen, l2_gen)) + vty_out(vty, + " lsp-timers gen-interval %s refresh-interval %s max-lifetime %s\n", + l1_gen, l1_refresh, l1_lifetime); else { - vty_out(vty, " max-lsp-lifetime level-1 %s\n", l1); - vty_out(vty, " max-lsp-lifetime level-2 %s\n", l2); + vty_out(vty, + " lsp-timers level-1 gen-interval %s refresh-interval %s max-lifetime %s\n", + l1_gen, l1_refresh, l1_lifetime); + vty_out(vty, + " lsp-timers level-2 gen-interval %s refresh-interval %s max-lifetime %s\n", + l2_gen, l2_refresh, l2_lifetime); } } @@ -1398,8 +1469,8 @@ void cli_show_ip_isis_metric(struct vty *vty, struct lyd_node *dnode, if (strmatch(l1, l2)) vty_out(vty, " isis metric %s\n", l1); else { - vty_out(vty, " isis metric %s level-1\n", l1); - vty_out(vty, " isis metric %s level-2\n", l2); + vty_out(vty, " isis metric level-1 %s\n", l1); + vty_out(vty, " isis metric level-2 %s\n", l2); } } @@ -1456,8 +1527,8 @@ void cli_show_ip_isis_hello_interval(struct vty *vty, struct lyd_node *dnode, if (strmatch(l1, l2)) vty_out(vty, " isis hello-interval %s\n", l1); else { - vty_out(vty, " isis hello-interval %s level-1\n", l1); - vty_out(vty, " isis hello-interval %s level-2\n", l2); + vty_out(vty, " isis hello-interval level-1 %s\n", l1); + vty_out(vty, " isis hello-interval level-2 %s\n", l2); } } @@ -1514,8 +1585,8 @@ void cli_show_ip_isis_hello_multi(struct vty *vty, struct lyd_node *dnode, if (strmatch(l1, l2)) vty_out(vty, " isis hello-multiplier %s\n", l1); else { - vty_out(vty, " isis hello-multiplier %s level-1\n", l1); - vty_out(vty, " isis hello-multiplier %s level-2\n", l2); + vty_out(vty, " isis hello-multiplier level-1 %s\n", l1); + vty_out(vty, " isis hello-multiplier level-2 %s\n", l2); } } @@ -2001,6 +2072,8 @@ void isis_cli_init(void) install_element(ISIS_NODE, &no_lsp_refresh_interval_cmd); install_element(ISIS_NODE, &max_lsp_lifetime_cmd); install_element(ISIS_NODE, &no_max_lsp_lifetime_cmd); + install_element(ISIS_NODE, &lsp_timers_cmd); + install_element(ISIS_NODE, &no_lsp_timers_cmd); install_element(ISIS_NODE, &area_lsp_mtu_cmd); install_element(ISIS_NODE, &no_area_lsp_mtu_cmd); diff --git a/isisd/isis_dlpi.c b/isisd/isis_dlpi.c index ea16f4af7f..5c15d1d29d 100644 --- a/isisd/isis_dlpi.c +++ b/isisd/isis_dlpi.c @@ -62,7 +62,7 @@ static const uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05}; static const uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04}; #endif -static uint8_t sock_buff[8192]; +static uint8_t sock_buff[16384]; static unsigned short pf_filter[] = { ENF_PUSHWORD + 0, /* Get the SSAP/DSAP values */ diff --git a/isisd/isis_misc.c b/isisd/isis_misc.c index 5fa33f5500..96b76da92d 100644 --- a/isisd/isis_misc.c +++ b/isisd/isis_misc.c @@ -562,20 +562,12 @@ void vty_multiline(struct vty *vty, const char *prefix, const char *format, ...) 
void vty_out_timestr(struct vty *vty, time_t uptime) { - struct tm tm; time_t difftime = time(NULL); + char buf[MONOTIME_STRLEN]; + difftime -= uptime; - gmtime_r(&difftime, &tm); - - if (difftime < ONE_DAY_SECOND) - vty_out(vty, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (difftime < ONE_WEEK_SECOND) - vty_out(vty, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - vty_out(vty, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), tm.tm_hour); - vty_out(vty, " ago"); + frrtime_to_interval(difftime, buf, sizeof(buf)); + + vty_out(vty, "%s ago", buf); } diff --git a/isisd/isis_mt.c b/isisd/isis_mt.c index 36413bac59..e8e35ae63b 100644 --- a/isisd/isis_mt.c +++ b/isisd/isis_mt.c @@ -3,7 +3,7 @@ * * Copyright (C) 2017 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_mt.h b/isisd/isis_mt.h index b40139c50a..fd9ee133ca 100644 --- a/isisd/isis_mt.h +++ b/isisd/isis_mt.h @@ -3,7 +3,7 @@ * * Copyright (C) 2017 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c index d84e533240..da4322bd44 100644 --- a/isisd/isis_nb.c +++ b/isisd/isis_nb.c @@ -95,55 +95,43 @@ const struct frr_yang_module_info frr_isisd_info = { }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval", + .xpath = "/frr-isisd:isis/instance/lsp/timers", .cbs = { - .cli_show = cli_show_isis_lsp_ref_interval, + .cli_show = cli_show_isis_lsp_timers, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval/level-1", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval", .cbs = { .modify = isis_instance_lsp_refresh_interval_level_1_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval/level-2", - .cbs = { - .modify = isis_instance_lsp_refresh_interval_level_2_modify, - }, - }, - { - .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime", - .cbs = { - .cli_show = cli_show_isis_lsp_max_lifetime, - }, - }, - { - .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-1", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime", .cbs = { .modify = isis_instance_lsp_maximum_lifetime_level_1_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-2", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/generation-interval", .cbs = { - .modify = isis_instance_lsp_maximum_lifetime_level_2_modify, + .modify = isis_instance_lsp_generation_interval_level_1_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/generation-interval", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval", .cbs = { - .cli_show = cli_show_isis_lsp_gen_interval, + .modify = isis_instance_lsp_refresh_interval_level_2_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/generation-interval/level-1", + .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime", .cbs = { - .modify = isis_instance_lsp_generation_interval_level_1_modify, + .modify = isis_instance_lsp_maximum_lifetime_level_2_modify, }, }, { - .xpath = "/frr-isisd:isis/instance/lsp/generation-interval/level-2", + .xpath = 
"/frr-isisd:isis/instance/lsp/timers/level-2/generation-interval", .cbs = { .modify = isis_instance_lsp_generation_interval_level_2_modify, }, diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h index 29a2ded0de..e028dfd11b 100644 --- a/isisd/isis_nb.h +++ b/isisd/isis_nb.h @@ -427,12 +427,8 @@ void cli_show_isis_area_pwd(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_domain_pwd(struct vty *vty, struct lyd_node *dnode, bool show_defaults); -void cli_show_isis_lsp_gen_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -void cli_show_isis_lsp_ref_interval(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -void cli_show_isis_lsp_max_lifetime(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); +void cli_show_isis_lsp_timers(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); void cli_show_isis_lsp_mtu(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_spf_min_interval(struct vty *vty, struct lyd_node *dnode, diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index d14704b4ee..4347c85664 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -371,7 +371,7 @@ int isis_instance_lsp_mtu_modify(enum nb_event event, } /* - * XPath: /frr-isisd:isis/instance/lsp/refresh-interval/level-1 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval */ int isis_instance_lsp_refresh_interval_level_1_modify( enum nb_event event, const struct lyd_node *dnode, @@ -391,7 +391,7 @@ int isis_instance_lsp_refresh_interval_level_1_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/refresh-interval/level-2 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval */ int isis_instance_lsp_refresh_interval_level_2_modify( enum nb_event event, const struct lyd_node *dnode, @@ -411,7 +411,7 @@ int isis_instance_lsp_refresh_interval_level_2_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime/level-1 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime */ int isis_instance_lsp_maximum_lifetime_level_1_modify( enum nb_event event, const struct lyd_node *dnode, @@ -431,7 +431,7 @@ int isis_instance_lsp_maximum_lifetime_level_1_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime/level-2 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime */ int isis_instance_lsp_maximum_lifetime_level_2_modify( enum nb_event event, const struct lyd_node *dnode, @@ -451,7 +451,7 @@ int isis_instance_lsp_maximum_lifetime_level_2_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/generation-interval/level-1 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/generation-interval */ int isis_instance_lsp_generation_interval_level_1_modify( enum nb_event event, const struct lyd_node *dnode, @@ -471,7 +471,7 @@ int isis_instance_lsp_generation_interval_level_1_modify( } /* - * XPath: /frr-isisd:isis/instance/lsp/generation-interval/level-2 + * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/generation-interval */ int isis_instance_lsp_generation_interval_level_2_modify( enum nb_event event, const struct lyd_node *dnode, diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c index cc22aa5ffd..9153512623 100644 --- a/isisd/isis_pdu.c +++ b/isisd/isis_pdu.c @@ -1652,7 +1652,7 @@ int isis_handle_pdu(struct isis_circuit *circuit, uint8_t *ssnpa) if (length != expected_length) { flog_err(EC_ISIS_PACKET, - "Exepected fixed header length = %" PRIu8 + "Expected fixed header length = %" PRIu8 " but got %" PRIu8, 
expected_length, length); return ISIS_ERROR; diff --git a/isisd/isis_spf_private.h b/isisd/isis_spf_private.h index a8185a8be0..05aae14b94 100644 --- a/isisd/isis_spf_private.h +++ b/isisd/isis_spf_private.h @@ -117,11 +117,11 @@ static bool isis_vertex_queue_hash_cmp(const void *a, const void *b) * Compares vertizes for sorting in the TENT list. Returns true * if candidate should be considered before current, false otherwise. */ -__attribute__((__unused__)) -static int isis_vertex_queue_tent_cmp(void *a, void *b) +__attribute__((__unused__)) static int isis_vertex_queue_tent_cmp(const void *a, + const void *b) { - struct isis_vertex *va = a; - struct isis_vertex *vb = b; + const struct isis_vertex *va = a; + const struct isis_vertex *vb = b; if (va->d_N < vb->d_N) return -1; diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c index df6280e5c3..5b0b709206 100644 --- a/isisd/isis_tlvs.c +++ b/isisd/isis_tlvs.c @@ -2266,7 +2266,7 @@ static int unpack_tlv_spine_leaf(enum isis_tlv_context context, sbuf_push(log, indent, "Unpacking Spine Leaf Extension TLV...\n"); if (tlv_len < 2) { - sbuf_push(log, indent, "WARNING: Unexepected TLV size\n"); + sbuf_push(log, indent, "WARNING: Unexpected TLV size\n"); stream_forward_getp(s, tlv_len); return 0; } @@ -2382,7 +2382,7 @@ static int unpack_tlv_threeway_adj(enum isis_tlv_context context, sbuf_push(log, indent, "Unpacking P2P Three-Way Adjacency TLV...\n"); if (tlv_len != 5 && tlv_len != 15) { - sbuf_push(log, indent, "WARNING: Unexepected TLV size\n"); + sbuf_push(log, indent, "WARNING: Unexpected TLV size\n"); stream_forward_getp(s, tlv_len); return 0; } diff --git a/isisd/isis_tx_queue.c b/isisd/isis_tx_queue.c index 507fd489bc..27e57db16c 100644 --- a/isisd/isis_tx_queue.c +++ b/isisd/isis_tx_queue.c @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_tx_queue.h b/isisd/isis_tx_queue.h index c2beda45b7..f0f1184d58 100644 --- a/isisd/isis_tx_queue.h +++ b/isisd/isis_tx_queue.h @@ -3,7 +3,7 @@ * * Copyright (C) 2018 Christian Franke * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/isisd/isis_vty_fabricd.c b/isisd/isis_vty_fabricd.c index 88f7337a91..09b8d28258 100644 --- a/isisd/isis_vty_fabricd.c +++ b/isisd/isis_vty_fabricd.c @@ -115,6 +115,7 @@ DEFUN (no_triggered_csnp, static void lsp_print_flooding(struct vty *vty, struct isis_lsp *lsp) { char lspid[255]; + char buf[MONOTIME_STRLEN]; lspid_print(lsp->hdr.lsp_id, lspid, true, true); vty_out(vty, "Flooding information for %s\n", lspid); @@ -129,21 +130,10 @@ static void lsp_print_flooding(struct vty *vty, struct isis_lsp *lsp) lsp->flooding_interface : "(null)"); time_t uptime = time(NULL) - lsp->flooding_time; - struct tm tm; - gmtime_r(&uptime, &tm); + frrtime_to_interval(uptime, buf, sizeof(buf)); - if (uptime < ONE_DAY_SECOND) - vty_out(vty, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - vty_out(vty, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - vty_out(vty, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), - tm.tm_hour); - vty_out(vty, " ago)\n"); + vty_out(vty, 
"%s ago)\n", buf); if (lsp->flooding_circuit_scoped) { vty_out(vty, " Received as circuit-scoped LSP, so not " diff --git a/isisd/isisd.c b/isisd/isisd.c index 47d2e9faab..c61c7f0e0c 100644 --- a/isisd/isisd.c +++ b/isisd/isisd.c @@ -137,17 +137,17 @@ struct isis_area *isis_area_create(const char *area_tag) enum isis_metric_style default_style; area->max_lsp_lifetime[0] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-1"); + "/frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime"); area->max_lsp_lifetime[1] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-2"); + "/frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime"); area->lsp_refresh[0] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/refresh-interval/level-1"); + "/frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval"); area->lsp_refresh[1] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/refresh-interval/level-2"); + "/frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval"); area->lsp_gen_interval[0] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/generation-interval/level-1"); + "/frr-isisd:isis/instance/lsp/timers/level-1/generation-interval"); area->lsp_gen_interval[1] = yang_get_default_uint16( - "/frr-isisd:isis/instance/lsp/generation-interval/level-2"); + "/frr-isisd:isis/instance/lsp/timers/level-2/generation-interval"); area->min_spf_interval[0] = yang_get_default_uint16( "/frr-isisd:isis/instance/spf/minimum-interval/level-1"); area->min_spf_interval[1] = yang_get_default_uint16( diff --git a/ldpd/lde.c b/ldpd/lde.c index 5f94031320..ae883078dd 100644 --- a/ldpd/lde.c +++ b/ldpd/lde.c @@ -61,6 +61,8 @@ static void lde_label_list_init(void); static int lde_get_label_chunk(void); static void on_get_label_chunk_response(uint32_t start, uint32_t end); static uint32_t lde_get_next_label(void); +static bool lde_fec_connected(const struct fec_node *); +static bool lde_fec_outside_mpls_network(const struct fec_node *); RB_GENERATE(nbr_tree, lde_nbr, entry, lde_nbr_compare) RB_GENERATE(lde_map_head, lde_map, entry, lde_map_compare) @@ -658,18 +660,31 @@ lde_acl_check(char *acl_name, int af, union ldpd_addr *addr, uint8_t prefixlen) return ldp_acl_request(iev_main_sync, acl_name, af, addr, prefixlen); } +static bool lde_fec_connected(const struct fec_node *fn) +{ + struct fec_nh *fnh; + + LIST_FOREACH(fnh, &fn->nexthops, entry) + if (fnh->flags & F_FEC_NH_CONNECTED) + return true; + + return false; +} + +static bool lde_fec_outside_mpls_network(const struct fec_node *fn) +{ + struct fec_nh *fnh; + + LIST_FOREACH(fnh, &fn->nexthops, entry) + if (!(fnh->flags & F_FEC_NH_NO_LDP)) + return false; + + return true; +} + uint32_t lde_update_label(struct fec_node *fn) { - struct fec_nh *fnh; - int connected = 0; - - LIST_FOREACH(fnh, &fn->nexthops, entry) { - if (fnh->flags & F_FEC_NH_CONNECTED) { - connected = 1; - break; - } - } /* should we allocate a label for this fec? */ switch (fn->fec.type) { @@ -695,7 +710,14 @@ lde_update_label(struct fec_node *fn) break; } - if (connected) { + /* + * If connected interface act as egress for fec. 
+ * If LDP is not configured on an interface but there + * are other NHs with interfaces configured with LDP + * then don't act as an egress for the fec, otherwise + * act as an egress for the fec + */ + if (lde_fec_connected(fn) || lde_fec_outside_mpls_network(fn)) { /* choose implicit or explicit-null depending on configuration */ switch (fn->fec.type) { case FEC_TYPE_IPV4: @@ -735,6 +757,13 @@ lde_send_change_klabel(struct fec_node *fn, struct fec_nh *fnh) struct zapi_pw zpw; struct l2vpn_pw *pw; + /* + * Ordered Control: don't program label into HW until a + * labelmap msg has been received from upstream router + */ + if (fnh->flags & F_FEC_NH_DEFER) + return; + switch (fn->fec.type) { case FEC_TYPE_IPV4: memset(&kr, 0, sizeof(kr)); @@ -901,6 +930,27 @@ lde_send_labelmapping(struct lde_nbr *ln, struct fec_node *fn, int single) struct lde_req *lre; struct map map; struct l2vpn_pw *pw; + struct fec_nh *fnh; + bool allow = false; + + /* + * Ordered Control: do not send a labelmap msg until + * a labelmap message is received from downstream router + * and don't send labelmap back to downstream router + */ + if (ldeconf->flags & F_LDPD_ORDERED_CONTROL) { + LIST_FOREACH(fnh, &fn->nexthops, entry) { + if (fnh->flags & F_FEC_NH_DEFER) + continue; + + if (lde_address_find(ln, fnh->af, &fnh->nexthop)) + return; + allow = true; + break; + } + if (!allow) + return; + } /* * We shouldn't send a new label mapping if we have a pending @@ -1241,6 +1291,7 @@ lde_nbr_del(struct lde_nbr *ln) struct fec_node *fn; struct fec_nh *fnh; struct l2vpn_pw *pw; + struct lde_nbr *lnbr; if (ln == NULL) return; @@ -1256,6 +1307,25 @@ lde_nbr_del(struct lde_nbr *ln) if (!lde_address_find(ln, fnh->af, &fnh->nexthop)) continue; + + /* + * Ordered Control: must mark any non-connected + * NH to wait until we receive a labelmap msg + * before installing in kernel and sending to + * peer, must do this as NHs are not removed + * when lsps go down. Also send label withdraw + * to other neighbors for all fecs from neighbor + * going down + */ + if (ldeconf->flags & F_LDPD_ORDERED_CONTROL) { + fnh->flags |= F_FEC_NH_DEFER; + + RB_FOREACH(lnbr, nbr_tree, &lde_nbrs) { + if (ln->peerid == lnbr->peerid) + continue; + lde_send_labelwithdraw(lnbr, fn, NULL, NULL); + } + } break; case FEC_TYPE_PWID: if (f->u.pwid.lsr_id.s_addr != ln->id.s_addr) @@ -1567,6 +1637,56 @@ lde_change_egress_label(int af) NULL, 0); } +void +lde_change_host_label(int af) +{ + struct lde_nbr *ln; + struct fec *f; + struct fec_node *fn; + uint32_t new_label; + + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + + switch (af) { + case AF_INET: + if (fn->fec.type != FEC_TYPE_IPV4) + continue; + break; + case AF_INET6: + if (fn->fec.type != FEC_TYPE_IPV6) + continue; + break; + default: + fatalx("lde_change_host_label: unknown af"); + } + + /* + * If the local label has changed to NO_LABEL, send a label + * withdraw to all peers. + * If the local label has changed and it's different from + * NO_LABEL, send a label mapping to all peers advertising + * the new label. 
+ * If the local label hasn't changed, do nothing + */ + new_label = lde_update_label(fn); + if (fn->local_label != new_label) { + if (new_label == NO_LABEL) + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelwithdraw(ln, fn, + NULL, NULL); + + fn->local_label = new_label; + if (fn->local_label != NO_LABEL) + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelmapping(ln, fn, 0); + } + } + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_imsg_compose_ldpe(IMSG_MAPPING_ADD_END, ln->peerid, 0, + NULL, 0); +} + static int lde_address_add(struct lde_nbr *ln, struct lde_addr *lde_addr) { @@ -1628,8 +1748,11 @@ lde_address_list_free(struct lde_nbr *ln) static void zclient_sync_init(unsigned short instance) { + struct zclient_options options = zclient_options_default; + options.synchronous = true; + /* Initialize special zclient for synchronous message exchanges. */ - zclient_sync = zclient_new(master, &zclient_options_default); + zclient_sync = zclient_new(master, &options); zclient_sync->sock = -1; zclient_sync->redist_default = ZEBRA_ROUTE_LDP; zclient_sync->instance = instance; @@ -1642,6 +1765,12 @@ static void zclient_sync_init(unsigned short instance) /* make socket non-blocking */ sock_set_nonblock(zclient_sync->sock); + /* Send hello to notify zebra this is a synchronous client */ + while (zclient_send_hello(zclient_sync) < 0) { + log_warnx("Error sending hello for synchronous zclient!"); + sleep(1); + } + /* Connect to label manager */ while (lm_label_manager_connect(zclient_sync, 0) != 0) { log_warnx("Error connecting to label manager!"); diff --git a/ldpd/lde.h b/ldpd/lde.h index ce466c16b9..36196a3d08 100644 --- a/ldpd/lde.h +++ b/ldpd/lde.h @@ -114,6 +114,8 @@ struct fec_nh { }; #define F_FEC_NH_NEW 0x01 #define F_FEC_NH_CONNECTED 0x02 +#define F_FEC_NH_DEFER 0x04 /* running ordered control */ +#define F_FEC_NH_NO_LDP 0x08 /* no ldp on this interface */ struct fec_node { struct fec fec; @@ -181,6 +183,7 @@ void lde_req_del(struct lde_nbr *, struct lde_req *, int); struct lde_wdraw *lde_wdraw_add(struct lde_nbr *, struct fec_node *); void lde_wdraw_del(struct lde_nbr *, struct lde_wdraw *); void lde_change_egress_label(int); +void lde_change_host_label(int); struct lde_addr *lde_address_find(struct lde_nbr *, int, union ldpd_addr *); diff --git a/ldpd/lde_lib.c b/ldpd/lde_lib.c index eb1a6d9434..8f524e0aa9 100644 --- a/ldpd/lde_lib.c +++ b/ldpd/lde_lib.c @@ -20,6 +20,7 @@ #include <zebra.h> #include "ldpd.h" +#include "ldpe.h" #include "lde.h" #include "log.h" @@ -325,6 +326,7 @@ lde_kernel_insert(struct fec *fec, int af, union ldpd_addr *nexthop, { struct fec_node *fn; struct fec_nh *fnh; + struct iface *iface; fn = (struct fec_node *)fec_find(&ft, fec); if (fn == NULL) @@ -333,9 +335,21 @@ lde_kernel_insert(struct fec *fec, int af, union ldpd_addr *nexthop, fn->data = data; fnh = fec_nh_find(fn, af, nexthop, ifindex, route_type, route_instance); - if (fnh == NULL) + if (fnh == NULL) { fnh = fec_nh_add(fn, af, nexthop, ifindex, route_type, route_instance); + /* + * Ordered Control: if not a connected route and not a route + * learned over an interface not running LDP and not a PW + * then mark to wait until we receive labelmap msg before + * installing in kernel and sending to peer + */ + iface = if_lookup(ldeconf, ifindex); + if ((ldeconf->flags & F_LDPD_ORDERED_CONTROL) && + !connected && iface != NULL && fec->type != FEC_TYPE_PWID) + fnh->flags |= F_FEC_NH_DEFER; + } + fnh->flags |= F_FEC_NH_NEW; if (connected) fnh->flags |= F_FEC_NH_CONNECTED; @@ -374,15 +388,25 @@ 
lde_kernel_update(struct fec *fec) struct fec_nh *fnh, *safe; struct lde_nbr *ln; struct lde_map *me; + struct iface *iface; fn = (struct fec_node *)fec_find(&ft, fec); if (fn == NULL) return; LIST_FOREACH_SAFE(fnh, &fn->nexthops, entry, safe) { - if (fnh->flags & F_FEC_NH_NEW) + if (fnh->flags & F_FEC_NH_NEW) { fnh->flags &= ~F_FEC_NH_NEW; - else { + /* + * if LDP configured on interface or a static route + * clear flag else treat fec as a connected route + */ + iface = if_lookup(ldeconf,fnh->ifindex); + if (iface || fnh->route_type == ZEBRA_ROUTE_STATIC) + fnh->flags &=~F_FEC_NH_NO_LDP; + else + fnh->flags |= F_FEC_NH_NO_LDP; + } else { lde_send_delete_klabel(fn, fnh); fec_nh_del(fnh); } @@ -445,6 +469,7 @@ lde_check_mapping(struct map *map, struct lde_nbr *ln) struct lde_req *lre; struct lde_map *me; struct l2vpn_pw *pw; + bool send_map = false; lde_map2fec(map, ln->id, &fec); @@ -525,6 +550,15 @@ lde_check_mapping(struct map *map, struct lde_nbr *ln) if (!lde_address_find(ln, fnh->af, &fnh->nexthop)) continue; + /* + * Ordered Control: labelmap msg received from + * NH so clear flag and send labelmap msg to + * peer + */ + if (ldeconf->flags & F_LDPD_ORDERED_CONTROL) { + send_map = true; + fnh->flags &= ~F_FEC_NH_DEFER; + } fnh->remote_label = map->label; lde_send_change_klabel(fn, fnh); break; @@ -558,6 +592,15 @@ lde_check_mapping(struct map *map, struct lde_nbr *ln) * loop detection. LMp.28 - LMp.30 are unnecessary because we are * merging capable. */ + + /* + * Ordered Control: just received a labelmap for this fec from NH so + * need to send labelmap to all peers + * LMp.20 - LMp21 Execute procedure to send Label Mapping + */ + if (send_map && fn->local_label != NO_LABEL) + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelmapping(ln, fn, 1); } void @@ -757,6 +800,7 @@ lde_check_withdraw(struct map *map, struct lde_nbr *ln) struct fec_nh *fnh; struct lde_map *me; struct l2vpn_pw *pw; + struct lde_nbr *lnbr; /* wildcard label withdraw */ if (map->type == MAP_TYPE_WILDCARD || @@ -803,6 +847,26 @@ lde_check_withdraw(struct map *map, struct lde_nbr *ln) if (me && (map->label == NO_LABEL || map->label == me->map.label)) /* LWd.4: remove record of previously received lbl mapping */ lde_map_del(ln, me, 0); + + /* Ordered Control: additional withdraw steps */ + if (ldeconf->flags & F_LDPD_ORDERED_CONTROL) { + /* LWd.8: for each neighbor other that src of withdraw msg */ + RB_FOREACH(lnbr, nbr_tree, &lde_nbrs) { + if (ln->peerid == lnbr->peerid) + continue; + + /* LWd.9: check if previously sent a label mapping */ + me = (struct lde_map *)fec_find(&lnbr->sent_map, + &fn->fec); + /* + * LWd.10: does label sent to peer "map" to withdraw + * label + */ + if (me) + /* LWd.11: send label withdraw */ + lde_send_labelwithdraw(lnbr, fn, NULL, NULL); + } + } } void @@ -813,6 +877,7 @@ lde_check_withdraw_wcard(struct map *map, struct lde_nbr *ln) struct fec_nh *fnh; struct lde_map *me; struct l2vpn_pw *pw; + struct lde_nbr *lnbr; /* LWd.2: send label release */ lde_send_labelrelease(ln, NULL, map, map->label); @@ -859,6 +924,26 @@ lde_check_withdraw_wcard(struct map *map, struct lde_nbr *ln) * label mapping */ lde_map_del(ln, me, 0); + + /* Ordered Control: additional withdraw steps */ + if (ldeconf->flags & F_LDPD_ORDERED_CONTROL) { + /* LWd.8: for each neighbor other that src of withdraw msg */ + RB_FOREACH(lnbr, nbr_tree, &lde_nbrs) { + if (ln->peerid == lnbr->peerid) + continue; + + /* LWd.9: check if previously sent a label mapping */ + me = (struct lde_map *)fec_find(&lnbr->sent_map, + 
&fn->fec); + /* + * LWd.10: does label sent to peer "map" to withdraw + * label + */ + if (me) + /* LWd.11: send label withdraw */ + lde_send_labelwithdraw(lnbr, fn, NULL, NULL); + } + } } } diff --git a/ldpd/ldp_vty.h b/ldpd/ldp_vty.h index 5e9df4aafe..af5f1d5616 100644 --- a/ldpd/ldp_vty.h +++ b/ldpd/ldp_vty.h @@ -52,6 +52,7 @@ int ldp_vty_label_expnull(struct vty *, const char *, const char *); int ldp_vty_label_accept(struct vty *, const char *, const char *, const char *); int ldp_vty_ttl_security(struct vty *, const char *); int ldp_vty_router_id(struct vty *, const char *, struct in_addr); +int ldp_vty_ordered_control(struct vty *, const char *); int ldp_vty_ds_cisco_interop(struct vty *, const char *); int ldp_vty_trans_pref_ipv4(struct vty *, const char *); int ldp_vty_neighbor_password(struct vty *, const char *, struct in_addr, const char *); diff --git a/ldpd/ldp_vty_cmds.c b/ldpd/ldp_vty_cmds.c index c24e1917cc..c10c6ae35c 100644 --- a/ldpd/ldp_vty_cmds.c +++ b/ldpd/ldp_vty_cmds.c @@ -221,6 +221,15 @@ DEFPY (ldp_router_id, return (ldp_vty_router_id(vty, no, address)); } +DEFPY (ldp_ordered_control, + ldp_ordered_control_cmd, + "[no] ordered-control", + NO_STR + "Configure LDP ordered label distribution control mode\n") +{ + return (ldp_vty_ordered_control(vty, no)); +} + DEFPY (ldp_discovery_targeted_hello_accept, ldp_discovery_targeted_hello_accept_cmd, "[no] discovery targeted-hello accept [from <(1-199)|(1300-2699)|WORD>$from_acl]", @@ -807,6 +816,7 @@ ldp_vty_init (void) install_element(LDP_NODE, &ldp_neighbor_session_holdtime_cmd); install_element(LDP_NODE, &ldp_neighbor_ttl_security_cmd); install_element(LDP_NODE, &ldp_router_id_cmd); + install_element(LDP_NODE, &ldp_ordered_control_cmd); install_element(LDP_IPV4_NODE, &ldp_discovery_link_holdtime_cmd); install_element(LDP_IPV4_NODE, &ldp_discovery_targeted_holdtime_cmd); diff --git a/ldpd/ldp_vty_conf.c b/ldpd/ldp_vty_conf.c index 816fcc64b8..05b8962563 100644 --- a/ldpd/ldp_vty_conf.c +++ b/ldpd/ldp_vty_conf.c @@ -278,6 +278,9 @@ ldp_config_write(struct vty *vty) if (ldpd_conf->flags & F_LDPD_DS_CISCO_INTEROP) vty_out (vty, " dual-stack cisco-interop\n"); + if (ldpd_conf->flags & F_LDPD_ORDERED_CONTROL) + vty_out (vty, " ordered-control\n"); + RB_FOREACH(nbrp, nbrp_head, &ldpd_conf->nbrp_tree) { if (nbrp->flags & F_NBRP_KEEPALIVE) vty_out (vty, " neighbor %s session holdtime %u\n", @@ -997,6 +1000,19 @@ ldp_vty_router_id(struct vty *vty, const char *negate, struct in_addr address) } int +ldp_vty_ordered_control(struct vty *vty, const char *negate) +{ + if (negate) + vty_conf->flags &= ~F_LDPD_ORDERED_CONTROL; + else + vty_conf->flags |= F_LDPD_ORDERED_CONTROL; + + ldp_config_apply(vty, vty_conf); + + return (CMD_SUCCESS); +} + +int ldp_vty_ds_cisco_interop(struct vty *vty, const char * negate) { if (negate) diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c index 78b1c3e544..741c8c4655 100644 --- a/ldpd/ldpd.c +++ b/ldpd/ldpd.c @@ -1285,6 +1285,14 @@ merge_global(struct ldpd_conf *conf, struct ldpd_conf *xconf) conf->rtr_id = xconf->rtr_id; } + /* + * Configuration of ordered-control or independent-control + * requires resetting all neighborships. 
+ */ + if ((conf->flags & F_LDPD_ORDERED_CONTROL) != + (xconf->flags & F_LDPD_ORDERED_CONTROL)) + ldpe_reset_nbrs(AF_UNSPEC); + conf->lhello_holdtime = xconf->lhello_holdtime; conf->lhello_interval = xconf->lhello_interval; conf->thello_holdtime = xconf->thello_holdtime; @@ -1311,6 +1319,7 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) int stop_init_backoff = 0; int remove_dynamic_tnbrs = 0; int change_egress_label = 0; + int change_host_label = 0; int reset_nbrs_ipv4 = 0; int reset_nbrs = 0; int update_sockets = 0; @@ -1341,6 +1350,12 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) if ((af_conf->flags & F_LDPD_AF_EXPNULL) != (xa->flags & F_LDPD_AF_EXPNULL)) change_egress_label = 1; + + /* changing config of host only fec filtering */ + if ((af_conf->flags & F_LDPD_AF_ALLOCHOSTONLY) + != (xa->flags & F_LDPD_AF_ALLOCHOSTONLY)) + change_host_label = 1; + af_conf->flags = xa->flags; /* update the transport address */ @@ -1350,6 +1365,10 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) } /* update ACLs */ + if (strcmp(af_conf->acl_label_allocate_for, + xa->acl_label_allocate_for)) + change_host_label = 1; + if (strcmp(af_conf->acl_label_advertise_to, xa->acl_label_advertise_to) || strcmp(af_conf->acl_label_advertise_for, @@ -1383,6 +1402,8 @@ merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) case PROC_LDE_ENGINE: if (change_egress_label) lde_change_egress_label(af); + if (change_host_label) + lde_change_host_label(af); break; case PROC_LDP_ENGINE: if (stop_init_backoff) diff --git a/ldpd/ldpd.h b/ldpd/ldpd.h index 006780f032..a736b4ca37 100644 --- a/ldpd/ldpd.h +++ b/ldpd/ldpd.h @@ -511,6 +511,8 @@ DECLARE_QOBJ_TYPE(ldpd_conf) #define F_LDPD_NO_FIB_UPDATE 0x0001 #define F_LDPD_DS_CISCO_INTEROP 0x0002 #define F_LDPD_ENABLED 0x0004 +#define F_LDPD_ORDERED_CONTROL 0x0008 + struct ldpd_af_global { struct thread *disc_ev; diff --git a/ldpd/socket.c b/ldpd/socket.c index 4909ea7ad8..e865707d44 100644 --- a/ldpd/socket.c +++ b/ldpd/socket.c @@ -320,7 +320,7 @@ sock_set_md5sig(int fd, int af, union ldpd_addr *addr, const char *password) int sock_set_ipv4_tos(int fd, int tos) { - if (setsockopt(fd, IPPROTO_IP, IP_TOS, (int *)&tos, sizeof(tos)) < 0) { + if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0) { log_warn("%s: error setting IP_TOS to 0x%x", __func__, tos); return (-1); } diff --git a/lib/agg_table.h b/lib/agg_table.h index 40ffe8c755..e98476f1b7 100644 --- a/lib/agg_table.h +++ b/lib/agg_table.h @@ -86,13 +86,13 @@ static inline struct agg_node *agg_route_next(struct agg_node *node) } static inline struct agg_node *agg_node_get(struct agg_table *table, - struct prefix *p) + const struct prefix *p) { return agg_node_from_rnode(route_node_get(table->route_table, p)); } static inline struct agg_node * -agg_node_lookup(const struct agg_table *const table, struct prefix *p) +agg_node_lookup(const struct agg_table *const table, const struct prefix *p) { return agg_node_from_rnode(route_node_lookup(table->route_table, p)); } @@ -109,7 +109,7 @@ static inline struct agg_node *agg_route_next_until(struct agg_node *node, } static inline struct agg_node *agg_node_match(struct agg_table *table, - struct prefix *p) + const struct prefix *p) { return agg_node_from_rnode(route_node_match(table->route_table, p)); } @@ -155,6 +155,16 @@ static inline struct agg_table *agg_get_table(struct agg_node *node) return (struct agg_table *)route_table_get_info(node->table); } +static inline const struct 
prefix * +agg_node_get_prefix(const struct agg_node *node) +{ + return &node->p; +} + +#ifdef _FRR_ATTRIBUTE_PRINTFRR +#pragma FRR printfrr_ext "%pRN" (struct agg_node *) +#endif + #ifdef __cplusplus } #endif diff --git a/lib/command_match.c b/lib/command_match.c index 0195aebc17..801b05f157 100644 --- a/lib/command_match.c +++ b/lib/command_match.c @@ -88,7 +88,7 @@ enum matcher_rv command_match(struct graph *cmdgraph, vector vline, // prepend a dummy token to match that pesky start node vector vvline = vector_init(vline->alloced + 1); - vector_set_index(vvline, 0, (void *)XSTRDUP(MTYPE_TMP, "dummy")); + vector_set_index(vvline, 0, XSTRDUP(MTYPE_TMP, "dummy")); memcpy(vvline->index + 1, vline->index, sizeof(void *) * vline->alloced); vvline->active = vline->active + 1; diff --git a/lib/compiler.h b/lib/compiler.h index e430925e69..217a60d888 100644 --- a/lib/compiler.h +++ b/lib/compiler.h @@ -305,7 +305,14 @@ extern "C" { #include <inttypes.h> #ifdef _FRR_ATTRIBUTE_PRINTFRR -#define PRINTFRR(a, b) __attribute__((printfrr(a, b))) +#define PRINTFRR(a, b) __attribute__((frr_format("frr_printf", a, b))) + +#undef PRIu64 +#undef PRId64 +#undef PRIx64 +#define PRIu64 "Lu" +#define PRId64 "Ld" +#define PRIx64 "Lx" #else /* !_FRR_ATTRIBUTE_PRINTFRR */ #define PRINTFRR(a, b) __attribute__((format(printf, a, b))) diff --git a/lib/frrlua.c b/lib/frrlua.c index 26610556dc..9f9cf8c1f6 100644 --- a/lib/frrlua.c +++ b/lib/frrlua.c @@ -5,7 +5,7 @@ * Copyright (C) 2016 Cumulus Networks, Inc. * Donald Sharp * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software diff --git a/lib/frrlua.h b/lib/frrlua.h index 374eb70311..40c7a67b89 100644 --- a/lib/frrlua.h +++ b/lib/frrlua.h @@ -5,7 +5,7 @@ * Copyright (C) 2016 Cumulus Networks, Inc. * Donald Sharp * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software diff --git a/lib/grammar_sandbox_main.c b/lib/grammar_sandbox_main.c index 4bd8f5138a..aa54720dab 100644 --- a/lib/grammar_sandbox_main.c +++ b/lib/grammar_sandbox_main.c @@ -7,7 +7,7 @@ * Copyright (C) 2016 Cumulus Networks, Inc. * Copyright (C) 2017 David Lamparter for NetDEF, Inc. * - * This file is part of FreeRangeRouting (FRR). + * This file is part of FRRouting (FRR). * * FRR is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software @@ -58,7 +58,7 @@ int main(int argc, char **argv) vty_init(master, true); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); vty_stdio(vty_do_exit); @@ -582,23 +582,39 @@ struct interface *if_get_by_ifindex(ifindex_t ifindex, vrf_id_t vrf_id) return NULL; } -void if_set_index(struct interface *ifp, ifindex_t ifindex) +int if_set_index(struct interface *ifp, ifindex_t ifindex) { struct vrf *vrf; + if (ifp->ifindex == ifindex) + return 0; + vrf = vrf_get(ifp->vrf_id, NULL); assert(vrf); - if (ifp->ifindex == ifindex) - return; + /* + * If there is already an interface with this ifindex, we will collide + * on insertion, so don't even try. 
+ */ + if (if_lookup_by_ifindex(ifindex, ifp->vrf_id)) + return -1; if (ifp->ifindex != IFINDEX_INTERNAL) IFINDEX_RB_REMOVE(vrf, ifp); ifp->ifindex = ifindex; - if (ifp->ifindex != IFINDEX_INTERNAL) - IFINDEX_RB_INSERT(vrf, ifp) + if (ifp->ifindex != IFINDEX_INTERNAL) { + /* + * This should never happen, since we checked if there was + * already an interface with the desired ifindex at the top of + * the function. Nevertheless. + */ + if (IFINDEX_RB_INSERT(vrf, ifp)) + return -1; + } + + return 0; } void if_set_name(struct interface *ifp, const char *name) @@ -1249,8 +1265,6 @@ struct if_link_params *if_link_params_get(struct interface *ifp) struct if_link_params *iflp = XCALLOC(MTYPE_IF_LINK_PARAMS, sizeof(struct if_link_params)); - if (iflp == NULL) - return NULL; /* Set TE metric equal to standard metric */ iflp->te_metric = ifp->metric; @@ -1278,8 +1292,6 @@ struct if_link_params *if_link_params_get(struct interface *ifp) void if_link_params_free(struct interface *ifp) { - if (ifp->link_params == NULL) - return; XFREE(MTYPE_IF_LINK_PARAMS, ifp->link_params); } @@ -1657,31 +1669,7 @@ static int lib_interface_description_destroy(enum nb_event event, /* clang-format off */ -#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__) -/* gcc versions before 5.x miscalculate the size for structs with variable - * length arrays (they just count it as size 0) - */ -struct frr_yang_module_info_size3 { - /* YANG module name. */ - const char *name; - - /* Northbound callbacks. */ - const struct { - /* Data path of this YANG node. */ - const char *xpath; - - /* Callbacks implemented for this node. */ - struct nb_callbacks cbs; - - /* Priority - lower priorities are processed first. */ - uint32_t priority; - } nodes[3]; -}; - -const struct frr_yang_module_info_size3 frr_interface_info_size3 asm("frr_interface_info") = { -#else const struct frr_yang_module_info frr_interface_info = { -#endif .name = "frr-interface", .nodes = { { @@ -308,33 +308,58 @@ RB_HEAD(if_index_head, interface); RB_PROTOTYPE(if_index_head, interface, index_entry, if_cmp_index_func) DECLARE_QOBJ_TYPE(interface) -#define IFNAME_RB_INSERT(vrf, ifp) \ - if (RB_INSERT(if_name_head, &vrf->ifaces_by_name, (ifp))) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%s): corruption detected -- interface with this " \ - "name exists already in VRF %u!", \ - __func__, (ifp)->name, (ifp)->vrf_id); - -#define IFNAME_RB_REMOVE(vrf, ifp) \ - if (RB_REMOVE(if_name_head, &vrf->ifaces_by_name, (ifp)) == NULL) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%s): corruption detected -- interface with this " \ - "name doesn't exist in VRF %u!", \ - __func__, (ifp)->name, (ifp)->vrf_id); - -#define IFINDEX_RB_INSERT(vrf, ifp) \ - if (RB_INSERT(if_index_head, &vrf->ifaces_by_index, (ifp))) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%u): corruption detected -- interface with this " \ - "ifindex exists already in VRF %u!", \ - __func__, (ifp)->ifindex, (ifp)->vrf_id); - -#define IFINDEX_RB_REMOVE(vrf, ifp) \ - if (RB_REMOVE(if_index_head, &vrf->ifaces_by_index, (ifp)) == NULL) \ - flog_err(EC_LIB_INTERFACE, \ - "%s(%u): corruption detected -- interface with this " \ - "ifindex doesn't exist in VRF %u!", \ - __func__, (ifp)->ifindex, (ifp)->vrf_id); +#define IFNAME_RB_INSERT(vrf, ifp) \ + ({ \ + struct interface *_iz = \ + RB_INSERT(if_name_head, &vrf->ifaces_by_name, (ifp)); \ + if (_iz) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%s): corruption detected -- interface with this " \ + "name exists already in VRF %u!", \ + __func__, (ifp)->name, (ifp)->vrf_id); \ + 
_iz; \ + }) + +#define IFNAME_RB_REMOVE(vrf, ifp) \ + ({ \ + struct interface *_iz = \ + RB_REMOVE(if_name_head, &vrf->ifaces_by_name, (ifp)); \ + if (_iz == NULL) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%s): corruption detected -- interface with this " \ + "name doesn't exist in VRF %u!", \ + __func__, (ifp)->name, (ifp)->vrf_id); \ + _iz; \ + }) + + +#define IFINDEX_RB_INSERT(vrf, ifp) \ + ({ \ + struct interface *_iz = RB_INSERT( \ + if_index_head, &vrf->ifaces_by_index, (ifp)); \ + if (_iz) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%u): corruption detected -- interface with this " \ + "ifindex exists already in VRF %u!", \ + __func__, (ifp)->ifindex, (ifp)->vrf_id); \ + _iz; \ + }) + +#define IFINDEX_RB_REMOVE(vrf, ifp) \ + ({ \ + struct interface *_iz = RB_REMOVE( \ + if_index_head, &vrf->ifaces_by_index, (ifp)); \ + if (_iz == NULL) \ + flog_err( \ + EC_LIB_INTERFACE, \ + "%s(%u): corruption detected -- interface with this " \ + "ifindex doesn't exist in VRF %u!", \ + __func__, (ifp)->ifindex, (ifp)->vrf_id); \ + _iz; \ + }) #define FOR_ALL_INTERFACES(vrf, ifp) \ if (vrf) \ @@ -502,7 +527,7 @@ extern struct interface *if_get_by_name(const char *ifname, vrf_id_t vrf_id); extern struct interface *if_get_by_ifindex(ifindex_t ifindex, vrf_id_t vrf_id); /* Sets the index and adds to index list */ -extern void if_set_index(struct interface *ifp, ifindex_t ifindex); +extern int if_set_index(struct interface *ifp, ifindex_t ifindex); /* Sets the name and adds to name list */ extern void if_set_name(struct interface *ifp, const char *name); diff --git a/lib/ipaddr.h b/lib/ipaddr.h index c6372f1abb..cd7f79a04e 100644 --- a/lib/ipaddr.h +++ b/lib/ipaddr.h @@ -112,7 +112,7 @@ static inline void ipv4_to_ipv4_mapped_ipv6(struct in6_addr *in6, /* * convert an ipv4 mapped ipv6 address back to ipv4 address */ -static inline void ipv4_mapped_ipv6_to_ipv4(struct in6_addr *in6, +static inline void ipv4_mapped_ipv6_to_ipv4(const struct in6_addr *in6, struct in_addr *in) { memset(in, 0, sizeof(struct in_addr)); diff --git a/lib/lib_vty.c b/lib/lib_vty.c index 787da08e28..9c927ca4af 100644 --- a/lib/lib_vty.c +++ b/lib/lib_vty.c @@ -93,7 +93,7 @@ static int qmem_walker(void *arg, struct memgroup *mg, struct memtype *mt) #endif ); } else { - if (mt->n_alloc != 0) { + if (mt->n_max != 0) { char size[32]; snprintf(size, sizeof(size), "%6zu", mt->size); #ifdef HAVE_MALLOC_USABLE_SIZE diff --git a/lib/libfrr.c b/lib/libfrr.c index 3622890e46..9a681103d4 100644 --- a/lib/libfrr.c +++ b/lib/libfrr.c @@ -717,7 +717,7 @@ struct thread_master *frr_init(void) log_ref_vty_init(); lib_error_init(); - yang_init(); + yang_init(true); debug_init_cli(); diff --git a/lib/libfrr.h b/lib/libfrr.h index f964c9e2a1..9d91ea9154 100644 --- a/lib/libfrr.h +++ b/lib/libfrr.h @@ -128,7 +128,8 @@ extern void frr_preinit(struct frr_daemon_info *daemon, int argc, char **argv); extern void frr_opt_add(const char *optstr, const struct option *longopts, const char *helpstr); extern int frr_getopt(int argc, char *const argv[], int *longindex); -extern void frr_help_exit(int status); + +extern __attribute__((__noreturn__)) void frr_help_exit(int status); extern struct thread_master *frr_init(void); extern const char *frr_get_progname(void); @@ -1228,59 +1228,47 @@ int proto_redistnum(int afi, const char *s) return -1; } -void zlog_hexdump(const void *mem, unsigned int len) +void zlog_hexdump(const void *mem, size_t len) { - unsigned long i = 0; - unsigned int j = 0; - unsigned int columns = 8; - /* - * 19 bytes for 0xADDRESS: - * 24 
bytes for data; 2 chars plus a space per data byte - * 1 byte for space - * 8 bytes for ASCII representation - * 1 byte for a newline - * ===================== - * 53 bytes per 8 bytes of data - * 1 byte for null term - */ - size_t bs = ((len / 8) + 1) * 53 + 1; - char buf[bs]; - char *s = buf; - const unsigned char *memch = mem; - - memset(buf, 0, sizeof(buf)); - - for (i = 0; i < len + ((len % columns) ? (columns - len % columns) : 0); - i++) { - /* print offset */ - if (i % columns == 0) - s += snprintf(s, bs - (s - buf), - "0x%016lx: ", (unsigned long)memch + i); - - /* print hex data */ - if (i < len) - s += snprintf(s, bs - (s - buf), "%02x ", memch[i]); - - /* end of block, just aligning for ASCII dump */ - else - s += snprintf(s, bs - (s - buf), " "); - - /* print ASCII dump */ - if (i % columns == (columns - 1)) { - for (j = i - (columns - 1); j <= i; j++) { - /* end of block not really printing */ - if (j >= len) - s += snprintf(s, bs - (s - buf), " "); - else if (isprint(memch[j])) - s += snprintf(s, bs - (s - buf), "%c", - memch[j]); - else /* other char */ - s += snprintf(s, bs - (s - buf), "."); - } - s += snprintf(s, bs - (s - buf), "\n"); + char line[64]; + const uint8_t *src = mem; + const uint8_t *end = src + len; + + if (len == 0) { + zlog_debug("%016lx: (zero length / no data)", (long)src); + return; + } + + while (src < end) { + struct fbuf fb = { + .buf = line, + .pos = line, + .len = sizeof(line), + }; + const uint8_t *lineend = src + 8; + unsigned line_bytes = 0; + + bprintfrr(&fb, "%016lx: ", (long)src); + + while (src < lineend && src < end) { + bprintfrr(&fb, "%02x ", *src++); + line_bytes++; + } + if (line_bytes < 8) + bprintfrr(&fb, "%*s", (8 - line_bytes) * 3, ""); + + src -= line_bytes; + while (src < lineend && src < end && fb.pos < fb.buf + fb.len) { + uint8_t byte = *src++; + + if (isprint(byte)) + *fb.pos++ = byte; + else + *fb.pos++ = '.'; } + + zlog_debug("%.*s", (int)(fb.pos - fb.buf), fb.buf); } - zlog_debug("\n%s", buf); } const char *zlog_sanitize(char *buf, size_t bufsz, const void *in, size_t inlen) @@ -152,7 +152,7 @@ extern void zlog_backtrace_sigsafe(int priority, void *program_counter); extern size_t quagga_timestamp(int timestamp_precision /* # subsecond digits */, char *buf, size_t buflen); -extern void zlog_hexdump(const void *mem, unsigned int len); +extern void zlog_hexdump(const void *mem, size_t len); extern const char *zlog_sanitize(char *buf, size_t bufsz, const void *in, size_t inlen); @@ -429,7 +429,7 @@ void hmac_md5(unsigned char *text, int text_len, unsigned char *key, * pass */ MD5Update(&context, k_ipad, 64); /* start with inner pad */ MD5Update(&context, text, text_len); /* then text of datagram */ - MD5Final((uint8_t *)digest, &context); /* finish up 1st pass */ + MD5Final(digest, &context); /* finish up 1st pass */ /* * perform outer MD5 */ @@ -438,5 +438,5 @@ void hmac_md5(unsigned char *text, int text_len, unsigned char *key, MD5Update(&context, k_opad, 64); /* start with outer pad */ MD5Update(&context, digest, 16); /* then results of 1st * hash */ - MD5Final((uint8_t *)digest, &context); /* finish up 2nd pass */ + MD5Final(digest, &context); /* finish up 2nd pass */ } diff --git a/lib/memory.h b/lib/memory.h index 44ea19b557..e4e05faa4f 100644 --- a/lib/memory.h +++ b/lib/memory.h @@ -179,7 +179,8 @@ extern int qmem_walk(qmem_walk_fn *func, void *arg); extern int log_memstats(FILE *fp, const char *); #define log_memstats_stderr(prefix) log_memstats(stderr, prefix) -extern void memory_oom(size_t size, const char *name); 
+extern __attribute__((__noreturn__)) void memory_oom(size_t size, + const char *name); #ifdef __cplusplus } diff --git a/lib/mlag.c b/lib/mlag.c index 733dd41ea8..653fbe8fe9 100644 --- a/lib/mlag.c +++ b/lib/mlag.c @@ -85,9 +85,12 @@ int mlag_lib_decode_mlag_hdr(struct stream *s, struct mlag_msg *msg, size_t *length) { #define LIB_MLAG_HDR_LENGTH 8 + if (s == NULL || msg == NULL) + return -1; + *length = stream_get_endp(s); - if (s == NULL || msg == NULL || *length < LIB_MLAG_HDR_LENGTH) + if (*length < LIB_MLAG_HDR_LENGTH) return -1; *length -= LIB_MLAG_HDR_LENGTH; diff --git a/lib/monotime.h b/lib/monotime.h index e246f177de..dda763784f 100644 --- a/lib/monotime.h +++ b/lib/monotime.h @@ -112,6 +112,26 @@ static inline char *time_to_string(time_t ts, char *buf) return ctime_r(&tbuf, buf); } +/* Convert interval to human-friendly string, used in cli output e.g. */ +static inline const char *frrtime_to_interval(time_t t, char *buf, + size_t buflen) +{ + struct tm tm; + + gmtime_r(&t, &tm); + + if (t < ONE_DAY_SECOND) + snprintf(buf, buflen, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, + tm.tm_sec); + else if (t < ONE_WEEK_SECOND) + snprintf(buf, buflen, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, + tm.tm_min); + else + snprintf(buf, buflen, "%02dw%dd%02dh", tm.tm_yday / 7, + tm.tm_yday - ((tm.tm_yday / 7) * 7), tm.tm_hour); + return buf; +} + #ifdef __cplusplus } #endif diff --git a/lib/mpls.c b/lib/mpls.c index 759fe1206d..ac5792a686 100644 --- a/lib/mpls.c +++ b/lib/mpls.c @@ -79,7 +79,7 @@ int mpls_str2label(const char *label_str, uint8_t *num_labels, /* * Label to string conversion, labels in string separated by '/'. */ -char *mpls_label2str(uint8_t num_labels, mpls_label_t *labels, char *buf, +char *mpls_label2str(uint8_t num_labels, const mpls_label_t *labels, char *buf, int len, int pretty) { char label_buf[BUFSIZ]; diff --git a/lib/mpls.h b/lib/mpls.h index 635ecc77a1..05cf2935e8 100644 --- a/lib/mpls.h +++ b/lib/mpls.h @@ -209,10 +209,13 @@ static inline char *label2str(mpls_label_t label, char *buf, size_t len) int mpls_str2label(const char *label_str, uint8_t *num_labels, mpls_label_t *labels); +/* Generic string buffer for label-stack-to-str */ +#define MPLS_LABEL_STRLEN 1024 + /* * Label to string conversion, labels in string separated by '/'. 
*/ -char *mpls_label2str(uint8_t num_labels, mpls_label_t *labels, char *buf, +char *mpls_label2str(uint8_t num_labels, const mpls_label_t *labels, char *buf, int len, int pretty); #ifdef __cplusplus diff --git a/lib/nexthop.c b/lib/nexthop.c index e23f8b0792..0d239e091b 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -23,11 +23,9 @@ #include "table.h" #include "memory.h" #include "command.h" -#include "if.h" #include "log.h" #include "sockunion.h" #include "linklist.h" -#include "thread.h" #include "prefix.h" #include "nexthop.h" #include "mpls.h" @@ -155,7 +153,24 @@ static int _nexthop_cmp_no_labels(const struct nexthop *next1, } ret = _nexthop_source_cmp(next1, next2); + if (ret != 0) + goto done; + + if (!CHECK_FLAG(next1->flags, NEXTHOP_FLAG_HAS_BACKUP) && + CHECK_FLAG(next2->flags, NEXTHOP_FLAG_HAS_BACKUP)) + return -1; + + if (CHECK_FLAG(next1->flags, NEXTHOP_FLAG_HAS_BACKUP) && + !CHECK_FLAG(next2->flags, NEXTHOP_FLAG_HAS_BACKUP)) + return 1; + + if (next1->backup_idx < next2->backup_idx) + return -1; + if (next1->backup_idx > next2->backup_idx) + return 1; + +done: return ret; } @@ -240,7 +255,7 @@ struct nexthop *nexthop_new(void) * The linux kernel does some weird stuff with adding +1 to * all nexthop weights it gets over netlink. * To handle this, just default everything to 1 right from - * from the beggining so we don't have to special case + * from the beginning so we don't have to special case * default weights in the linux netlink code. * * 1 should be a valid on all platforms anyway. @@ -393,8 +408,8 @@ struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type) } /* Update nexthop with label information. */ -void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t type, - uint8_t num_labels, mpls_label_t *label) +void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype, + uint8_t num_labels, const mpls_label_t *labels) { struct mpls_label_stack *nh_label; int i; @@ -402,23 +417,26 @@ void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t type, if (num_labels == 0) return; - nexthop->nh_label_type = type; + /* Enforce limit on label stack size */ + if (num_labels > MPLS_MAX_LABELS) + num_labels = MPLS_MAX_LABELS; + + nexthop->nh_label_type = ltype; + nh_label = XCALLOC(MTYPE_NH_LABEL, sizeof(struct mpls_label_stack) + num_labels * sizeof(mpls_label_t)); nh_label->num_labels = num_labels; for (i = 0; i < num_labels; i++) - nh_label->label[i] = *(label + i); + nh_label->label[i] = *(labels + i); nexthop->nh_label = nh_label; } /* Free label information of nexthop, if present. 
*/ void nexthop_del_labels(struct nexthop *nexthop) { - if (nexthop->nh_label) { - XFREE(MTYPE_NH_LABEL, nexthop->nh_label); - nexthop->nh_label_type = ZEBRA_LSP_NONE; - } + XFREE(MTYPE_NH_LABEL, nexthop->nh_label); + nexthop->nh_label_type = ZEBRA_LSP_NONE; } const char *nexthop2str(const struct nexthop *nexthop, char *str, int size) @@ -505,6 +523,7 @@ unsigned int nexthop_level(struct nexthop *nexthop) uint32_t nexthop_hash_quick(const struct nexthop *nexthop) { uint32_t key = 0x45afe398; + uint32_t val; key = jhash_3words(nexthop->type, nexthop->vrf_id, nexthop->nh_label_type, key); @@ -534,8 +553,12 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop) key = jhash_1word(nexthop->nh_label->label[i], key); } - key = jhash_2words(nexthop->ifindex, - CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK), + val = 0; + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + val = (uint32_t)nexthop->backup_idx; + + key = jhash_3words(nexthop->ifindex, + CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK), val, key); return key; @@ -575,6 +598,7 @@ void nexthop_copy_no_recurse(struct nexthop *copy, copy->type = nexthop->type; copy->flags = nexthop->flags; copy->weight = nexthop->weight; + copy->backup_idx = nexthop->backup_idx; memcpy(©->gate, &nexthop->gate, sizeof(nexthop->gate)); memcpy(©->src, &nexthop->src, sizeof(nexthop->src)); memcpy(©->rmap_src, &nexthop->rmap_src, sizeof(nexthop->rmap_src)); diff --git a/lib/nexthop.h b/lib/nexthop.h index 6710914e40..c4e88dd844 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -86,6 +86,8 @@ struct nexthop { * active one */ #define NEXTHOP_FLAG_RNH_FILTERED (1 << 5) /* rmap filtered, used by rnh */ +#define NEXTHOP_FLAG_HAS_BACKUP (1 << 6) /* Backup nexthop index is set */ + #define NEXTHOP_IS_ACTIVE(flags) \ (CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \ && !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE)) @@ -116,15 +118,31 @@ struct nexthop { /* Weight of the nexthop ( for unequal cost ECMP ) */ uint8_t weight; + + /* Index of a corresponding backup nexthop in a backup list; + * only meaningful if the HAS_BACKUP flag is set. + */ + uint8_t backup_idx; }; +/* Backup index value is limited */ +#define NEXTHOP_BACKUP_IDX_MAX 255 + +/* Utility to append one nexthop to another. 
*/ +#define NEXTHOP_APPEND(to, new) \ + do { \ + (to)->next = (new); \ + (new)->prev = (to); \ + (new)->next = NULL; \ + } while (0) + struct nexthop *nexthop_new(void); void nexthop_free(struct nexthop *nexthop); void nexthops_free(struct nexthop *nexthop); -void nexthop_add_labels(struct nexthop *, enum lsp_types_t, uint8_t, - mpls_label_t *); +void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype, + uint8_t num_labels, const mpls_label_t *labels); void nexthop_del_labels(struct nexthop *); /* @@ -201,6 +219,10 @@ extern struct nexthop *nexthop_dup(const struct nexthop *nexthop, extern struct nexthop *nexthop_dup_no_recurse(const struct nexthop *nexthop, struct nexthop *rparent); +#ifdef _FRR_ATTRIBUTE_PRINTFRR +#pragma FRR printfrr_ext "%pNH" (struct nexthop *) +#endif + #ifdef __cplusplus } #endif diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c index d660428bcd..a4c823e37a 100644 --- a/lib/nexthop_group.c +++ b/lib/nexthop_group.c @@ -43,8 +43,12 @@ struct nexthop_hold { char *intf; char *labels; uint32_t weight; + int backup_idx; /* Index of backup nexthop, if >= 0 */ }; +/* Invalid/unset value for nexthop_hold's backup_idx */ +#define NHH_BACKUP_IDX_INVALID -1 + struct nexthop_group_hooks { void (*new)(const char *name); void (*add_nexthop)(const struct nexthop_group_cmd *nhg, @@ -143,6 +147,59 @@ struct nexthop *nexthop_exists(const struct nexthop_group *nhg, return NULL; } +/* + * Helper that locates a nexthop in an nhg config list. Note that + * this uses a specific matching / equality rule that's different from + * the complete match performed by 'nexthop_same()'. + */ +static struct nexthop *nhg_nh_find(const struct nexthop_group *nhg, + const struct nexthop *nh) +{ + struct nexthop *nexthop; + int ret; + + /* We compare: vrf, gateway, and interface */ + + for (nexthop = nhg->nexthop; nexthop; nexthop = nexthop->next) { + + /* Compare vrf and type */ + if (nexthop->vrf_id != nh->vrf_id) + continue; + if (nexthop->type != nh->type) + continue; + + /* Compare gateway */ + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV6: + ret = nexthop_g_addr_cmp(nexthop->type, + &nexthop->gate, &nh->gate); + if (ret != 0) + continue; + break; + case NEXTHOP_TYPE_IPV4_IFINDEX: + case NEXTHOP_TYPE_IPV6_IFINDEX: + ret = nexthop_g_addr_cmp(nexthop->type, + &nexthop->gate, &nh->gate); + if (ret != 0) + continue; + /* Intentional Fall-Through */ + case NEXTHOP_TYPE_IFINDEX: + if (nexthop->ifindex != nh->ifindex) + continue; + break; + case NEXTHOP_TYPE_BLACKHOLE: + if (nexthop->bh_type != nh->bh_type) + continue; + break; + } + + return nexthop; + } + + return NULL; +} + static bool nexthop_group_equal_common(const struct nexthop_group *nhg1, const struct nexthop_group *nhg2, @@ -225,6 +282,10 @@ void nexthop_group_copy(struct nexthop_group *to, void nexthop_group_delete(struct nexthop_group **nhg) { + /* OK to call with NULL group */ + if ((*nhg) == NULL) + return; + if ((*nhg)->nexthop) nexthops_free((*nhg)->nexthop); @@ -322,6 +383,25 @@ void _nexthop_del(struct nexthop_group *nhg, struct nexthop *nh) nh->next = NULL; } +/* Unlink a nexthop from the list it's on, unconditionally */ +static void nexthop_unlink(struct nexthop_group *nhg, struct nexthop *nexthop) +{ + + if (nexthop->prev) + nexthop->prev->next = nexthop->next; + else { + assert(nhg->nexthop == nexthop); + assert(nexthop->prev == NULL); + nhg->nexthop = nexthop->next; + } + + if (nexthop->next) + nexthop->next->prev = nexthop->prev; + + nexthop->prev = NULL; + nexthop->next = NULL; +} 
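A minimal sketch, not part of this patch: the nexthop hunks above add a per-nexthop backup index (backup_idx) that is only meaningful while NEXTHOP_FLAG_HAS_BACKUP is set and is bounded by NEXTHOP_BACKUP_IDX_MAX. A caller setting the field directly would follow the same validate/flag/store pattern the CLI parsing code further below applies; the helper name here is hypothetical.

#include <zebra.h>
#include "nexthop.h"

/* Hypothetical helper (sketch only): mark 'nh' as having a backup nexthop
 * at position 'idx' in the associated backup group, rejecting out-of-range
 * indexes the same way nexthop_group_parse_nexthop() does below.
 */
static bool nexthop_set_backup_idx(struct nexthop *nh, int idx)
{
	if (idx < 0 || idx > NEXTHOP_BACKUP_IDX_MAX)
		return false;

	SET_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP);
	nh->backup_idx = (uint8_t)idx;
	return true;
}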
+ /* * Copy a list of nexthops in 'nh' to an nhg, enforcing canonical sort order */ @@ -567,11 +647,36 @@ DEFUN_NOSH(no_nexthop_group, no_nexthop_group_cmd, "no nexthop-group NHGNAME", return CMD_SUCCESS; } +DEFPY(nexthop_group_backup, nexthop_group_backup_cmd, + "backup-group WORD$name", + "Specify a group name containing backup nexthops\n" + "The name of the backup group\n") +{ + VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc); + + strlcpy(nhgc->backup_list_name, name, sizeof(nhgc->backup_list_name)); + + return CMD_SUCCESS; +} + +DEFPY(no_nexthop_group_backup, no_nexthop_group_backup_cmd, + "no backup-group [WORD$name]", + NO_STR + "Clear group name containing backup nexthops\n" + "The name of the backup group\n") +{ + VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc); + + nhgc->backup_list_name[0] = 0; + + return CMD_SUCCESS; +} + static void nexthop_group_save_nhop(struct nexthop_group_cmd *nhgc, const char *nhvrf_name, const union sockunion *addr, const char *intf, const char *labels, - const uint32_t weight) + const uint32_t weight, int backup_idx) { struct nexthop_hold *nh; @@ -588,14 +693,22 @@ static void nexthop_group_save_nhop(struct nexthop_group_cmd *nhgc, nh->weight = weight; + nh->backup_idx = backup_idx; + listnode_add_sort(nhgc->nhg_list, nh); } +/* + * Remove config info about a nexthop from group 'nhgc'. Note that we + * use only a subset of the available attributes here to determine + * a 'match'. + * Note that this doesn't change the list of nexthops, only the config + * information. + */ static void nexthop_group_unsave_nhop(struct nexthop_group_cmd *nhgc, const char *nhvrf_name, const union sockunion *addr, - const char *intf, const char *labels, - const uint32_t weight) + const char *intf) { struct nexthop_hold *nh; struct listnode *node; @@ -603,9 +716,7 @@ static void nexthop_group_unsave_nhop(struct nexthop_group_cmd *nhgc, for (ALL_LIST_ELEMENTS_RO(nhgc->nhg_list, node, nh)) { if (nhgc_cmp_helper(nhvrf_name, nh->nhvrf_name) == 0 && nhgc_addr_cmp_helper(addr, nh->addr) == 0 - && nhgc_cmp_helper(intf, nh->intf) == 0 - && nhgc_cmp_helper(labels, nh->labels) == 0 - && weight == nh->weight) + && nhgc_cmp_helper(intf, nh->intf) == 0) break; } @@ -629,7 +740,7 @@ static bool nexthop_group_parse_nexthop(struct nexthop *nhop, const union sockunion *addr, const char *intf, const char *name, const char *labels, int *lbl_ret, - uint32_t weight) + uint32_t weight, int backup_idx) { int ret = 0; struct vrf *vrf; @@ -688,6 +799,15 @@ static bool nexthop_group_parse_nexthop(struct nexthop *nhop, nhop->weight = weight; + if (backup_idx != NHH_BACKUP_IDX_INVALID) { + /* Validate index value */ + if (backup_idx > NEXTHOP_BACKUP_IDX_MAX) + return false; + + SET_FLAG(nhop->flags, NEXTHOP_FLAG_HAS_BACKUP); + nhop->backup_idx = backup_idx; + } + return true; } @@ -699,7 +819,7 @@ static bool nexthop_group_parse_nhh(struct nexthop *nhop, { return (nexthop_group_parse_nexthop(nhop, nhh->addr, nhh->intf, nhh->nhvrf_name, nhh->labels, NULL, - nhh->weight)); + nhh->weight, nhh->backup_idx)); } DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, @@ -712,6 +832,7 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, nexthop-vrf NAME$vrf_name \ |label WORD \ |weight (1-255) \ + |backup-idx$bi_str (0-254)$idx \ }]", NO_STR "Specify one of the nexthops in this ECMP group\n" @@ -724,16 +845,23 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, "Specify label(s) for this nexthop\n" "One or more labels in the range (16-1048575) separated by '/'\n" "Weight to be used by the nexthop for purposes of ECMP\n" - "Weight value to be used\n") 
+ "Weight value to be used\n" + "Backup nexthop index in another group\n" + "Nexthop index value\n") { VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc); struct nexthop nhop; struct nexthop *nh; int lbl_ret = 0; bool legal; + int backup_idx = idx; + bool yes = !no; + + if (bi_str == NULL) + backup_idx = NHH_BACKUP_IDX_INVALID; legal = nexthop_group_parse_nexthop(&nhop, addr, intf, vrf_name, label, - &lbl_ret, weight); + &lbl_ret, weight, backup_idx); if (nhop.type == NEXTHOP_TYPE_IPV6 && IN6_IS_ADDR_LINKLOCAL(&nhop.gate.ipv6)) { @@ -763,21 +891,30 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, return CMD_WARNING_CONFIG_FAILED; } - nh = nexthop_exists(&nhgc->nhg, &nhop); + /* Look for an existing nexthop in the config. Note that the test + * here tests only some attributes - it's not a complete comparison. + * Note that we've got two kinds of objects to manage: 'nexthop_hold' + * that represent config that may or may not be valid (yet), and + * actual nexthops that have been validated and parsed. + */ + nh = nhg_nh_find(&nhgc->nhg, &nhop); - if (no) { - nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf, label, - weight); - if (nh) { - _nexthop_del(&nhgc->nhg, nh); + /* Always attempt to remove old config info. */ + nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf); - if (nhg_hooks.del_nexthop) - nhg_hooks.del_nexthop(nhgc, nh); + /* Remove any existing nexthop, for delete and replace cases. */ + if (nh) { + nexthop_unlink(&nhgc->nhg, nh); - nexthop_free(nh); - } - } else if (!nh) { - /* must be adding new nexthop since !no and !nexthop_exists */ + if (nhg_hooks.del_nexthop) + nhg_hooks.del_nexthop(nhgc, nh); + + nexthop_free(nh); + } + if (yes) { + /* Add/replace case: capture nexthop if valid, and capture + * config info always. + */ if (legal) { nh = nexthop_new(); @@ -785,8 +922,9 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd, _nexthop_add(&nhgc->nhg.nexthop, nh); } + /* Save config always */ nexthop_group_save_nhop(nhgc, vrf_name, addr, intf, label, - weight); + weight, backup_idx); if (legal && nhg_hooks.add_nexthop) nhg_hooks.add_nexthop(nhgc, nh); @@ -849,6 +987,9 @@ void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh) if (nh->weight) vty_out(vty, " weight %u", nh->weight); + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP)) + vty_out(vty, " backup-idx %d", nh->backup_idx); + vty_out(vty, "\n"); } @@ -874,6 +1015,9 @@ static void nexthop_group_write_nexthop_internal(struct vty *vty, if (nh->weight) vty_out(vty, " weight %u", nh->weight); + if (nh->backup_idx != NHH_BACKUP_IDX_INVALID) + vty_out(vty, " backup-idx %d", nh->backup_idx); + vty_out(vty, "\n"); } @@ -887,6 +1031,10 @@ static int nexthop_group_write(struct vty *vty) vty_out(vty, "nexthop-group %s\n", nhgc->name); + if (nhgc->backup_list_name[0]) + vty_out(vty, " backup-group %s\n", + nhgc->backup_list_name); + for (ALL_LIST_ELEMENTS_RO(nhgc->nhg_list, node, nh)) { vty_out(vty, " "); nexthop_group_write_nexthop_internal(vty, nh); @@ -1067,6 +1215,8 @@ void nexthop_group_init(void (*new)(const char *name), install_element(CONFIG_NODE, &no_nexthop_group_cmd); install_default(NH_GROUP_NODE); + install_element(NH_GROUP_NODE, &nexthop_group_backup_cmd); + install_element(NH_GROUP_NODE, &no_nexthop_group_backup_cmd); install_element(NH_GROUP_NODE, &ecmp_nexthops_cmd); memset(&nhg_hooks, 0, sizeof(nhg_hooks)); diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h index f99a53f694..3a5a1299c1 100644 --- a/lib/nexthop_group.h +++ b/lib/nexthop_group.h @@ -57,6 +57,8 @@ void copy_nexthops(struct nexthop **tnh, const 
struct nexthop *nh, uint32_t nexthop_group_hash_no_recurse(const struct nexthop_group *nhg); uint32_t nexthop_group_hash(const struct nexthop_group *nhg); void nexthop_group_mark_duplicates(struct nexthop_group *nhg); + +/* Add a nexthop to a list, enforcing the canonical sort order. */ void nexthop_group_add_sorted(struct nexthop_group *nhg, struct nexthop *nexthop); @@ -79,11 +81,16 @@ void nexthop_group_add_sorted(struct nexthop_group *nhg, (nhop) = nexthop_next(nhop) +#define NHGC_NAME_SIZE 80 + struct nexthop_group_cmd { RB_ENTRY(nexthop_group_cmd) nhgc_entry; - char name[80]; + char name[NHGC_NAME_SIZE]; + + /* Name of group containing backup nexthops (if set) */ + char backup_list_name[NHGC_NAME_SIZE]; struct nexthop_group nhg; diff --git a/lib/northbound.c b/lib/northbound.c index cebedcff09..85e723d7cf 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -1866,6 +1866,13 @@ static void nb_load_callbacks(const struct frr_yang_module_info *module) struct nb_node *nb_node; uint32_t priority; + if (i > YANG_MODULE_MAX_NODES) { + zlog_err( + "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.", + __func__, module->name, YANG_MODULE_MAX_NODES); + exit(1); + } + nb_node = nb_node_find(module->nodes[i].xpath); if (!nb_node) { flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH, diff --git a/lib/northbound.h b/lib/northbound.h index 76a11e518c..19a2ba0865 100644 --- a/lib/northbound.h +++ b/lib/northbound.h @@ -403,6 +403,13 @@ struct nb_node { /* The YANG list doesn't contain key leafs. */ #define F_NB_NODE_KEYLESS_LIST 0x02 +/* + * HACK: old gcc versions (< 5.x) have a bug that prevents C99 flexible arrays + * from working properly on shared libraries. For those compilers, use a fixed + * size array to work around the problem. + */ +#define YANG_MODULE_MAX_NODES 1024 + struct frr_yang_module_info { /* YANG module name. */ const char *name; @@ -417,7 +424,11 @@ struct frr_yang_module_info { /* Priority - lower priorities are processed first. */ uint32_t priority; +#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__) + } nodes[YANG_MODULE_MAX_NODES + 1]; +#else } nodes[]; +#endif }; /* Northbound error codes. */ diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp index 089899368d..b195f1aeca 100644 --- a/lib/northbound_grpc.cpp +++ b/lib/northbound_grpc.cpp @@ -884,7 +884,14 @@ static int frr_grpc_finish(void) return 0; } -static int frr_grpc_module_late_init(struct thread_master *tm) +/* + * This is done this way because module_init and module_late_init are both + * called during daemon pre-fork initialization. Because the GRPC library + * spawns threads internally, we need to delay initializing it until after + * fork. This is done by scheduling this init function as an event task, since + * the event loop doesn't run until after fork. 
+ */ +static int frr_grpc_module_very_late_init(struct thread *thread) { static unsigned long port = GRPC_DEFAULT_PORT; const char *args = THIS_MODULE->load_args; @@ -910,15 +917,19 @@ static int frr_grpc_module_late_init(struct thread_master *tm) if (frr_grpc_init(&port) < 0) goto error; - hook_register(frr_fini, frr_grpc_finish); - - return 0; - error: flog_err(EC_LIB_GRPC_INIT, "failed to initialize the gRPC module"); return -1; } +static int frr_grpc_module_late_init(struct thread_master *tm) +{ + thread_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL); + hook_register(frr_fini, frr_grpc_finish); + + return 0; +} + static int frr_grpc_module_init(void) { hook_register(frr_late_init, frr_grpc_module_late_init); diff --git a/lib/ntop.c b/lib/ntop.c index 066e10e3e4..ccbf8793d3 100644 --- a/lib/ntop.c +++ b/lib/ntop.c @@ -165,7 +165,7 @@ inet4: return dst; } -#ifndef INET_NTOP_NO_OVERRIDE +#if !defined(INET_NTOP_NO_OVERRIDE) && !defined(__APPLE__) /* we want to override libc inet_ntop, but make sure it shows up in backtraces * as frr_inet_ntop (to avoid confusion while debugging) */ diff --git a/lib/plist.c b/lib/plist.c index 40131aebed..b7a020c6f7 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -778,7 +778,7 @@ static void __attribute__((unused)) prefix_list_print(struct prefix_list *plist) p = &pentry->prefix; - printf(" seq %" PRId64 " %s %s/%d", pentry->seq, + printf(" seq %lld %s %s/%d", (long long)pentry->seq, prefix_list_type_str(pentry), inet_ntop(p->family, p->u.val, buf, BUFSIZ), p->prefixlen); diff --git a/lib/prefix.h b/lib/prefix.h index b01f7d1fdc..f2952c38c3 100644 --- a/lib/prefix.h +++ b/lib/prefix.h @@ -531,7 +531,7 @@ static inline int is_host_route(struct prefix *p) return 0; } -static inline int is_default_host_route(struct prefix *p) +static inline int is_default_host_route(const struct prefix *p) { if (p->family == AF_INET) { return (p->u.prefix4.s_addr == INADDR_ANY && @@ -544,6 +544,22 @@ static inline int is_default_host_route(struct prefix *p) return 0; } +#ifdef _FRR_ATTRIBUTE_PRINTFRR +#pragma FRR printfrr_ext "%pI4" (struct in_addr *) +#pragma FRR printfrr_ext "%pI4" (in_addr_t *) + +#pragma FRR printfrr_ext "%pI6" (struct in6_addr *) + +#pragma FRR printfrr_ext "%pFX" (struct prefix *) +#pragma FRR printfrr_ext "%pFX" (struct prefix_ipv4 *) +#pragma FRR printfrr_ext "%pFX" (struct prefix_ipv6 *) +#pragma FRR printfrr_ext "%pFX" (struct prefix_eth *) +#pragma FRR printfrr_ext "%pFX" (struct prefix_evpn *) +#pragma FRR printfrr_ext "%pFX" (struct prefix_fs *) + +#pragma FRR printfrr_ext "%pSG4" (struct prefix_sg *) +#endif + #ifdef __cplusplus } #endif diff --git a/lib/printfrr.h b/lib/printfrr.h index f9584bcacc..7d9e288655 100644 --- a/lib/printfrr.h +++ b/lib/printfrr.h @@ -30,8 +30,7 @@ struct fbuf { size_t len; }; -#define at(a, b) \ - __attribute__((format(printf, a, b))) +#define at(a, b) PRINTFRR(a, b) #define atn(a, b) \ at(a, b) __attribute__((nonnull(1) _RET_NONNULL)) #define atm(a, b) \ @@ -73,8 +72,19 @@ char *vasnprintfrr(struct memtype *mt, char *out, size_t sz, char *asnprintfrr(struct memtype *mt, char *out, size_t sz, const char *fmt, ...) atn(4, 5); +#define printfrr(fmt, ...) 
\ + do { \ + char buf[256], *out; \ + out = asnprintfrr(MTYPE_TMP, buf, sizeof(buf), fmt, \ + ##__VA_ARGS__); \ + fputs(out, stdout); \ + if (out != buf) \ + XFREE(MTYPE_TMP, out); \ + } while (0) + #undef at #undef atm +#undef atn /* extension specs must start with a capital letter (this is a restriction * for both performance's and human understanding's sake.) diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c index 5b03b5266f..41e8cacd81 100644 --- a/lib/routemap_cli.c +++ b/lib/routemap_cli.c @@ -148,6 +148,12 @@ void route_map_instance_show(struct vty *vty, struct lyd_node *dnode, SKIP_RULE("ipv6 next-hop type"); SKIP_RULE("metric"); SKIP_RULE("tag"); + /* Zebra specific match conditions. */ + SKIP_RULE("ip address prefix-len"); + SKIP_RULE("ipv6 address prefix-len"); + SKIP_RULE("ip next-hop prefix-len"); + SKIP_RULE("source-protocol"); + SKIP_RULE("source-instance"); vty_out(vty, " match %s %s\n", rmr->cmd->str, rmr->rule_str ? rmr->rule_str : ""); @@ -158,6 +164,8 @@ void route_map_instance_show(struct vty *vty, struct lyd_node *dnode, /* Skip all sets implemented by northbound. */ SKIP_RULE("metric"); SKIP_RULE("tag"); + /* Zebra specific set actions. */ + SKIP_RULE("src"); vty_out(vty, " set %s %s\n", rmr->cmd->str, rmr->rule_str ? rmr->rule_str : ""); @@ -666,8 +674,25 @@ void route_map_condition_show(struct vty *vty, struct lyd_node *dnode, vty_out(vty, " match tag %s\n", yang_dnode_get_string(dnode, "./tag")); break; - case 100: - /* NOTHING: custom field, should be handled by daemon. */ + case 100: /* ipv4-prefix-length */ + vty_out(vty, " match ip address prefix-len %s\n", + yang_dnode_get_string(dnode,"./frr-zebra:ipv4-prefix-length")); + break; + case 101: /* ipv6-prefix-length */ + vty_out(vty, " match ipv6 address prefix-len %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:ipv6-prefix-length")); + break; + case 102: /* ipv4-next-hop-prefix-length */ + vty_out(vty, " match ip next-hop prefix-len %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:ipv4-prefix-length")); + break; + case 103: /* source-protocol */ + vty_out(vty, " match source-protocol %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-protocol")); + break; + case 104: /* source-instance */ + vty_out(vty, " match source-instance %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-instance")); break; } } @@ -868,8 +893,13 @@ void route_map_action_show(struct vty *vty, struct lyd_node *dnode, vty_out(vty, " set tag %s\n", yang_dnode_get_string(dnode, "./tag")); break; - case 100: - /* NOTHING: custom field, should be handled by daemon. */ + case 100: /* source */ + if (yang_dnode_exists(dnode, "./frr-zebra:source-v4")) + vty_out(vty, " set src %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-v4")); + else + vty_out(vty, " set src %s\n", + yang_dnode_get_string(dnode, "./frr-zebra:source-v6")); break; } } diff --git a/lib/routemap_northbound.c b/lib/routemap_northbound.c index 69cebbd2a1..dd4cbd7d99 100644 --- a/lib/routemap_northbound.c +++ b/lib/routemap_northbound.c @@ -1221,32 +1221,7 @@ lib_route_map_entry_set_action_tag_destroy(enum nb_event event, } /* clang-format off */ -#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__) -/* - * gcc versions before 5.x miscalculate the size for structs with variable - * length arrays (they just count it as size 0) - */ -struct frr_yang_module_info_sizen { - /* YANG module name. */ - const char *name; - - /* Northbound callbacks. */ - const struct { - /* Data path of this YANG node. 
*/ - const char *xpath; - - /* Callbacks implemented for this node. */ - struct nb_callbacks cbs; - - /* Priority - lower priorities are processed first. */ - uint32_t priority; - } nodes[28]; -}; - -const struct frr_yang_module_info_sizen frr_route_map_info_sizen asm("frr_route_map_info") = { -#else const struct frr_yang_module_info frr_route_map_info = { -#endif .name = "frr-route-map", .nodes = { { diff --git a/lib/skiplist.c b/lib/skiplist.c index d955c6eb9e..790bd71c38 100644 --- a/lib/skiplist.c +++ b/lib/skiplist.c @@ -112,7 +112,7 @@ static int randomLevel(void) return level; } -static int default_cmp(void *key1, void *key2) +static int default_cmp(const void *key1, const void *key2) { if (key1 < key2) return -1; @@ -126,7 +126,8 @@ unsigned int skiplist_count(struct skiplist *l) return l->count; } -struct skiplist *skiplist_new(int flags, int (*cmp)(void *key1, void *key2), +struct skiplist *skiplist_new(int flags, + int (*cmp)(const void *key1, const void *key2), void (*del)(void *val)) { struct skiplist *new; @@ -329,8 +330,8 @@ int skiplist_delete(register struct skiplist *l, register void *key, * Also set a cursor for use with skiplist_next_value. */ int skiplist_first_value(register struct skiplist *l, /* in */ - register void *key, /* in */ - void **valuePointer, /* out */ + register const void *key, /* in */ + void **valuePointer, /* out */ void **cursor) /* out */ { register int k; @@ -374,7 +375,7 @@ int skiplist_search(register struct skiplist *l, register void *key, * last element with the given key, -1 is returned. */ int skiplist_next_value(register struct skiplist *l, /* in */ - register void *key, /* in */ + register const void *key, /* in */ void **valuePointer, /* in/out */ void **cursor) /* in/out */ { @@ -623,7 +624,7 @@ void skiplist_test(struct vty *vty) zlog_debug("%s: (%d:%d)", __func__, i, k); } // keys[k] = (void *)random(); - keys[k] = (void *)scramble(k); + keys[k] = scramble(k); if (skiplist_insert(l, keys[k], keys[k])) zlog_debug("error in insert #%d,#%d", i, k); } @@ -648,7 +649,7 @@ void skiplist_test(struct vty *vty) zlog_debug("<%d:%d>", i, k); if (skiplist_delete(l, keys[k], keys[k])) zlog_debug("error in delete"); - keys[k] = (void *)scramble(k ^ 0xf0f0f0f0); + keys[k] = scramble(k ^ 0xf0f0f0f0); if (skiplist_insert(l, keys[k], keys[k])) zlog_debug("error in insert #%d,#%d", i, k); } diff --git a/lib/skiplist.h b/lib/skiplist.h index 2ab37331c9..a106a455d6 100644 --- a/lib/skiplist.h +++ b/lib/skiplist.h @@ -68,7 +68,7 @@ struct skiplist { * Returns -1 if val1 < val2, 0 if equal?, 1 if val1 > val2. * Used as definition of sorted for listnode_add_sort */ - int (*cmp)(void *val1, void *val2); + int (*cmp)(const void *val1, const void *val2); /* callback to free user-owned data when listnode is deleted. supplying * this callback is very much encouraged! 
@@ -81,8 +81,9 @@ struct skiplist { extern struct skiplist * skiplist_new(/* encouraged: set list.del callback on new lists */ int flags, - int (*cmp)(void *key1, void *key2), /* NULL => default cmp */ - void (*del)(void *val)); /* NULL => no auto val free */ + int (*cmp)(const void *key1, + const void *key2), /* NULL => default cmp */ + void (*del)(void *val)); /* NULL => no auto val free */ extern void skiplist_free(struct skiplist *); @@ -96,12 +97,12 @@ extern int skiplist_search(register struct skiplist *l, register void *key, void **valuePointer); extern int skiplist_first_value(register struct skiplist *l, /* in */ - register void *key, /* in */ - void **valuePointer, /* in/out */ + register const void *key, /* in */ + void **valuePointer, /* in/out */ void **cursor); /* out */ extern int skiplist_next_value(register struct skiplist *l, /* in */ - register void *key, /* in */ + register const void *key, /* in */ void **valuePointer, /* in/out */ void **cursor); /* in/out */ diff --git a/lib/smux.h b/lib/smux.h index 3f860db0dc..6896f02354 100644 --- a/lib/smux.h +++ b/lib/smux.h @@ -105,7 +105,7 @@ extern int smux_trap(struct variable *, size_t, const oid *, size_t, extern int oid_compare(const oid *, int, const oid *, int); extern void oid2in_addr(oid[], int, struct in_addr *); extern void *oid_copy(void *, const void *, size_t); -extern void oid_copy_addr(oid[], struct in_addr *, int); +extern void oid_copy_addr(oid[], const struct in_addr *, int); #ifdef __cplusplus } diff --git a/lib/snmp.c b/lib/snmp.c index f11d9dc8cf..736a3c62b8 100644 --- a/lib/snmp.c +++ b/lib/snmp.c @@ -64,10 +64,10 @@ void oid2in_addr(oid oid[], int len, struct in_addr *addr) *pnt++ = oid[i]; } -void oid_copy_addr(oid oid[], struct in_addr *addr, int len) +void oid_copy_addr(oid oid[], const struct in_addr *addr, int len) { int i; - uint8_t *pnt; + const uint8_t *pnt; if (len == 0) return; diff --git a/lib/spf_backoff.c b/lib/spf_backoff.c index 41d4e2bb57..4e74714489 100644 --- a/lib/spf_backoff.c +++ b/lib/spf_backoff.c @@ -7,7 +7,7 @@ * Copyright (C) 2017 Orange Labs http://www.orange.com/ * Copyright (C) 2017 by Christian Franke, Open Source Routing / NetDEF Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/spf_backoff.h b/lib/spf_backoff.h index 11b2701e3e..2617195d79 100644 --- a/lib/spf_backoff.h +++ b/lib/spf_backoff.h @@ -7,7 +7,7 @@ * Copyright (C) 2017 Orange Labs http://www.orange.com/ * Copyright (C) 2017 by Christian Franke, Open Source Routing / NetDEF Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/srcdest_table.c b/lib/srcdest_table.c index ee87d73077..66b735919b 100644 --- a/lib/srcdest_table.c +++ b/lib/srcdest_table.c @@ -4,7 +4,7 @@ * Copyright (C) 2017 by David Lamparter & Christian Franke, * Open Source Routing / NetDEF Inc. 
* - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/srcdest_table.h b/lib/srcdest_table.h index 90418944c7..7982260777 100644 --- a/lib/srcdest_table.h +++ b/lib/srcdest_table.h @@ -4,7 +4,7 @@ * Copyright (C) 2017 by David Lamparter & Christian Franke, * Open Source Routing / NetDEF Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/lib/stream.c b/lib/stream.c index dd4d5bd96d..683a130e44 100644 --- a/lib/stream.c +++ b/lib/stream.c @@ -543,6 +543,27 @@ uint64_t stream_getq(struct stream *s) return q; } +bool stream_getq2(struct stream *s, uint64_t *q) +{ + STREAM_VERIFY_SANE(s); + + if (STREAM_READABLE(s) < sizeof(uint64_t)) { + STREAM_BOUND_WARN2(s, "get uint64"); + return false; + } + + *q = ((uint64_t)s->data[s->getp++]) << 56; + *q |= ((uint64_t)s->data[s->getp++]) << 48; + *q |= ((uint64_t)s->data[s->getp++]) << 40; + *q |= ((uint64_t)s->data[s->getp++]) << 32; + *q |= ((uint64_t)s->data[s->getp++]) << 24; + *q |= ((uint64_t)s->data[s->getp++]) << 16; + *q |= ((uint64_t)s->data[s->getp++]) << 8; + *q |= ((uint64_t)s->data[s->getp++]); + + return true; +} + /* Get next long word from the stream. */ uint32_t stream_get_ipv4(struct stream *s) { @@ -898,7 +919,7 @@ int stream_put_prefix(struct stream *s, const struct prefix *p) } /* Put NLRI with label */ -int stream_put_labeled_prefix(struct stream *s, struct prefix *p, +int stream_put_labeled_prefix(struct stream *s, const struct prefix *p, mpls_label_t *label, int addpath_encode, uint32_t addpath_tx_id) { diff --git a/lib/stream.h b/lib/stream.h index 36c65afa3c..5c7d94fab8 100644 --- a/lib/stream.h +++ b/lib/stream.h @@ -196,7 +196,7 @@ extern int stream_put_prefix_addpath(struct stream *s, int addpath_encode, uint32_t addpath_tx_id); extern int stream_put_prefix(struct stream *s, const struct prefix *p); -extern int stream_put_labeled_prefix(struct stream *, struct prefix *, +extern int stream_put_labeled_prefix(struct stream *, const struct prefix *, mpls_label_t *, int addpath_encode, uint32_t addpath_tx_id); extern void stream_get(void *, struct stream *, size_t); @@ -215,6 +215,7 @@ extern bool stream_getl2(struct stream *s, uint32_t *l); extern uint32_t stream_getl_from(struct stream *, size_t); extern uint64_t stream_getq(struct stream *); extern uint64_t stream_getq_from(struct stream *, size_t); +bool stream_getq2(struct stream *s, uint64_t *q); extern uint32_t stream_get_ipv4(struct stream *); /* IEEE-754 floats */ @@ -354,9 +355,10 @@ extern void stream_fifo_free(struct stream_fifo *fifo); * bit), for 64-bit values (you need to cast them anyway), and neither for * encoding (because it's downcasted.) 
*/ -static inline uint8_t *ptr_get_be32(uint8_t *ptr, uint32_t *out) +static inline const uint8_t *ptr_get_be32(const uint8_t *ptr, uint32_t *out) { uint32_t tmp; + memcpy(&tmp, ptr, sizeof(tmp)); *out = ntohl(tmp); return ptr + 4; @@ -401,6 +403,25 @@ static inline uint8_t *ptr_get_be32(uint8_t *ptr, uint32_t *out) (P) = _pval; \ } while (0) +#define STREAM_GETF(S, P) \ + do { \ + union { \ + float r; \ + uint32_t d; \ + } _pval; \ + if (stream_getl2((S), &_pval.d)) \ + goto stream_failure; \ + (P) = _pval.r; \ + } while (0) + +#define STREAM_GETQ(S, P) \ + do { \ + uint64_t _pval; \ + if (!stream_getq2((S), &_pval)) \ + goto stream_failure; \ + (P) = _pval; \ + } while (0) + #define STREAM_GET(P, STR, SIZE) \ do { \ if (!stream_get2((P), (STR), (SIZE))) \ diff --git a/lib/table.c b/lib/table.c index 1a89a95f4f..86347cbacd 100644 --- a/lib/table.c +++ b/lib/table.c @@ -160,7 +160,7 @@ static void route_common(const struct prefix *n, const struct prefix *p, np = (const uint8_t *)&n->u.prefix; pp = (const uint8_t *)&p->u.prefix; - newp = (uint8_t *)&new->u.prefix; + newp = &new->u.prefix; for (i = 0; i < p->prefixlen / 8; i++) { if (np[i] == pp[i]) diff --git a/lib/table.h b/lib/table.h index 7743d51681..9cd9503376 100644 --- a/lib/table.h +++ b/lib/table.h @@ -331,6 +331,10 @@ static inline int route_table_iter_started(route_table_iter_t *iter) return iter->state != RT_ITER_STATE_INIT; } +#ifdef _FRR_ATTRIBUTE_PRINTFRR +#pragma FRR printfrr_ext "%pRN" (struct route_node *) +#endif + #ifdef __cplusplus } #endif diff --git a/lib/thread.c b/lib/thread.c index 2217a60f0a..dbf668a699 100644 --- a/lib/thread.c +++ b/lib/thread.c @@ -114,11 +114,10 @@ static void vty_out_cpu_thread_history(struct vty *vty, struct cpu_thread_history *a) { vty_out(vty, "%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu", - (size_t)a->total_active, - a->cpu.total / 1000, a->cpu.total % 1000, - (size_t)a->total_calls, - a->cpu.total / a->total_calls, a->cpu.max, - a->real.total / a->total_calls, a->real.max); + (size_t)a->total_active, a->cpu.total / 1000, + a->cpu.total % 1000, (size_t)a->total_calls, + (size_t)(a->cpu.total / a->total_calls), a->cpu.max, + (size_t)(a->real.total / a->total_calls), a->real.max); vty_out(vty, " %c%c%c%c%c %s\n", a->types & (1 << THREAD_READ) ? 'R' : ' ', a->types & (1 << THREAD_WRITE) ? 
'W' : ' ', @@ -116,7 +116,7 @@ static void vrf_update_vrf_id(ns_id_t ns_id, void *opaqueptr) vrf->vrf_id = vrf_id; RB_INSERT(vrf_id_head, &vrfs_by_id, vrf); if (old_vrf_id == VRF_UNKNOWN) - vrf_enable((struct vrf *)vrf); + vrf_enable(vrf); } int vrf_switch_to_netns(vrf_id_t vrf_id) @@ -324,10 +324,7 @@ const char *vrf_id_to_name(vrf_id_t vrf_id) struct vrf *vrf; vrf = vrf_lookup_by_id(vrf_id); - if (vrf) - return vrf->name; - - return "n/a"; + return VRF_LOGNAME(vrf); } vrf_id_t vrf_name_to_id(const char *name) @@ -593,10 +590,22 @@ int vrf_get_backend(void) return vrf_backend; } -void vrf_configure_backend(int vrf_backend_netns) +int vrf_configure_backend(enum vrf_backend_type backend) { - vrf_backend = vrf_backend_netns; + /* Work around issue in old gcc */ + switch (backend) { + case VRF_BACKEND_UNKNOWN: + case VRF_BACKEND_NETNS: + case VRF_BACKEND_VRF_LITE: + break; + default: + return -1; + } + + vrf_backend = backend; vrf_backend_configured = 1; + + return 0; } int vrf_handler_create(struct vty *vty, const char *vrfname, @@ -101,9 +101,12 @@ RB_PROTOTYPE(vrf_name_head, vrf, name_entry, vrf_name_compare) DECLARE_QOBJ_TYPE(vrf) /* Allow VRF with netns as backend */ -#define VRF_BACKEND_VRF_LITE 0 -#define VRF_BACKEND_NETNS 1 -#define VRF_BACKEND_UNKNOWN 2 +enum vrf_backend_type { + VRF_BACKEND_VRF_LITE, + VRF_BACKEND_NETNS, + VRF_BACKEND_UNKNOWN, + VRF_BACKEND_MAX, +}; extern struct vrf_id_head vrfs_by_id; extern struct vrf_name_head vrfs_by_name; @@ -292,10 +295,10 @@ extern void vrf_install_commands(void); * VRF utilities */ -/* API for configuring VRF backend - * should be called from zebra only +/* + * API for configuring VRF backend */ -extern void vrf_configure_backend(int vrf_backend_netns); +extern int vrf_configure_backend(enum vrf_backend_type backend); extern int vrf_get_backend(void); extern int vrf_is_backend_netns(void); @@ -231,8 +231,13 @@ int vty_out(struct vty *vty, const char *format, ...) strlen(filtered)); break; case VTY_SHELL: - fprintf(vty->of, "%s", filtered); - fflush(vty->of); + if (vty->of) { + fprintf(vty->of, "%s", filtered); + fflush(vty->of); + } else if (vty->of_saved) { + fprintf(vty->of_saved, "%s", filtered); + fflush(vty->of_saved); + } break; case VTY_SHELL_SERV: case VTY_FILE: diff --git a/lib/yang.c b/lib/yang.c index 93e6db3055..0502d4952d 100644 --- a/lib/yang.c +++ b/lib/yang.c @@ -628,7 +628,7 @@ void yang_debugging_set(bool enable) } } -struct ly_ctx *yang_ctx_new_setup(void) +struct ly_ctx *yang_ctx_new_setup(bool embedded_modules) { struct ly_ctx *ctx; const char *yang_models_path = YANG_MODELS_PATH; @@ -647,18 +647,21 @@ struct ly_ctx *yang_ctx_new_setup(void) ctx = ly_ctx_new(yang_models_path, LY_CTX_DISABLE_SEARCHDIR_CWD); if (!ctx) return NULL; - ly_ctx_set_module_imp_clb(ctx, yang_module_imp_clb, NULL); + + if (embedded_modules) + ly_ctx_set_module_imp_clb(ctx, yang_module_imp_clb, NULL); + return ctx; } -void yang_init(void) +void yang_init(bool embedded_modules) { /* Initialize libyang global parameters that affect all containers. */ ly_set_log_clb(ly_log_cb, 1); ly_log_options(LY_LOLOG | LY_LOSTORE); /* Initialize libyang container for native models. 
*/ - ly_native_ctx = yang_ctx_new_setup(); + ly_native_ctx = yang_ctx_new_setup(embedded_modules); if (!ly_native_ctx) { flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); exit(1); diff --git a/lib/yang.h b/lib/yang.h index 6892e36019..8af440d3ed 100644 --- a/lib/yang.h +++ b/lib/yang.h @@ -482,8 +482,11 @@ extern struct yang_data *yang_data_list_find(const struct list *list, /* * Create and set up a libyang context (for use by the translator) + * + * embedded_modules + * Specify whether libyang should attempt to look for embedded YANG modules. */ -extern struct ly_ctx *yang_ctx_new_setup(void); +extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules); /* * Enable or disable libyang verbose debugging. @@ -496,8 +499,11 @@ extern void yang_debugging_set(bool enable); /* * Initialize the YANG subsystem. Should be called only once during the * daemon initialization process. + * + * embedded_modules + * Specify whether libyang should attempt to look for embedded YANG modules. */ -extern void yang_init(void); +extern void yang_init(bool embedded_modules); /* * Finish the YANG subsystem gracefully. Should be called only when the daemon diff --git a/lib/yang_translator.c b/lib/yang_translator.c index 341420eeda..7dbb1f3f1a 100644 --- a/lib/yang_translator.c +++ b/lib/yang_translator.c @@ -171,7 +171,7 @@ struct yang_translator *yang_translator_load(const char *path) RB_INSERT(yang_translators, &yang_translators, translator); /* Initialize the translator libyang context. */ - translator->ly_ctx = yang_ctx_new_setup(); + translator->ly_ctx = yang_ctx_new_setup(false); if (!translator->ly_ctx) { flog_warn(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); goto error; @@ -511,7 +511,7 @@ static unsigned int yang_module_nodes_count(const struct lys_module *module) void yang_translator_init(void) { - ly_translator_ctx = yang_ctx_new_setup(); + ly_translator_ctx = yang_ctx_new_setup(true); if (!ly_translator_ctx) { flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); exit(1); diff --git a/lib/yang_wrappers.c b/lib/yang_wrappers.c index a308b18b73..2b502d635b 100644 --- a/lib/yang_wrappers.c +++ b/lib/yang_wrappers.c @@ -22,6 +22,7 @@ #include "log.h" #include "lib_errors.h" #include "northbound.h" +#include "printfrr.h" static const char *yang_get_default_value(const char *xpath) { @@ -443,7 +444,7 @@ struct yang_data *yang_data_new_int64(const char *xpath, int64_t value) { char value_str[BUFSIZ]; - snprintf(value_str, sizeof(value_str), "%" PRId64, value); + snprintfrr(value_str, sizeof(value_str), "%" PRId64, value); return yang_data_new(xpath, value_str); } @@ -651,7 +652,7 @@ struct yang_data *yang_data_new_uint64(const char *xpath, uint64_t value) { char value_str[BUFSIZ]; - snprintf(value_str, sizeof(value_str), "%" PRIu64, value); + snprintfrr(value_str, sizeof(value_str), "%" PRIu64, value); return yang_data_new(xpath, value_str); } diff --git a/lib/zclient.c b/lib/zclient.c index eac6c7081d..5402e9c3c5 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -52,7 +52,8 @@ static void zclient_event(enum event, struct zclient *); static void zebra_interface_if_set_value(struct stream *s, struct interface *ifp); -struct zclient_options zclient_options_default = {.receive_notify = false}; +struct zclient_options zclient_options_default = {.receive_notify = false, + .synchronous = false}; struct sockaddr_storage zclient_addr; socklen_t zclient_addr_len; @@ -76,6 +77,7 @@ struct zclient *zclient_new(struct thread_master *master, zclient->master = master; 
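The libyang context helpers above gain an embedded_modules switch: when it is set, yang_ctx_new_setup() registers the module-import callback so that YANG modules built into the binary can be found, and when it is clear the context only searches the on-disk YANG_MODELS_PATH. A short sketch of the intent, assuming only the new signatures shown in these hunks (error handling omitted):

    /* Daemon start-up: keep the embedded-module import callback. */
    yang_init(true);

    /* A translator context that should load modules from disk only. */
    struct ly_ctx *ctx = yang_ctx_new_setup(false);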
zclient->receive_notify = opt->receive_notify; + zclient->synchronous = opt->synchronous; return zclient; } @@ -374,11 +376,11 @@ static int zebra_message_send(struct zclient *zclient, int command, return zclient_send_message(zclient); } -static int zebra_hello_send(struct zclient *zclient) +int zclient_send_hello(struct zclient *zclient) { struct stream *s; - if (zclient->redist_default) { + if (zclient->redist_default || zclient->synchronous) { s = zclient->obuf; stream_reset(s); @@ -390,6 +392,10 @@ static int zebra_hello_send(struct zclient *zclient) stream_putc(s, 1); else stream_putc(s, 0); + if (zclient->synchronous) + stream_putc(s, 1); + else + stream_putc(s, 0); stream_putw_at(s, 0, stream_get_endp(s)); return zclient_send_message(zclient); @@ -629,7 +635,7 @@ int zclient_start(struct zclient *zclient) /* Create read thread. */ zclient_event(ZCLIENT_READ, zclient); - zebra_hello_send(zclient); + zclient_send_hello(zclient); zebra_message_send(zclient, ZEBRA_INTERFACE_ADD, VRF_DEFAULT); @@ -690,8 +696,9 @@ static int zclient_connect(struct thread *t) return zclient_start(zclient); } -int zclient_send_rnh(struct zclient *zclient, int command, struct prefix *p, - bool exact_match, vrf_id_t vrf_id) +int zclient_send_rnh(struct zclient *zclient, int command, + const struct prefix *p, bool exact_match, + vrf_id_t vrf_id) { struct stream *s; @@ -897,6 +904,7 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh, } } + /* If present, set 'weight' flag before encoding flags */ if (api_nh->weight) SET_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_WEIGHT); @@ -941,6 +949,10 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh, stream_put(s, &(api_nh->rmac), sizeof(struct ethaddr)); + /* Index of backup nexthop */ + if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) + stream_putc(s, api_nh->backup_idx); + done: return ret; } @@ -978,7 +990,7 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) stream_putc(s, api->prefix.family); psize = PSIZE(api->prefix.prefixlen); stream_putc(s, api->prefix.prefixlen); - stream_write(s, (uint8_t *)&api->prefix.u.prefix, psize); + stream_write(s, &api->prefix.u.prefix, psize); if (CHECK_FLAG(api->message, ZAPI_MESSAGE_SRCPFX)) { psize = PSIZE(api->src_prefix.prefixlen); @@ -1000,6 +1012,10 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) return -1; } + /* We canonicalize the nexthops by sorting them; this allows + * zebra to resolve the list of nexthops to a nexthop-group + * more efficiently. + */ zapi_nexthop_group_sort(api->nexthops, api->nexthop_num); stream_putw(s, api->nexthop_num); @@ -1026,6 +1042,50 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) } } + /* Backup nexthops */ + if (CHECK_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS)) { + /* limit the number of nexthops if necessary */ + if (api->backup_nexthop_num > MULTIPATH_NUM) { + char buf[PREFIX2STR_BUFFER]; + + prefix2str(&api->prefix, buf, sizeof(buf)); + flog_err( + EC_LIB_ZAPI_ENCODE, + "%s: prefix %s: can't encode %u backup nexthops (maximum is %u)", + __func__, buf, api->backup_nexthop_num, + MULTIPATH_NUM); + return -1; + } + + /* Note that we do not sort the list of backup nexthops - + * this list is treated as an array and indexed by each + * primary nexthop that is associated with a backup. 
+ */ + + stream_putw(s, api->backup_nexthop_num); + + for (i = 0; i < api->backup_nexthop_num; i++) { + api_nh = &api->backup_nexthops[i]; + + /* MPLS labels for BGP-LU or Segment Routing */ + if (api_nh->label_num > MPLS_MAX_LABELS) { + char buf[PREFIX2STR_BUFFER]; + + prefix2str(&api->prefix, buf, sizeof(buf)); + + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: prefix %s: backup: can't encode %u labels (maximum is %u)", + __func__, buf, + api_nh->label_num, + MPLS_MAX_LABELS); + return -1; + } + + if (zapi_nexthop_encode(s, api_nh, api->flags) != 0) + return -1; + } + } + /* Attributes. */ if (CHECK_FLAG(api->message, ZAPI_MESSAGE_DISTANCE)) stream_putc(s, api->distance); @@ -1101,6 +1161,10 @@ static int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh, STREAM_GET(&(api_nh->rmac), s, sizeof(struct ethaddr)); + /* Backup nexthop index */ + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) + STREAM_GETC(s, api_nh->backup_idx); + /* Success */ ret = 0; @@ -1207,6 +1271,24 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api) } } + /* Backup nexthops. */ + if (CHECK_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS)) { + STREAM_GETW(s, api->backup_nexthop_num); + if (api->backup_nexthop_num > MULTIPATH_NUM) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: invalid number of backup nexthops (%u)", + __func__, api->backup_nexthop_num); + return -1; + } + + for (i = 0; i < api->backup_nexthop_num; i++) { + api_nh = &api->backup_nexthops[i]; + + if (zapi_nexthop_decode(s, api_nh, api->flags) != 0) + return -1; + } + } + /* Attributes. */ if (CHECK_FLAG(api->message, ZAPI_MESSAGE_DISTANCE)) STREAM_GETC(s, api->distance); @@ -1381,7 +1463,7 @@ stream_failure: return false; } -struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh) +struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh) { struct nexthop *n = nexthop_new(); @@ -1398,6 +1480,11 @@ struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh) znh->labels); } + if (CHECK_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) { + SET_FLAG(n->flags, NEXTHOP_FLAG_HAS_BACKUP); + n->backup_idx = znh->backup_idx; + } + return n; } @@ -1413,10 +1500,16 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, znh->type = nh->type; znh->vrf_id = nh->vrf_id; + znh->weight = nh->weight; znh->ifindex = nh->ifindex; znh->gate = nh->gate; if (nh->nh_label && (nh->nh_label->num_labels > 0)) { + + /* Validate */ + if (nh->nh_label->num_labels > MPLS_MAX_LABELS) + return -1; + for (i = 0; i < nh->nh_label->num_labels; i++) znh->labels[i] = nh->nh_label->label[i]; @@ -1424,10 +1517,31 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_LABEL); } + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP); + znh->backup_idx = nh->backup_idx; + } + return 0; } /* + * Wrapper that converts backup nexthop + */ +int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh, + const struct nexthop *nh) +{ + int ret; + + /* Ensure that zapi flags are correct: backups don't have backups */ + ret = zapi_nexthop_from_nexthop(znh, nh); + if (ret == 0) + UNSET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP); + + return ret; +} + +/* * Decode the nexthop-tracking update message */ bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr) @@ -1521,33 +1635,34 @@ int zebra_redistribute_default_send(int command, struct zclient *zclient, } /* Get prefix in ZServ format; family should be filled in on prefix */ 
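The zclient hunks above add optional backup nexthops to ZAPI route messages: primary nexthops are still sorted into canonical order before encoding, while backups travel as a separate, unsorted array that each primary references through backup_idx, and both encode and decode bound-check against MULTIPATH_NUM and MPLS_MAX_LABELS. A minimal client-side sketch of filling in those fields, assuming the flags and struct members introduced here (primary_gw and backup_gw are hypothetical local variables; the usual route type/safi/vrf setup and error handling are omitted):

    struct zapi_route api = {};
    struct zapi_nexthop *nh = &api.nexthops[0];
    struct zapi_nexthop *backup = &api.backup_nexthops[0];

    SET_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS);

    api.nexthop_num = 1;
    nh->type = NEXTHOP_TYPE_IPV4;
    nh->gate.ipv4 = primary_gw;                /* hypothetical primary gateway */
    SET_FLAG(nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP);
    nh->backup_idx = 0;                        /* index into backup_nexthops[] */

    api.backup_nexthop_num = 1;
    backup->type = NEXTHOP_TYPE_IPV4;
    backup->gate.ipv4 = backup_gw;             /* hypothetical backup gateway */

    zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);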
-static void zclient_stream_get_prefix(struct stream *s, struct prefix *p) +static int zclient_stream_get_prefix(struct stream *s, struct prefix *p) { size_t plen = prefix_blen(p); uint8_t c; p->prefixlen = 0; if (plen == 0) - return; + return -1; - stream_get(&p->u.prefix, s, plen); + STREAM_GET(&p->u.prefix, s, plen); STREAM_GETC(s, c); p->prefixlen = MIN(plen * 8, c); + return 0; stream_failure: - return; + return -1; } /* Router-id update from zebra daemon. */ -void zebra_router_id_update_read(struct stream *s, struct prefix *rid) +int zebra_router_id_update_read(struct stream *s, struct prefix *rid) { /* Fetch interface address. */ STREAM_GETC(s, rid->family); - zclient_stream_get_prefix(s, rid); + return zclient_stream_get_prefix(s, rid); stream_failure: - return; + return -1; } /* Interface addition from zebra daemon. */ @@ -1596,24 +1711,36 @@ stream_failure: * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -static void zclient_vrf_add(struct zclient *zclient, vrf_id_t vrf_id) +static int zclient_vrf_add(struct zclient *zclient, vrf_id_t vrf_id) { struct vrf *vrf; - char vrfname_tmp[VRF_NAMSIZ]; + char vrfname_tmp[VRF_NAMSIZ + 1] = {}; struct vrf_data data; - stream_get(&data, zclient->ibuf, sizeof(struct vrf_data)); + STREAM_GET(&data, zclient->ibuf, sizeof(struct vrf_data)); /* Read interface name. */ - stream_get(vrfname_tmp, zclient->ibuf, VRF_NAMSIZ); + STREAM_GET(vrfname_tmp, zclient->ibuf, VRF_NAMSIZ); + + if (strlen(vrfname_tmp) == 0) + goto stream_failure; - /* Lookup/create vrf by vrf_id. */ + /* Lookup/create vrf by name, then vrf_id. */ vrf = vrf_get(vrf_id, vrfname_tmp); + + /* If there's already a VRF with this name, don't create vrf */ + if (!vrf) + return 0; + vrf->data.l.table_id = data.l.table_id; memcpy(vrf->data.l.netns_name, data.l.netns_name, NS_NAMSIZ); /* overwrite default vrf */ if (vrf_id == VRF_DEFAULT) vrf_set_default_name(vrfname_tmp, false); vrf_enable(vrf); + + return 0; +stream_failure: + return -1; } static void zclient_vrf_delete(struct zclient *zclient, vrf_id_t vrf_id) @@ -1634,21 +1761,32 @@ static void zclient_vrf_delete(struct zclient *zclient, vrf_id_t vrf_id) vrf_delete(vrf); } -static void zclient_interface_add(struct zclient *zclient, vrf_id_t vrf_id) +static int zclient_interface_add(struct zclient *zclient, vrf_id_t vrf_id) { struct interface *ifp; - char ifname_tmp[INTERFACE_NAMSIZ]; + char ifname_tmp[INTERFACE_NAMSIZ + 1] = {}; struct stream *s = zclient->ibuf; /* Read interface name. */ - stream_get(ifname_tmp, s, INTERFACE_NAMSIZ); + STREAM_GET(ifname_tmp, s, INTERFACE_NAMSIZ); /* Lookup/create interface by name. */ + if (!vrf_get(vrf_id, NULL)) { + zlog_debug( + "Rx'd interface add from Zebra, but VRF %u does not exist", + vrf_id); + return -1; + } + ifp = if_get_by_name(ifname_tmp, vrf_id); zebra_interface_if_set_value(s, ifp); if_new_via_zapi(ifp); + + return 0; +stream_failure: + return -1; } /* @@ -1660,10 +1798,10 @@ static void zclient_interface_add(struct zclient *zclient, vrf_id_t vrf_id) struct interface *zebra_interface_state_read(struct stream *s, vrf_id_t vrf_id) { struct interface *ifp; - char ifname_tmp[INTERFACE_NAMSIZ]; + char ifname_tmp[INTERFACE_NAMSIZ + 1] = {}; /* Read interface name. */ - stream_get(ifname_tmp, s, INTERFACE_NAMSIZ); + STREAM_GET(ifname_tmp, s, INTERFACE_NAMSIZ); /* Lookup this by interface index. 
*/ ifp = if_lookup_by_name(ifname_tmp, vrf_id); @@ -1677,6 +1815,8 @@ struct interface *zebra_interface_state_read(struct stream *s, vrf_id_t vrf_id) zebra_interface_if_set_value(s, ifp); return ifp; +stream_failure: + return NULL; } static void zclient_interface_delete(struct zclient *zclient, vrf_id_t vrf_id) @@ -1730,21 +1870,23 @@ static void zclient_handle_error(ZAPI_CALLBACK_ARGS) (*zclient->handle_error)(error); } -static void link_params_set_value(struct stream *s, struct if_link_params *iflp) +static int link_params_set_value(struct stream *s, struct if_link_params *iflp) { if (iflp == NULL) - return; + return -1; - iflp->lp_status = stream_getl(s); - iflp->te_metric = stream_getl(s); - iflp->max_bw = stream_getf(s); - iflp->max_rsv_bw = stream_getf(s); - uint32_t bwclassnum = stream_getl(s); + uint32_t bwclassnum; + + STREAM_GETL(s, iflp->lp_status); + STREAM_GETL(s, iflp->te_metric); + STREAM_GETF(s, iflp->max_bw); + STREAM_GETF(s, iflp->max_rsv_bw); + STREAM_GETL(s, bwclassnum); { unsigned int i; for (i = 0; i < bwclassnum && i < MAX_CLASS_TYPE; i++) - iflp->unrsv_bw[i] = stream_getf(s); + STREAM_GETF(s, iflp->unrsv_bw[i]); if (i < bwclassnum) flog_err( EC_LIB_ZAPI_MISSMATCH, @@ -1752,19 +1894,23 @@ static void link_params_set_value(struct stream *s, struct if_link_params *iflp) " - outdated library?", __func__, bwclassnum, MAX_CLASS_TYPE); } - iflp->admin_grp = stream_getl(s); - iflp->rmt_as = stream_getl(s); + STREAM_GETL(s, iflp->admin_grp); + STREAM_GETL(s, iflp->rmt_as); iflp->rmt_ip.s_addr = stream_get_ipv4(s); - iflp->av_delay = stream_getl(s); - iflp->min_delay = stream_getl(s); - iflp->max_delay = stream_getl(s); - iflp->delay_var = stream_getl(s); + STREAM_GETL(s, iflp->av_delay); + STREAM_GETL(s, iflp->min_delay); + STREAM_GETL(s, iflp->max_delay); + STREAM_GETL(s, iflp->delay_var); + + STREAM_GETF(s, iflp->pkt_loss); + STREAM_GETF(s, iflp->res_bw); + STREAM_GETF(s, iflp->ava_bw); + STREAM_GETF(s, iflp->use_bw); - iflp->pkt_loss = stream_getf(s); - iflp->res_bw = stream_getf(s); - iflp->ava_bw = stream_getf(s); - iflp->use_bw = stream_getf(s); + return 0; +stream_failure: + return -1; } struct interface *zebra_interface_link_params_read(struct stream *s, @@ -1773,9 +1919,7 @@ struct interface *zebra_interface_link_params_read(struct stream *s, struct if_link_params *iflp; ifindex_t ifindex; - assert(s); - - ifindex = stream_getl(s); + STREAM_GETL(s, ifindex); struct interface *ifp = if_lookup_by_index(ifindex, vrf_id); @@ -1789,36 +1933,41 @@ struct interface *zebra_interface_link_params_read(struct stream *s, if ((iflp = if_link_params_get(ifp)) == NULL) return NULL; - link_params_set_value(s, iflp); + if (link_params_set_value(s, iflp) != 0) + goto stream_failure; return ifp; + +stream_failure: + return NULL; } static void zebra_interface_if_set_value(struct stream *s, struct interface *ifp) { uint8_t link_params_status = 0; - ifindex_t old_ifindex; + ifindex_t old_ifindex, new_ifindex; old_ifindex = ifp->ifindex; /* Read interface's index. */ - if_set_index(ifp, stream_getl(s)); - ifp->status = stream_getc(s); + STREAM_GETL(s, new_ifindex); + if_set_index(ifp, new_ifindex); + STREAM_GETC(s, ifp->status); /* Read interface's value. 
*/ - ifp->flags = stream_getq(s); - ifp->ptm_enable = stream_getc(s); - ifp->ptm_status = stream_getc(s); - ifp->metric = stream_getl(s); - ifp->speed = stream_getl(s); - ifp->mtu = stream_getl(s); - ifp->mtu6 = stream_getl(s); - ifp->bandwidth = stream_getl(s); - ifp->link_ifindex = stream_getl(s); - ifp->ll_type = stream_getl(s); - ifp->hw_addr_len = stream_getl(s); + STREAM_GETQ(s, ifp->flags); + STREAM_GETC(s, ifp->ptm_enable); + STREAM_GETC(s, ifp->ptm_status); + STREAM_GETL(s, ifp->metric); + STREAM_GETL(s, ifp->speed); + STREAM_GETL(s, ifp->mtu); + STREAM_GETL(s, ifp->mtu6); + STREAM_GETL(s, ifp->bandwidth); + STREAM_GETL(s, ifp->link_ifindex); + STREAM_GETL(s, ifp->ll_type); + STREAM_GETL(s, ifp->hw_addr_len); if (ifp->hw_addr_len) - stream_get(ifp->hw_addr, s, + STREAM_GET(ifp->hw_addr, s, MIN(ifp->hw_addr_len, INTERFACE_HWADDR_MAX)); /* Read Traffic Engineering status */ @@ -1830,6 +1979,11 @@ static void zebra_interface_if_set_value(struct stream *s, } nexthop_group_interface_state_change(ifp, old_ifindex); + + return; +stream_failure: + zlog_err("Could not parse interface values; aborting"); + assert(!"Failed to parse interface values"); } size_t zebra_interface_link_params_write(struct stream *s, @@ -1928,7 +2082,7 @@ struct connected *zebra_interface_address_read(int type, struct stream *s, memset(&d, 0, sizeof(d)); /* Get interface index. */ - ifindex = stream_getl(s); + STREAM_GETL(s, ifindex); /* Lookup index. */ ifp = if_lookup_by_index(ifindex, vrf_id); @@ -1941,16 +2095,18 @@ struct connected *zebra_interface_address_read(int type, struct stream *s, } /* Fetch flag. */ - ifc_flags = stream_getc(s); + STREAM_GETC(s, ifc_flags); /* Fetch interface address. */ - d.family = p.family = stream_getc(s); + STREAM_GETC(s, d.family); + p.family = d.family; plen = prefix_blen(&d); - zclient_stream_get_prefix(s, &p); + if (zclient_stream_get_prefix(s, &p) != 0) + goto stream_failure; /* Fetch destination address. */ - stream_get(&d.u.prefix, s, plen); + STREAM_GET(&d.u.prefix, s, plen); /* N.B. NULL destination pointers are encoded as all zeroes */ dp = memconstant(&d.u.prefix, 0, plen) ? NULL : &d; @@ -1986,6 +2142,9 @@ struct connected *zebra_interface_address_read(int type, struct stream *s, } return ifc; + +stream_failure: + return NULL; } /* @@ -2021,7 +2180,7 @@ zebra_interface_nbr_address_read(int type, struct stream *s, vrf_id_t vrf_id) struct nbr_connected *ifc; /* Get interface index. */ - ifindex = stream_getl(s); + STREAM_GETL(s, ifindex); /* Lookup index. */ ifp = if_lookup_by_index(ifindex, vrf_id); @@ -2034,9 +2193,9 @@ zebra_interface_nbr_address_read(int type, struct stream *s, vrf_id_t vrf_id) return NULL; } - p.family = stream_getc(s); - stream_get(&p.u.prefix, s, prefix_blen(&p)); - p.prefixlen = stream_getc(s); + STREAM_GETC(s, p.family); + STREAM_GET(&p.u.prefix, s, prefix_blen(&p)); + STREAM_GETC(s, p.prefixlen); if (type == ZEBRA_INTERFACE_NBR_ADDRESS_ADD) { /* Currently only supporting P2P links, so any new RA source @@ -2060,18 +2219,21 @@ zebra_interface_nbr_address_read(int type, struct stream *s, vrf_id_t vrf_id) } return ifc; + +stream_failure: + return NULL; } struct interface *zebra_interface_vrf_update_read(struct stream *s, vrf_id_t vrf_id, vrf_id_t *new_vrf_id) { - char ifname[INTERFACE_NAMSIZ]; + char ifname[INTERFACE_NAMSIZ + 1] = {}; struct interface *ifp; vrf_id_t new_id; /* Read interface name. */ - stream_get(ifname, s, INTERFACE_NAMSIZ); + STREAM_GET(ifname, s, INTERFACE_NAMSIZ); /* Lookup interface. 
*/ ifp = if_lookup_by_name(ifname, vrf_id); @@ -2083,10 +2245,13 @@ struct interface *zebra_interface_vrf_update_read(struct stream *s, } /* Fetch new VRF Id. */ - new_id = stream_getl(s); + STREAM_GETL(s, new_id); *new_vrf_id = new_id; return ifp; + +stream_failure: + return NULL; } /* filter unwanted messages until the expected one arrives */ @@ -2195,8 +2360,11 @@ int lm_label_manager_connect(struct zclient *zclient, int async) s = zclient->ibuf; /* read instance and proto */ - uint8_t proto = stream_getc(s); - uint16_t instance = stream_getw(s); + uint8_t proto; + uint16_t instance; + + STREAM_GETC(s, proto); + STREAM_GETW(s, instance); /* sanity */ if (proto != zclient->redist_default) @@ -2211,11 +2379,14 @@ int lm_label_manager_connect(struct zclient *zclient, int async) instance, zclient->instance); /* result code */ - result = stream_getc(s); + STREAM_GETC(s, result); if (zclient_debug) zlog_debug("LM connect-response received, result %u", result); return (int)result; + +stream_failure: + return -1; } /* @@ -2323,8 +2494,11 @@ int lm_get_label_chunk(struct zclient *zclient, uint8_t keep, uint32_t base, s = zclient->ibuf; /* read proto and instance */ - uint8_t proto = stream_getc(s); - uint16_t instance = stream_getw(s); + uint8_t proto; + uint8_t instance; + + STREAM_GETC(s, proto); + STREAM_GETW(s, instance); /* sanities */ if (proto != zclient->redist_default) @@ -2346,10 +2520,10 @@ int lm_get_label_chunk(struct zclient *zclient, uint8_t keep, uint32_t base, } /* keep */ - response_keep = stream_getc(s); + STREAM_GETC(s, response_keep); /* start and end labels */ - *start = stream_getl(s); - *end = stream_getl(s); + STREAM_GETL(s, *start); + STREAM_GETL(s, *end); /* not owning this response */ if (keep != response_keep) { @@ -2371,6 +2545,9 @@ int lm_get_label_chunk(struct zclient *zclient, uint8_t keep, uint32_t base, response_keep); return 0; + +stream_failure: + return -1; } /** @@ -2760,7 +2937,7 @@ int zebra_send_pw(struct zclient *zclient, int command, struct zapi_pw *pw) /* * Receive PW status update from Zebra and send it to LDE process. */ -void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw) +int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw) { struct stream *s; @@ -2769,8 +2946,12 @@ void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw) /* Get data. */ stream_get(pw->ifname, s, IF_NAMESIZE); - pw->ifindex = stream_getl(s); - pw->status = stream_getl(s); + STREAM_GETL(s, pw->ifindex); + STREAM_GETL(s, pw->status); + + return 0; +stream_failure: + return -1; } static void zclient_capability_decode(ZAPI_CALLBACK_ARGS) @@ -2781,7 +2962,14 @@ static void zclient_capability_decode(ZAPI_CALLBACK_ARGS) uint8_t mpls_enabled; STREAM_GETL(s, vrf_backend); - vrf_configure_backend(vrf_backend); + + if (vrf_backend < 0 || vrf_configure_backend(vrf_backend)) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: Garbage VRF backend type: %d\n", __func__, + vrf_backend); + goto stream_failure; + } + memset(&cap, 0, sizeof(cap)); STREAM_GETC(s, mpls_enabled); diff --git a/lib/zclient.h b/lib/zclient.h index e6f4c747e3..214226cf5f 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -255,6 +255,9 @@ struct zclient { /* Do we care about failure events for route install? */ bool receive_notify; + /* Is this a synchronous client? */ + bool synchronous; + /* Socket to zebra daemon. 
*/ int sock; @@ -338,6 +341,9 @@ struct zclient { #define ZAPI_MESSAGE_TAG 0x08 #define ZAPI_MESSAGE_MTU 0x10 #define ZAPI_MESSAGE_SRCPFX 0x20 +/* Backup nexthops are present */ +#define ZAPI_MESSAGE_BACKUP_NEXTHOPS 0x40 + /* * This should only be used by a DAEMON that needs to communicate * the table being used is not in the VRF. You must pass the @@ -374,14 +380,21 @@ struct zapi_nexthop { struct ethaddr rmac; uint32_t weight; + + /* Index of backup nexthop */ + uint8_t backup_idx; }; /* - * ZAPI nexthop flags values + * ZAPI nexthop flags values - we're encoding a single octet + * initially, so ensure that the on-the-wire encoding continues + * to match the number of valid flags. */ + #define ZAPI_NEXTHOP_FLAG_ONLINK 0x01 #define ZAPI_NEXTHOP_FLAG_LABEL 0x02 #define ZAPI_NEXTHOP_FLAG_WEIGHT 0x04 +#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */ /* * Some of these data structures do not map easily to @@ -445,6 +458,10 @@ struct zapi_route { uint16_t nexthop_num; struct zapi_nexthop nexthops[MULTIPATH_NUM]; + /* Support backup routes for IP FRR, TI-LFA, traffic engineering */ + uint16_t backup_nexthop_num; + struct zapi_nexthop backup_nexthops[MULTIPATH_NUM]; + uint8_t distance; uint32_t metric; @@ -569,6 +586,7 @@ enum zebra_neigh_state { ZEBRA_NEIGH_INACTIVE = 0, ZEBRA_NEIGH_ACTIVE = 1 }; struct zclient_options { bool receive_notify; + bool synchronous; }; extern struct zclient_options zclient_options_default; @@ -705,7 +723,7 @@ zebra_interface_nbr_address_read(int, struct stream *, vrf_id_t); extern struct interface *zebra_interface_vrf_update_read(struct stream *s, vrf_id_t vrf_id, vrf_id_t *new_vrf_id); -extern void zebra_router_id_update_read(struct stream *s, struct prefix *rid); +extern int zebra_router_id_update_read(struct stream *s, struct prefix *rid); extern struct interface *zebra_interface_link_params_read(struct stream *s, vrf_id_t vrf_id); @@ -734,11 +752,12 @@ extern int zapi_labels_decode(struct stream *s, struct zapi_labels *zl); extern int zebra_send_pw(struct zclient *zclient, int command, struct zapi_pw *pw); -extern void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw); +extern int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, + struct zapi_pw_status *pw); extern int zclient_route_send(uint8_t, struct zclient *, struct zapi_route *); extern int zclient_send_rnh(struct zclient *zclient, int command, - struct prefix *p, bool exact_match, + const struct prefix *p, bool exact_match, vrf_id_t vrf_id); int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh, uint32_t api_flags); @@ -765,9 +784,12 @@ bool zapi_iptable_notify_decode(struct stream *s, uint32_t *unique, enum zapi_iptable_notify_owner *note); -extern struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh); +extern struct nexthop * +nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh); int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, const struct nexthop *nh); +int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh, + const struct nexthop *nh); extern bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr); @@ -796,4 +818,9 @@ extern void zclient_send_mlag_deregister(struct zclient *client); extern void zclient_send_mlag_data(struct zclient *client, struct stream *client_s); +/* Send the hello message. + * Returns 0 for success or -1 on an I/O error. 
+ */ +extern int zclient_send_hello(struct zclient *client); + #endif /* _ZEBRA_ZCLIENT_H */ diff --git a/m4/ax_python.m4 b/m4/ax_python.m4 index 69809184ee..d293da5257 100644 --- a/m4/ax_python.m4 +++ b/m4/ax_python.m4 @@ -3,7 +3,7 @@ dnl 2019 David Lamparter for NetDEF, Inc. dnl SPDX-License-Identifier: GPL-2.0-or-later dnl the _ at the beginning will be cut off (to support the empty version string) -m4_define_default([_FRR_PY_VERS], [_3 _ _2 _3.7 _3.6 _3.5 _3.4 _3.3 _3.2 _2.7]) +m4_define_default([_FRR_PY_VERS], [_3 _ _2 _3.8 _3.7 _3.6 _3.5 _3.4 _3.3 _3.2 _2.7]) dnl check basic interpreter properties (py2/py3) dnl doubles as simple check whether the interpreter actually works diff --git a/nhrpd/netlink_arp.c b/nhrpd/netlink_arp.c index 8fd8280c51..cf338a0876 100644 --- a/nhrpd/netlink_arp.c +++ b/nhrpd/netlink_arp.c @@ -65,11 +65,12 @@ static void netlink_neigh_msg(struct nlmsghdr *msg, struct zbuf *zb) struct nhrp_cache *c; struct interface *ifp; struct zbuf payload; - union sockunion addr; + union sockunion addr, lladdr; size_t len; - char buf[SU_ADDRSTRLEN]; + char buf[4][SU_ADDRSTRLEN]; int state; + memset(&lladdr, 0, sizeof(lladdr)); ndm = znl_pull(zb, sizeof(*ndm)); if (!ndm) return; @@ -82,6 +83,10 @@ static void netlink_neigh_msg(struct nlmsghdr *msg, struct zbuf *zb) sockunion_set(&addr, ndm->ndm_family, zbuf_pulln(&payload, len), len); break; + case NDA_LLADDR: + sockunion_set(&lladdr, ndm->ndm_family, + zbuf_pulln(&payload, len), len); + break; } } @@ -93,20 +98,34 @@ static void netlink_neigh_msg(struct nlmsghdr *msg, struct zbuf *zb) if (!c) return; - if (msg->nlmsg_type == RTM_GETNEIGH) { - debugf(NHRP_DEBUG_KERNEL, "Netlink: who-has %s dev %s", - sockunion2str(&addr, buf, sizeof(buf)), ifp->name); + debugf(NHRP_DEBUG_KERNEL, + "Netlink: %s %s dev %s lladdr %s nud 0x%x cache used %u type %u", + (msg->nlmsg_type == RTM_GETNEIGH) + ? "who-has" + : (msg->nlmsg_type == RTM_NEWNEIGH) ? "new-neigh" + : "del-neigh", + sockunion2str(&addr, buf[0], sizeof(buf[0])), ifp->name, + sockunion2str(&lladdr, buf[1], sizeof(buf[1])), ndm->ndm_state, + c->used, c->cur.type); + if (msg->nlmsg_type == RTM_GETNEIGH) { if (c->cur.type >= NHRP_CACHE_CACHED) { nhrp_cache_set_used(c, 1); - netlink_update_binding(ifp, &addr, - &c->cur.peer->vc->remote.nbma); + debugf(NHRP_DEBUG_KERNEL, + "Netlink: update binding for %s dev %s from c %s peer.vc.nbma %s to lladdr %s", + sockunion2str(&addr, buf[0], sizeof(buf[0])), + ifp->name, + sockunion2str(&c->cur.remote_nbma_natoa, buf[1], + sizeof(buf[1])), + sockunion2str(&c->cur.peer->vc->remote.nbma, + buf[2], sizeof(buf[2])), + sockunion2str(&lladdr, buf[3], sizeof(buf[3]))); + /* In case of shortcuts, nbma is given by lladdr, not + * vc->remote.nbma. + */ + netlink_update_binding(ifp, &addr, &lladdr); } } else { - debugf(NHRP_DEBUG_KERNEL, "Netlink: update %s dev %s nud %x", - sockunion2str(&addr, buf, sizeof(buf)), ifp->name, - ndm->ndm_state); - state = (msg->nlmsg_type == RTM_NEWNEIGH) ? 
ndm->ndm_state : NUD_FAILED; nhrp_cache_set_used(c, state == NUD_REACHABLE); diff --git a/nhrpd/nhrp_cache.c b/nhrpd/nhrp_cache.c index 81f7d99423..42f6a88f95 100644 --- a/nhrpd/nhrp_cache.c +++ b/nhrpd/nhrp_cache.c @@ -119,12 +119,43 @@ static void nhrp_cache_update_route(struct nhrp_cache *c) { struct prefix pfx; struct nhrp_peer *p = c->cur.peer; + char buf[3][SU_ADDRSTRLEN]; + struct nhrp_interface *nifp; sockunion2hostprefix(&c->remote_addr, &pfx); if (p && nhrp_peer_check(p, 1)) { - netlink_update_binding(p->ifp, &c->remote_addr, - &p->vc->remote.nbma); + if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) { + /* remote_nbma_natoa is already set. Therefore, binding + * should be updated to this value and not vc's remote + * nbma. + */ + debugf(NHRP_DEBUG_COMMON, + "cache (remote_nbma_natoa set): Update binding for %s dev %s from (deleted) peer.vc.nbma %s to %s", + sockunion2str(&c->remote_addr, buf[0], + sizeof(buf[0])), + p->ifp->name, + sockunion2str(&p->vc->remote.nbma, buf[1], + sizeof(buf[1])), + sockunion2str(&c->cur.remote_nbma_natoa, buf[2], + sizeof(buf[2]))); + + netlink_update_binding(p->ifp, &c->remote_addr, + &c->cur.remote_nbma_natoa); + } else { + /* update binding to peer->vc->remote->nbma */ + debugf(NHRP_DEBUG_COMMON, + "cache (remote_nbma_natoa unspec): Update binding for %s dev %s from (deleted) to peer.vc.nbma %s", + sockunion2str(&c->remote_addr, buf[0], + sizeof(buf[0])), + p->ifp->name, + sockunion2str(&p->vc->remote.nbma, buf[1], + sizeof(buf[1]))); + + netlink_update_binding(p->ifp, &c->remote_addr, + &p->vc->remote.nbma); + } + nhrp_route_announce(1, c->cur.type, &pfx, c->ifp, NULL, c->cur.mtu); if (c->cur.type >= NHRP_CACHE_DYNAMIC) { @@ -139,6 +170,17 @@ static void nhrp_cache_update_route(struct nhrp_cache *c) c->route_installed = 1; } } else { + /* debug the reason for peer check fail */ + if (p) { + nifp = p->ifp->info; + debugf(NHRP_DEBUG_COMMON, + "cache (peer check failed: online?%d requested?%d ipsec?%d)", + p->online, p->requested, + nifp->ipsec_profile ? 1 : 0); + } else + debugf(NHRP_DEBUG_COMMON, + "cache (peer check failed: no p)"); + if (c->nhrp_route_installed) { nhrp_route_update_nhrp(&pfx, NULL); c->nhrp_route_installed = 0; @@ -207,10 +249,10 @@ static void nhrp_cache_update_timers(struct nhrp_cache *c) static void nhrp_cache_authorize_binding(struct nhrp_reqid *r, void *arg) { struct nhrp_cache *c = container_of(r, struct nhrp_cache, eventid); - char buf[SU_ADDRSTRLEN]; + char buf[3][SU_ADDRSTRLEN]; debugf(NHRP_DEBUG_COMMON, "cache: %s %s: %s", c->ifp->name, - sockunion2str(&c->remote_addr, buf, sizeof(buf)), + sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0])), (const char *)arg); nhrp_reqid_free(&nhrp_event_reqid, r); @@ -230,6 +272,26 @@ static void nhrp_cache_authorize_binding(struct nhrp_reqid *r, void *arg) if (c->cur.peer) nhrp_peer_notify_add(c->cur.peer, &c->peer_notifier, nhrp_cache_peer_notifier); + + if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) { + debugf(NHRP_DEBUG_COMMON, + "cache: update binding for %s dev %s from (deleted) peer.vc.nbma %s to %s", + sockunion2str(&c->remote_addr, buf[0], + sizeof(buf[0])), + c->ifp->name, + (c->cur.peer ? 
sockunion2str( + &c->cur.peer->vc->remote.nbma, buf[1], + sizeof(buf[1])) + : "(no peer)"), + sockunion2str(&c->cur.remote_nbma_natoa, buf[2], + sizeof(buf[2]))); + + if (c->cur.peer) + netlink_update_binding( + c->cur.peer->ifp, &c->remote_addr, + &c->cur.remote_nbma_natoa); + } + nhrp_cache_update_route(c); notifier_call(&c->notifier_list, NOTIFY_CACHE_BINDING_CHANGE); } else { @@ -273,6 +335,8 @@ int nhrp_cache_update_binding(struct nhrp_cache *c, enum nhrp_cache_type type, int holding_time, struct nhrp_peer *p, uint32_t mtu, union sockunion *nbma_oa) { + char buf[2][SU_ADDRSTRLEN]; + if (c->cur.type > type || c->new.type > type) { nhrp_peer_unref(p); return 0; @@ -293,17 +357,31 @@ int nhrp_cache_update_binding(struct nhrp_cache *c, enum nhrp_cache_type type, break; } + sockunion2str(&c->cur.remote_nbma_natoa, buf[0], sizeof(buf[0])); + if (nbma_oa) + sockunion2str(nbma_oa, buf[1], sizeof(buf[1])); + nhrp_cache_reset_new(c); if (c->cur.type == type && c->cur.peer == p && c->cur.mtu == mtu) { + debugf(NHRP_DEBUG_COMMON, + "cache: same type %u, updating expiry and changing nbma addr from %s to %s", + type, buf[0], nbma_oa ? buf[1] : "(NULL)"); if (holding_time > 0) c->cur.expires = monotime(NULL) + holding_time; + if (nbma_oa) c->cur.remote_nbma_natoa = *nbma_oa; else memset(&c->cur.remote_nbma_natoa, 0, sizeof(c->cur.remote_nbma_natoa)); + nhrp_peer_unref(p); } else { + debugf(NHRP_DEBUG_COMMON, + "cache: new type %u/%u, or peer %s, or mtu %u/%u, nbma %s --> %s (map %d)", + c->cur.type, type, (c->cur.peer == p) ? "same" : "diff", + c->cur.mtu, mtu, buf[0], nbma_oa ? buf[1] : "(NULL)", + c->map); c->new.type = type; c->new.peer = p; c->new.mtu = mtu; diff --git a/nhrpd/nhrp_peer.c b/nhrpd/nhrp_peer.c index 5ca477fb58..2dc019ce65 100644 --- a/nhrpd/nhrp_peer.c +++ b/nhrpd/nhrp_peer.c @@ -314,23 +314,29 @@ void nhrp_peer_send(struct nhrp_peer *p, struct zbuf *zb) zbuf_reset(zb); } -static void nhrp_handle_resolution_req(struct nhrp_packet_parser *p) +static void nhrp_handle_resolution_req(struct nhrp_packet_parser *pp) { + struct interface *ifp = pp->ifp; struct zbuf *zb, payload; struct nhrp_packet_header *hdr; struct nhrp_cie_header *cie; struct nhrp_extension_header *ext; - struct nhrp_interface *nifp; + struct nhrp_cache *c; + union sockunion cie_nbma, cie_proto, *proto_addr, *nbma_addr; + int holdtime, prefix_len, hostprefix_len; + struct nhrp_interface *nifp = ifp->info; struct nhrp_peer *peer; + size_t paylen; + char buf[SU_ADDRSTRLEN]; - if (!(p->if_ad->flags & NHRP_IFF_SHORTCUT)) { + if (!(pp->if_ad->flags & NHRP_IFF_SHORTCUT)) { debugf(NHRP_DEBUG_COMMON, "Shortcuts disabled"); /* FIXME: Send error indication? 
*/ return; } - if (p->if_ad->network_id && p->route_type == NHRP_ROUTE_OFF_NBMA - && p->route_prefix.prefixlen < 8) { + if (pp->if_ad->network_id && pp->route_type == NHRP_ROUTE_OFF_NBMA + && pp->route_prefix.prefixlen < 8) { debugf(NHRP_DEBUG_COMMON, "Shortcut to more generic than /8 dropped"); return; @@ -338,45 +344,101 @@ static void nhrp_handle_resolution_req(struct nhrp_packet_parser *p) debugf(NHRP_DEBUG_COMMON, "Parsing and replying to Resolution Req"); - if (nhrp_route_address(p->ifp, &p->src_proto, NULL, &peer) + if (nhrp_route_address(ifp, &pp->src_proto, NULL, &peer) != NHRP_ROUTE_NBMA_NEXTHOP) return; -#if 0 - /* FIXME: Update requestors binding if CIE specifies holding time */ - nhrp_cache_update_binding( - NHRP_CACHE_CACHED, &p->src_proto, - nhrp_peer_get(p->ifp, &p->src_nbma), - htons(cie->holding_time)); -#endif + /* Copy payload CIE */ + hostprefix_len = 8 * sockunion_get_addrlen(&pp->if_ad->addr); + paylen = zbuf_used(&pp->payload); + debugf(NHRP_DEBUG_COMMON, "shortcut res_rep: paylen %zu", paylen); + + while ((cie = nhrp_cie_pull(&pp->payload, pp->hdr, &cie_nbma, + &cie_proto)) + != NULL) { + prefix_len = cie->prefix_length; + debugf(NHRP_DEBUG_COMMON, + "shortcut res_rep: parsing CIE with prefixlen=%u", + prefix_len); + if (prefix_len == 0 || prefix_len >= hostprefix_len) + prefix_len = hostprefix_len; + + if (prefix_len != hostprefix_len + && !(pp->hdr->flags + & htons(NHRP_FLAG_REGISTRATION_UNIQUE))) { + cie->code = NHRP_CODE_BINDING_NON_UNIQUE; + continue; + } + + /* We currently support only unique prefix registrations */ + if (prefix_len != hostprefix_len) { + cie->code = NHRP_CODE_ADMINISTRATIVELY_PROHIBITED; + continue; + } + + proto_addr = (sockunion_family(&cie_proto) == AF_UNSPEC) + ? &pp->src_proto + : &cie_proto; + nbma_addr = (sockunion_family(&cie_nbma) == AF_UNSPEC) + ? &pp->src_nbma + : &cie_nbma; + + holdtime = htons(cie->holding_time); + debugf(NHRP_DEBUG_COMMON, + "shortcut res_rep: holdtime is %u (if 0, using %u)", + holdtime, pp->if_ad->holdtime); + if (!holdtime) + holdtime = pp->if_ad->holdtime; + + c = nhrp_cache_get(ifp, proto_addr, 1); + if (!c) { + debugf(NHRP_DEBUG_COMMON, + "shortcut res_rep: no cache found"); + cie->code = NHRP_CODE_INSUFFICIENT_RESOURCES; + continue; + } + if (nbma_addr) + sockunion2str(nbma_addr, buf, sizeof(buf)); + + debugf(NHRP_DEBUG_COMMON, + "shortcut res_rep: updating binding for nmba addr %s", + nbma_addr ? 
buf : "(NULL)"); + if (!nhrp_cache_update_binding(c, NHRP_CACHE_DYNAMIC, holdtime, + nhrp_peer_ref(pp->peer), + htons(cie->mtu), nbma_addr)) { + cie->code = NHRP_CODE_ADMINISTRATIVELY_PROHIBITED; + continue; + } - nifp = peer->ifp->info; + cie->code = NHRP_CODE_SUCCESS; + } /* Create reply */ zb = zbuf_alloc(1500); - hdr = nhrp_packet_push(zb, NHRP_PACKET_RESOLUTION_REPLY, &p->src_nbma, - &p->src_proto, &p->dst_proto); + hdr = nhrp_packet_push(zb, NHRP_PACKET_RESOLUTION_REPLY, &pp->src_nbma, + &pp->src_proto, &pp->dst_proto); /* Copied information from request */ - hdr->flags = - p->hdr->flags & htons(NHRP_FLAG_RESOLUTION_SOURCE_IS_ROUTER - | NHRP_FLAG_RESOLUTION_SOURCE_STABLE); + hdr->flags = pp->hdr->flags + & htons(NHRP_FLAG_RESOLUTION_SOURCE_IS_ROUTER + | NHRP_FLAG_RESOLUTION_SOURCE_STABLE); hdr->flags |= htons(NHRP_FLAG_RESOLUTION_DESTINATION_STABLE | NHRP_FLAG_RESOLUTION_AUTHORATIVE); - hdr->u.request_id = p->hdr->u.request_id; + hdr->u.request_id = pp->hdr->u.request_id; - /* CIE payload */ + /* CIE payload for the reply packet */ cie = nhrp_cie_push(zb, NHRP_CODE_SUCCESS, &nifp->nbma, - &p->if_ad->addr); - cie->holding_time = htons(p->if_ad->holdtime); - cie->mtu = htons(p->if_ad->mtu); - if (p->if_ad->network_id && p->route_type == NHRP_ROUTE_OFF_NBMA) - cie->prefix_length = p->route_prefix.prefixlen; + &pp->if_ad->addr); + cie->holding_time = htons(pp->if_ad->holdtime); + cie->mtu = htons(pp->if_ad->mtu); + if (pp->if_ad->network_id && pp->route_type == NHRP_ROUTE_OFF_NBMA) + cie->prefix_length = pp->route_prefix.prefixlen; else - cie->prefix_length = 8 * sockunion_get_addrlen(&p->if_ad->addr); + cie->prefix_length = + 8 * sockunion_get_addrlen(&pp->if_ad->addr); /* Handle extensions */ - while ((ext = nhrp_ext_pull(&p->extensions, &payload)) != NULL) { + while ((ext = nhrp_ext_pull(&pp->extensions, &payload)) != NULL) { switch (htons(ext->type) & ~NHRP_EXTENSION_FLAG_COMPULSORY) { case NHRP_EXTENSION_NAT_ADDRESS: if (sockunion_family(&nifp->nat_nbma) == AF_UNSPEC) @@ -386,13 +448,13 @@ static void nhrp_handle_resolution_req(struct nhrp_packet_parser *p) if (!ext) goto err; cie = nhrp_cie_push(zb, NHRP_CODE_SUCCESS, - &nifp->nat_nbma, &p->if_ad->addr); + &nifp->nat_nbma, &pp->if_ad->addr); if (!cie) goto err; nhrp_ext_complete(zb, ext); break; default: - if (nhrp_ext_reply(zb, hdr, p->ifp, ext, &payload) < 0) + if (nhrp_ext_reply(zb, hdr, ifp, ext, &payload) < 0) goto err; break; } @@ -657,7 +719,7 @@ enum packet_type_t { PACKET_INDICATION, }; -static const struct { +static struct { enum packet_type_t type; const char *name; void (*handler)(struct nhrp_packet_parser *); @@ -897,11 +959,15 @@ void nhrp_peer_recv(struct nhrp_peer *p, struct zbuf *zb) if (extoff) { assert(zb->head > zb->buf); uint32_t header_offset = zb->head - zb->buf; - if ((extoff >= realsize) || (extoff < (header_offset))) { - info = "extoff larger than packet, or smaller than header"; + if (extoff >= realsize) { + info = "extoff larger than packet"; + goto drop; + } + if (extoff < header_offset) { + info = "extoff smaller than header offset"; goto drop; } - paylen = extoff - (zb->head - zb->buf); + paylen = extoff - header_offset; } else { paylen = zbuf_used(zb); } diff --git a/nhrpd/nhrp_shortcut.c b/nhrpd/nhrp_shortcut.c index 2552f9fd10..1c2b2b28f2 100644 --- a/nhrpd/nhrp_shortcut.c +++ b/nhrpd/nhrp_shortcut.c @@ -53,14 +53,21 @@ static int nhrp_shortcut_do_expire(struct thread *t) static void nhrp_shortcut_cache_notify(struct notifier_block *n, unsigned long cmd) { + char buf[PREFIX_STRLEN]; + struct 
nhrp_shortcut *s = container_of(n, struct nhrp_shortcut, cache_notifier); switch (cmd) { case NOTIFY_CACHE_UP: if (!s->route_installed) { - nhrp_route_announce(1, s->type, s->p, NULL, - &s->cache->remote_addr, 0); + debugf(NHRP_DEBUG_ROUTE, + "Shortcut: route install %s nh (unspec) dev %s", + prefix2str(s->p, buf, sizeof(buf)), + s->cache->ifp->name); + + nhrp_route_announce(1, s->type, s->p, s->cache->ifp, + NULL, 0); s->route_installed = 1; } break; @@ -84,6 +91,8 @@ static void nhrp_shortcut_update_binding(struct nhrp_shortcut *s, enum nhrp_cache_type type, struct nhrp_cache *c, int holding_time) { + char buf[2][PREFIX_STRLEN]; + s->type = type; if (c != s->cache) { if (s->cache) { @@ -98,13 +107,29 @@ static void nhrp_shortcut_update_binding(struct nhrp_shortcut *s, /* Force renewal of Zebra announce on prefix * change */ s->route_installed = 0; + debugf(NHRP_DEBUG_ROUTE, + "Shortcut: forcing renewal of zebra announce on prefix change peer %s ht %u cur nbma %s dev %s", + sockunion2str(&s->cache->remote_addr, + buf[0], sizeof(buf[0])), + holding_time, + sockunion2str( + &s->cache->cur.remote_nbma_natoa, + buf[1], sizeof(buf[1])), + s->cache->ifp->name); nhrp_shortcut_cache_notify(&s->cache_notifier, NOTIFY_CACHE_UP); } } - if (!s->cache || !s->cache->route_installed) + if (!s->cache || !s->cache->route_installed) { + debugf(NHRP_DEBUG_ROUTE, + "Shortcut: notify cache down because cache?%s or ri?%s", + s->cache ? "yes" : "no", + s->cache ? (s->cache->route_installed ? "yes" + : "no") + : "n/a"); nhrp_shortcut_cache_notify(&s->cache_notifier, NOTIFY_CACHE_DOWN); + } } if (s->type == NHRP_CACHE_NEGATIVE && !s->route_installed) { nhrp_route_announce(1, s->type, s->p, NULL, NULL, 0); @@ -141,6 +166,7 @@ static void nhrp_shortcut_delete(struct nhrp_shortcut *s) rn = route_node_lookup(shortcut_rib[afi], s->p); if (rn) { XFREE(MTYPE_NHRP_SHORTCUT, rn->info); + rn->info = NULL; route_unlock_node(rn); route_unlock_node(rn); } @@ -190,11 +216,10 @@ static void nhrp_shortcut_recv_resolution_rep(struct nhrp_reqid *reqid, struct nhrp_extension_header *ext; struct nhrp_cie_header *cie; struct nhrp_cache *c = NULL; - union sockunion *proto, cie_proto, *nbma, *nbma_natoa, cie_nbma, - nat_nbma; + union sockunion *proto, cie_proto, *nbma, cie_nbma, nat_nbma; struct prefix prefix, route_prefix; struct zbuf extpl; - char bufp[PREFIX_STRLEN], buf[3][SU_ADDRSTRLEN]; + char bufp[PREFIX_STRLEN], buf[4][SU_ADDRSTRLEN]; int holding_time = pp->if_ad->holdtime; nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid); @@ -262,39 +287,55 @@ static void nhrp_shortcut_recv_resolution_rep(struct nhrp_reqid *reqid, } debugf(NHRP_DEBUG_COMMON, - "Shortcut: %s is at proto %s cie-nbma %s nat-nbma %s cie-holdtime %d", + "Shortcut: %s is at proto %s dst_proto %s cie-nbma %s nat-nbma %s cie-holdtime %d", prefix2str(&prefix, bufp, sizeof(bufp)), sockunion2str(proto, buf[0], sizeof(buf[0])), - sockunion2str(&cie_nbma, buf[1], sizeof(buf[1])), - sockunion2str(&nat_nbma, buf[2], sizeof(buf[2])), + sockunion2str(&pp->dst_proto, buf[1], sizeof(buf[1])), + sockunion2str(&cie_nbma, buf[2], sizeof(buf[2])), + sockunion2str(&nat_nbma, buf[3], sizeof(buf[3])), htons(cie->holding_time)); /* Update cache entry for the protocol to nbma binding */ - if (sockunion_family(&nat_nbma) != AF_UNSPEC) { + if (sockunion_family(&nat_nbma) != AF_UNSPEC) nbma = &nat_nbma; - nbma_natoa = &cie_nbma; - } else { + else nbma = &cie_nbma; - nbma_natoa = NULL; - } + if (sockunion_family(nbma)) { c = nhrp_cache_get(pp->ifp, proto, 1); if (c) { - 
nhrp_cache_update_binding(c, NHRP_CACHE_CACHED, + debugf(NHRP_DEBUG_COMMON, + "Shortcut: cache found, update binding"); + nhrp_cache_update_binding(c, NHRP_CACHE_DYNAMIC, holding_time, nhrp_peer_get(pp->ifp, nbma), - htons(cie->mtu), nbma_natoa); + htons(cie->mtu), nbma); + } else { + debugf(NHRP_DEBUG_COMMON, + "Shortcut: no cache for nbma %s", buf[2]); } } /* Update shortcut entry for subnet to protocol gw binding */ - if (c && !sockunion_same(proto, &pp->dst_proto)) { + if (c) { ps = nhrp_shortcut_get(&prefix); if (ps) { ps->addr = s->addr; - nhrp_shortcut_update_binding(ps, NHRP_CACHE_CACHED, c, + debugf(NHRP_DEBUG_COMMON, + "Shortcut: calling update_binding"); + nhrp_shortcut_update_binding(ps, NHRP_CACHE_DYNAMIC, c, holding_time); + } else { + debugf(NHRP_DEBUG_COMMON, + "Shortcut: proto diff but no ps"); } + } else { + debugf(NHRP_DEBUG_COMMON, + "NO Shortcut because c NULL?%s or same proto?%s", + c ? "no" : "yes", + proto && pp && sockunion_same(proto, &pp->dst_proto) + ? "yes" + : "no"); } debugf(NHRP_DEBUG_COMMON, "Shortcut: Resolution reply handled"); @@ -306,7 +347,9 @@ static void nhrp_shortcut_send_resolution_req(struct nhrp_shortcut *s) struct nhrp_packet_header *hdr; struct interface *ifp; struct nhrp_interface *nifp; + struct nhrp_afi_data *if_ad; struct nhrp_peer *peer; + struct nhrp_cie_header *cie; if (nhrp_route_address(NULL, &s->addr, NULL, &peer) != NHRP_ROUTE_NBMA_NEXTHOP) @@ -336,7 +379,15 @@ static void nhrp_shortcut_send_resolution_req(struct nhrp_shortcut *s) * - MTU: MTU of the source station * - Holding Time: Max time to cache the source information * */ - /* FIXME: Send holding time, and MTU */ + /* FIXME: push CIE for each local protocol address */ + cie = nhrp_cie_push(zb, NHRP_CODE_SUCCESS, NULL, NULL); + cie->prefix_length = 0xff; + if_ad = &nifp->afi[family2afi(sockunion_family(&s->addr))]; + cie->holding_time = htons(if_ad->holdtime); + cie->mtu = htons(if_ad->mtu); + debugf(NHRP_DEBUG_COMMON, + "Shortcut res_req: set cie ht to %u and mtu to %u. 
shortcut ht is %u", + ntohs(cie->holding_time), ntohs(cie->mtu), s->holding_time); nhrp_ext_request(zb, hdr, ifp); diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c index f6d18fb77f..a3066f917e 100644 --- a/nhrpd/nhrp_vty.c +++ b/nhrpd/nhrp_vty.c @@ -12,6 +12,7 @@ #include "zclient.h" #include "stream.h" #include "filter.h" +#include "json.h" #include "nhrpd.h" #include "netlink.h" @@ -594,6 +595,7 @@ struct info_ctx { struct vty *vty; afi_t afi; int count; + struct json_object *json; }; static void show_ip_nhrp_cache(struct nhrp_cache *c, void *pctx) @@ -601,22 +603,60 @@ static void show_ip_nhrp_cache(struct nhrp_cache *c, void *pctx) struct info_ctx *ctx = pctx; struct vty *vty = ctx->vty; char buf[2][SU_ADDRSTRLEN]; + struct json_object *json = NULL; if (ctx->afi != family2afi(sockunion_family(&c->remote_addr))) return; - if (!ctx->count) { + + if (!ctx->count && !ctx->json) { vty_out(vty, "%-8s %-8s %-24s %-24s %-6s %s\n", "Iface", "Type", "Protocol", "NBMA", "Flags", "Identity"); } ctx->count++; + sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0])); + if (c->cur.peer) + sockunion2str(&c->cur.peer->vc->remote.nbma, + buf[1], sizeof(buf[1])); + else + snprintf(buf[1], sizeof(buf[1]), "-"); + + if (ctx->json) { + json = json_object_new_object(); + json_object_string_add(json, "interface", c->ifp->name); + json_object_string_add(json, "type", + nhrp_cache_type_str[c->cur.type]); + json_object_string_add(json, "protocol", buf[0]); + json_object_string_add(json, "nbma", buf[1]); + + if (c->used) + json_object_boolean_true_add(json, "used"); + else + json_object_boolean_false_add(json, "used"); + + if (c->t_timeout) + json_object_boolean_true_add(json, "timeout"); + else + json_object_boolean_false_add(json, "timeout"); + + if (c->t_auth) + json_object_boolean_true_add(json, "auth"); + else + json_object_boolean_false_add(json, "auth"); + + if (c->cur.peer) + json_object_string_add(json, "identity", + c->cur.peer->vc->remote.id); + else + json_object_string_add(json, "identity", "-"); + + json_object_array_add(ctx->json, json); + return; + } vty_out(ctx->vty, "%-8s %-8s %-24s %-24s %c%c%c %s\n", c->ifp->name, nhrp_cache_type_str[c->cur.type], - sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0])), - c->cur.peer ? sockunion2str(&c->cur.peer->vc->remote.nbma, - buf[1], sizeof(buf[1])) - : "-", + buf[0], buf[1], c->used ? 'U' : ' ', c->t_timeout ? 'T' : ' ', c->t_auth ? 'A' : ' ', c->cur.peer ? c->cur.peer->vc->remote.id : "-"); @@ -628,19 +668,35 @@ static void show_ip_nhrp_nhs(struct nhrp_nhs *n, struct nhrp_registration *reg, struct info_ctx *ctx = pctx; struct vty *vty = ctx->vty; char buf[2][SU_ADDRSTRLEN]; + struct json_object *json = NULL; - if (!ctx->count) { + if (!ctx->count && !ctx->json) { vty_out(vty, "%-8s %-24s %-16s %-16s\n", "Iface", "FQDN", "NBMA", "Protocol"); } ctx->count++; + if (reg && reg->peer) + sockunion2str(&reg->peer->vc->remote.nbma, + buf[0], sizeof(buf[0])); + else + snprintf(buf[0], sizeof(buf[0]), "-"); + sockunion2str(reg ? &reg->proto_addr : &n->proto_addr, buf[1], + sizeof(buf[1])); + + if (ctx->json) { + json = json_object_new_object(); + json_object_string_add(json, "interface", n->ifp->name); + json_object_string_add(json, "fqdn", n->nbma_fqdn); + json_object_string_add(json, "nbma", buf[0]); + json_object_string_add(json, "protocol", buf[1]); + + json_object_array_add(ctx->json, json); + return; + } + vty_out(vty, "%-8s %-24s %-16s %-16s\n", n->ifp->name, n->nbma_fqdn, - (reg && reg->peer) ?
sockunion2str(®->peer->vc->remote.nbma, - buf[0], sizeof(buf[0])) - : "-", - sockunion2str(reg ? ®->proto_addr : &n->proto_addr, buf[1], - sizeof(buf[1]))); + buf[0], buf[1]); } static void show_ip_nhrp_shortcut(struct nhrp_shortcut *s, void *pctx) @@ -649,6 +705,7 @@ static void show_ip_nhrp_shortcut(struct nhrp_shortcut *s, void *pctx) struct nhrp_cache *c; struct vty *vty = ctx->vty; char buf1[PREFIX_STRLEN], buf2[SU_ADDRSTRLEN]; + struct json_object *json = NULL; if (!ctx->count) { vty_out(vty, "%-8s %-24s %-24s %s\n", "Type", "Prefix", "Via", @@ -657,20 +714,82 @@ static void show_ip_nhrp_shortcut(struct nhrp_shortcut *s, void *pctx) ctx->count++; c = s->cache; - vty_out(ctx->vty, "%-8s %-24s %-24s %s\n", nhrp_cache_type_str[s->type], - prefix2str(s->p, buf1, sizeof(buf1)), - c ? sockunion2str(&c->remote_addr, buf2, sizeof(buf2)) : "", + if (c) + sockunion2str(&c->remote_addr, buf2, sizeof(buf2)); + prefix2str(s->p, buf1, sizeof(buf1)); + + if (ctx->json) { + json = json_object_new_object(); + json_object_string_add(json, "type", + nhrp_cache_type_str[s->type]); + json_object_string_add(json, "prefix", buf1); + + if (c) + json_object_string_add(json, "via", buf2); + + if (c && c->cur.peer) + json_object_string_add(json, "identity", + c->cur.peer->vc->remote.id); + else + json_object_string_add(json, "identity", ""); + + json_object_array_add(ctx->json, json); + return; + } + + vty_out(ctx->vty, "%-8s %-24s %-24s %s\n", + nhrp_cache_type_str[s->type], + buf1, buf2, (c && c->cur.peer) ? c->cur.peer->vc->remote.id : ""); } static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx) { struct info_ctx *ctx = pctx; - char buf[SU_ADDRSTRLEN]; + char buf[3][SU_ADDRSTRLEN]; + struct json_object *json = NULL; + if (ctx->afi != family2afi(sockunion_family(&c->remote_addr))) return; + sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0])); + if (c->cur.peer) + sockunion2str(&c->cur.peer->vc->remote.nbma, buf[1], + sizeof(buf[1])); + if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) + sockunion2str(&c->cur.remote_nbma_natoa, buf[2], + sizeof(buf[2])); + if (ctx->json) { + json = json_object_new_object(); + json_object_string_add(json, "type", + nhrp_cache_type_str[c->cur.type]); + + if (c->cur.peer && c->cur.peer->online) + json_object_boolean_true_add(json, "up"); + else + json_object_boolean_false_add(json, "up"); + + if (c->used) + json_object_boolean_true_add(json, "used"); + else + json_object_boolean_false_add(json, "used"); + + json_object_string_add(json, "protocolAddress", buf[0]); + json_object_int_add(json, "protocolAddressSize", + 8 * family2addrsize(sockunion_family + (&c->remote_addr))); + + if (c->cur.peer) + json_object_string_add(json, "nbmaAddress", buf[1]); + + if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) + json_object_string_add(json, "nbmaNatOaAddress", + buf[2]); + + json_object_array_add(ctx->json, json); + return; + } vty_out(ctx->vty, "Type: %s\n" "Flags:%s%s\n" @@ -678,40 +797,45 @@ static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx) nhrp_cache_type_str[c->cur.type], (c->cur.peer && c->cur.peer->online) ? " up" : "", c->used ? 
" used" : "", - sockunion2str(&c->remote_addr, buf, sizeof(buf)), + buf[0], 8 * family2addrsize(sockunion_family(&c->remote_addr))); - if (c->cur.peer) { - vty_out(ctx->vty, "NBMA-Address: %s\n", - sockunion2str(&c->cur.peer->vc->remote.nbma, buf, - sizeof(buf))); - } + if (c->cur.peer) + vty_out(ctx->vty, "NBMA-Address: %s\n", buf[1]); - if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) { - vty_out(ctx->vty, "NBMA-NAT-OA-Address: %s\n", - sockunion2str(&c->cur.remote_nbma_natoa, buf, - sizeof(buf))); - } + if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) + vty_out(ctx->vty, "NBMA-NAT-OA-Address: %s\n", buf[2]); vty_out(ctx->vty, "\n\n"); } DEFUN(show_ip_nhrp, show_ip_nhrp_cmd, - "show " AFI_CMD " nhrp [cache|nhs|shortcut|opennhrp]", + "show " AFI_CMD " nhrp [cache|nhs|shortcut|opennhrp] [json]", SHOW_STR AFI_STR "NHRP information\n" "Forwarding cache information\n" "Next hop server information\n" "Shortcut information\n" - "opennhrpctl style cache dump\n") + "opennhrpctl style cache dump\n" + JSON_STR) { struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT); struct interface *ifp; struct info_ctx ctx = { - .vty = vty, .afi = cmd_to_afi(argv[1]), + .vty = vty, .afi = cmd_to_afi(argv[1]), .json = NULL }; - + bool uj = use_json(argc, argv); + struct json_object *json_path = NULL; + struct json_object *json_vrf = NULL, *json_vrf_path = NULL; + int ret = CMD_SUCCESS; + + if (uj) { + json_vrf = json_object_new_object(); + json_vrf_path = json_object_new_object(); + json_path = json_object_new_array(); + ctx.json = json_path; + } if (argc <= 3 || argv[3]->text[0] == 'c') { FOR_ALL_INTERFACES (vrf, ifp) nhrp_cache_foreach(ifp, show_ip_nhrp_cache, &ctx); @@ -721,49 +845,104 @@ DEFUN(show_ip_nhrp, show_ip_nhrp_cmd, } else if (argv[3]->text[0] == 's') { nhrp_shortcut_foreach(ctx.afi, show_ip_nhrp_shortcut, &ctx); } else { - vty_out(vty, "Status: ok\n\n"); + if (!ctx.json) + vty_out(vty, "Status: ok\n\n"); + else + json_object_string_add(json_vrf, "status", "ok"); + ctx.count++; FOR_ALL_INTERFACES (vrf, ifp) nhrp_cache_foreach(ifp, show_ip_opennhrp_cache, &ctx); } + if (uj) + json_object_int_add(json_vrf, "entriesCount", ctx.count); if (!ctx.count) { - vty_out(vty, "%% No entries\n"); - return CMD_WARNING; + if (!ctx.json) + vty_out(vty, "%% No entries\n"); + ret = CMD_WARNING; } - - return CMD_SUCCESS; + if (uj) { + json_object_object_add(json_vrf_path, "attr", json_vrf); + json_object_object_add(json_vrf_path, "table", ctx.json); + vty_out(vty, "%s", + json_object_to_json_string_ext( + json_vrf_path, JSON_C_TO_STRING_PRETTY)); + json_object_free(json_vrf_path); + } + return ret; } +struct dmvpn_cfg { + struct vty *vty; + struct json_object *json; +}; + static void show_dmvpn_entry(struct nhrp_vc *vc, void *ctx) { - struct vty *vty = ctx; + struct dmvpn_cfg *ctxt = ctx; + struct vty *vty; char buf[2][SU_ADDRSTRLEN]; + struct json_object *json = NULL; + + if (!ctxt || !ctxt->vty) + return; + vty = ctxt->vty; + sockunion2str(&vc->local.nbma, buf[0], sizeof(buf[0])); + sockunion2str(&vc->remote.nbma, buf[1], sizeof(buf[1])); + if (ctxt->json) { + json = json_object_new_object(); + json_object_string_add(json, "src", buf[0]); + json_object_string_add(json, "dst", buf[1]); + + if (notifier_active(&vc->notifier_list)) + json_object_boolean_true_add(json, "notifierActive"); + else + json_object_boolean_false_add(json, "notifierActive"); - vty_out(vty, "%-24s %-24s %c %-4d %-24s\n", - sockunion2str(&vc->local.nbma, buf[0], sizeof(buf[0])), - sockunion2str(&vc->remote.nbma, buf[1], 
sizeof(buf[1])), - notifier_active(&vc->notifier_list) ? 'n' : ' ', vc->ipsec, - vc->remote.id); + json_object_int_add(json, "sas", vc->ipsec); + json_object_string_add(json, "identity", vc->remote.id); + json_object_array_add(ctxt->json, json); + } else { + vty_out(vty, "%-24s %-24s %c %-4d %-24s\n", + buf[0], buf[1], notifier_active(&vc->notifier_list) ? + 'n' : ' ', vc->ipsec, vc->remote.id); + } } DEFUN(show_dmvpn, show_dmvpn_cmd, - "show dmvpn", + "show dmvpn [json]", SHOW_STR - "DMVPN information\n") + "DMVPN information\n" + JSON_STR) { - vty_out(vty, "%-24s %-24s %-6s %-4s %-24s\n", "Src", "Dst", "Flags", - "SAs", "Identity"); - - nhrp_vc_foreach(show_dmvpn_entry, vty); - + bool uj = use_json(argc, argv); + struct dmvpn_cfg ctxt; + struct json_object *json_path = NULL; + + ctxt.vty = vty; + if (!uj) { + ctxt.json = NULL; + vty_out(vty, "%-24s %-24s %-6s %-4s %-24s\n", + "Src", "Dst", "Flags", "SAs", "Identity"); + } else { + json_path = json_object_new_array(); + ctxt.json = json_path; + } + nhrp_vc_foreach(show_dmvpn_entry, &ctxt); + if (uj) { + vty_out(vty, "%s", + json_object_to_json_string_ext( + json_path, JSON_C_TO_STRING_PRETTY)); + json_object_free(json_path); + } return CMD_SUCCESS; } static void clear_nhrp_cache(struct nhrp_cache *c, void *data) { struct info_ctx *ctx = data; - if (c->cur.type <= NHRP_CACHE_CACHED) { + if (c->cur.type <= NHRP_CACHE_DYNAMIC) { nhrp_cache_update_binding(c, c->cur.type, -1, NULL, 0, NULL); ctx->count++; } diff --git a/nhrpd/zbuf.h b/nhrpd/zbuf.h index d03f4ca3a2..e6f7101d63 100644 --- a/nhrpd/zbuf.h +++ b/nhrpd/zbuf.h @@ -86,9 +86,9 @@ static inline void *__zbuf_pull(struct zbuf *zb, size_t size, int error) } #define zbuf_pull(zb, type) ((type *)__zbuf_pull(zb, sizeof(type), 1)) -#define zbuf_pulln(zb, sz) ((void *)__zbuf_pull(zb, sz, 1)) +#define zbuf_pulln(zb, sz) (__zbuf_pull(zb, sz, 1)) #define zbuf_may_pull(zb, type) ((type *)__zbuf_pull(zb, sizeof(type), 0)) -#define zbuf_may_pulln(zb, sz) ((void *)__zbuf_pull(zb, sz, 0)) +#define zbuf_may_pulln(zb, sz) (__zbuf_pull(zb, sz, 0)) void *zbuf_may_pull_until(struct zbuf *zb, const char *sep, struct zbuf *msg); @@ -149,9 +149,9 @@ static inline void *__zbuf_push(struct zbuf *zb, size_t size, int error) } #define zbuf_push(zb, type) ((type *)__zbuf_push(zb, sizeof(type), 1)) -#define zbuf_pushn(zb, sz) ((void *)__zbuf_push(zb, sz, 1)) +#define zbuf_pushn(zb, sz) (__zbuf_push(zb, sz, 1)) #define zbuf_may_push(zb, type) ((type *)__zbuf_may_push(zb, sizeof(type), 0)) -#define zbuf_may_pushn(zb, sz) ((void *)__zbuf_push(zb, sz, 0)) +#define zbuf_may_pushn(zb, sz) (__zbuf_push(zb, sz, 0)) static inline void zbuf_put(struct zbuf *zb, const void *src, size_t len) { diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c index ead186b6fc..1f6cc9d527 100644 --- a/ospf6d/ospf6_abr.c +++ b/ospf6d/ospf6_abr.c @@ -1200,9 +1200,23 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) listcount(old_route->nh_list)); } } else { - /* adv. router exists in the list, update the nhs */ - list_delete_all_node(o_path->nh_list); - ospf6_copy_nexthops(o_path->nh_list, route->nh_list); + struct ospf6_route *tmp_route = ospf6_route_create(); + + ospf6_copy_nexthops(tmp_route->nh_list, + o_path->nh_list); + + if (ospf6_route_cmp_nexthops(tmp_route, route) != 0) { + /* adv. router exists in the list, update nhs */ + list_delete_all_node(o_path->nh_list); + ospf6_copy_nexthops(o_path->nh_list, + route->nh_list); + ospf6_route_delete(tmp_route); + } else { + /* adv. 
router has no change in nhs */ + old_entry_updated = false; + ospf6_route_delete(tmp_route); + continue; + } } if (is_debug) @@ -1427,7 +1441,7 @@ void install_element_ospf6_debug_abr(void) install_element(CONFIG_NODE, &no_debug_ospf6_abr_cmd); } -static const struct ospf6_lsa_handler inter_prefix_handler = { +static struct ospf6_lsa_handler inter_prefix_handler = { .lh_type = OSPF6_LSTYPE_INTER_PREFIX, .lh_name = "Inter-Prefix", .lh_short_name = "IAP", @@ -1435,7 +1449,7 @@ static const struct ospf6_lsa_handler inter_prefix_handler = { .lh_get_prefix_str = ospf6_inter_area_prefix_lsa_get_prefix_str, .lh_debug = 0}; -static const struct ospf6_lsa_handler inter_router_handler = { +static struct ospf6_lsa_handler inter_router_handler = { .lh_type = OSPF6_LSTYPE_INTER_ROUTER, .lh_name = "Inter-Router", .lh_short_name = "IAR", diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c index 805e411c7b..6e71a21bd5 100644 --- a/ospf6d/ospf6_asbr.c +++ b/ospf6d/ospf6_asbr.c @@ -918,8 +918,8 @@ void ospf6_asbr_distribute_list_update(int type) ZROUTE_NAME(type)); ospf6->t_distribute_update = NULL; - thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, - (void **)args, OSPF_MIN_LS_INTERVAL, + thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, args, + OSPF_MIN_LS_INTERVAL, &ospf6->t_distribute_update); } @@ -1853,7 +1853,7 @@ DEFUN (show_ipv6_ospf6_redistribute, return CMD_SUCCESS; } -static const struct ospf6_lsa_handler as_external_handler = { +static struct ospf6_lsa_handler as_external_handler = { .lh_type = OSPF6_LSTYPE_AS_EXTERNAL, .lh_name = "AS-External", .lh_short_name = "ASE", diff --git a/ospf6d/ospf6_intra.c b/ospf6d/ospf6_intra.c index 9c239b75ff..b700899ccf 100644 --- a/ospf6d/ospf6_intra.c +++ b/ospf6d/ospf6_intra.c @@ -2235,7 +2235,7 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa) __func__, oa->name); } -static const struct ospf6_lsa_handler router_handler = { +static struct ospf6_lsa_handler router_handler = { .lh_type = OSPF6_LSTYPE_ROUTER, .lh_name = "Router", .lh_short_name = "Rtr", @@ -2243,7 +2243,7 @@ static const struct ospf6_lsa_handler router_handler = { .lh_get_prefix_str = ospf6_router_lsa_get_nbr_id, .lh_debug = 0}; -static const struct ospf6_lsa_handler network_handler = { +static struct ospf6_lsa_handler network_handler = { .lh_type = OSPF6_LSTYPE_NETWORK, .lh_name = "Network", .lh_short_name = "Net", @@ -2251,7 +2251,7 @@ static const struct ospf6_lsa_handler network_handler = { .lh_get_prefix_str = ospf6_network_lsa_get_ar_id, .lh_debug = 0}; -static const struct ospf6_lsa_handler link_handler = { +static struct ospf6_lsa_handler link_handler = { .lh_type = OSPF6_LSTYPE_LINK, .lh_name = "Link", .lh_short_name = "Lnk", @@ -2259,7 +2259,7 @@ static const struct ospf6_lsa_handler link_handler = { .lh_get_prefix_str = ospf6_link_lsa_get_prefix_str, .lh_debug = 0}; -static const struct ospf6_lsa_handler intra_prefix_handler = { +static struct ospf6_lsa_handler intra_prefix_handler = { .lh_type = OSPF6_LSTYPE_INTRA_PREFIX, .lh_name = "Intra-Prefix", .lh_short_name = "INP", diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c index 9acbd09b1a..bcfd975879 100644 --- a/ospf6d/ospf6_lsa.c +++ b/ospf6d/ospf6_lsa.c @@ -77,16 +77,16 @@ static struct ospf6_lsa_handler unknown_handler = { .lh_debug = 0 /* No default debug */ }; -void ospf6_install_lsa_handler(const struct ospf6_lsa_handler *handler) +void ospf6_install_lsa_handler(struct ospf6_lsa_handler *handler) { /* type in handler is host byte order */ int index = handler->lh_type & 
OSPF6_LSTYPE_FCODE_MASK; vector_set_index(ospf6_lsa_handler_vector, index, (void *)handler); } -const struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type) +struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type) { - const struct ospf6_lsa_handler *handler = NULL; + struct ospf6_lsa_handler *handler = NULL; unsigned int index = ntohs(type) & OSPF6_LSTYPE_FCODE_MASK; if (index >= vector_active(ospf6_lsa_handler_vector)) @@ -527,7 +527,7 @@ struct ospf6_lsa *ospf6_lsa_create(struct ospf6_lsa_header *header) /* allocate memory */ lsa = XCALLOC(MTYPE_OSPF6_LSA, sizeof(struct ospf6_lsa)); - lsa->header = (struct ospf6_lsa_header *)new_header; + lsa->header = new_header; /* dump string */ ospf6_lsa_printbuf(lsa, lsa->name, sizeof(lsa->name)); @@ -554,7 +554,7 @@ struct ospf6_lsa *ospf6_lsa_create_headeronly(struct ospf6_lsa_header *header) /* allocate memory */ lsa = XCALLOC(MTYPE_OSPF6_LSA, sizeof(struct ospf6_lsa)); - lsa->header = (struct ospf6_lsa_header *)new_header; + lsa->header = new_header; SET_FLAG(lsa->flag, OSPF6_LSA_HEADERONLY); /* dump string */ diff --git a/ospf6d/ospf6_lsa.h b/ospf6d/ospf6_lsa.h index d871a8842e..02f9f9d26c 100644 --- a/ospf6d/ospf6_lsa.h +++ b/ospf6d/ospf6_lsa.h @@ -237,8 +237,8 @@ extern int ospf6_lsa_checksum_valid(struct ospf6_lsa_header *); extern int ospf6_lsa_prohibited_duration(uint16_t type, uint32_t id, uint32_t adv_router, void *scope); -extern void ospf6_install_lsa_handler(const struct ospf6_lsa_handler *handler); -extern const struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type); +extern void ospf6_install_lsa_handler(struct ospf6_lsa_handler *handler); +extern struct ospf6_lsa_handler *ospf6_get_lsa_handler(uint16_t type); extern void ospf6_lsa_init(void); extern void ospf6_lsa_terminate(void); diff --git a/ospf6d/ospf6_spf.c b/ospf6d/ospf6_spf.c index b0fe890d33..80bff5795f 100644 --- a/ospf6d/ospf6_spf.c +++ b/ospf6d/ospf6_spf.c @@ -989,7 +989,7 @@ struct ospf6_lsa *ospf6_create_single_router_lsa(struct ospf6_area *area, rtr_lsa = ospf6_lsdb_next(end, rtr_lsa); continue; } - lsa_header = (struct ospf6_lsa_header *)rtr_lsa->header; + lsa_header = rtr_lsa->header; total_lsa_length += (ntohs(lsa_header->length) - lsa_length); num_lsa++; rtr_lsa = ospf6_lsdb_next(end, rtr_lsa); @@ -1027,7 +1027,7 @@ struct ospf6_lsa *ospf6_create_single_router_lsa(struct ospf6_area *area, assert(rtr_lsa); if (!OSPF6_LSA_IS_MAXAGE(rtr_lsa)) { /* Append first Link State ID LSA */ - lsa_header = (struct ospf6_lsa_header *)rtr_lsa->header; + lsa_header = rtr_lsa->header; memcpy(new_header, lsa_header, ntohs(lsa_header->length)); /* Assign new lsa length as aggregated length. 
*/ ((struct ospf6_lsa_header *)new_header)->length = @@ -1057,7 +1057,7 @@ struct ospf6_lsa *ospf6_create_single_router_lsa(struct ospf6_area *area, } /* Append Next Link State ID LSA */ - lsa_header = (struct ospf6_lsa_header *)rtr_lsa->header; + lsa_header = rtr_lsa->header; memcpy(new_header, (OSPF6_LSA_HEADER_END(rtr_lsa->header) + 4), (ntohs(lsa_header->length) - lsa_length)); new_header += (ntohs(lsa_header->length) - lsa_length); diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c index 96eee51929..dc10fa52cb 100644 --- a/ospf6d/ospf6_top.c +++ b/ospf6d/ospf6_top.c @@ -52,8 +52,8 @@ DEFINE_QOBJ_TYPE(ospf6) FRR_CFG_DEFAULT_BOOL(OSPF6_LOG_ADJACENCY_CHANGES, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) /* global ospf6d variable */ diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c index 1717f1e650..2773a666a3 100644 --- a/ospf6d/ospf6_zebra.c +++ b/ospf6d/ospf6_zebra.c @@ -171,8 +171,8 @@ static int ospf6_zebra_read_route(ZAPI_CALLBACK_ARGS) if (IS_OSPF6_DEBUG_ZEBRA(RECV)) { char prefixstr[PREFIX2STR_BUFFER], nexthopstr[128]; - prefix2str((struct prefix *)&api.prefix, prefixstr, - sizeof(prefixstr)); + + prefix2str(&api.prefix, prefixstr, sizeof(prefixstr)); inet_ntop(AF_INET6, nexthop, nexthopstr, sizeof(nexthopstr)); zlog_debug( @@ -256,7 +256,7 @@ static void ospf6_zebra_route_update(int type, struct ospf6_route *request) && ospf6_route_is_same(request, request->next)) { if (IS_OSPF6_DEBUG_ZEBRA(SEND)) zlog_debug( - " Best-path removal resulted Sencondary addition"); + " Best-path removal resulted Secondary addition"); type = ADD; request = request->next; } diff --git a/ospfd/ospf_abr.c b/ospfd/ospf_abr.c index a8dfcbb36b..eb3323997f 100644 --- a/ospfd/ospf_abr.c +++ b/ospfd/ospf_abr.c @@ -708,8 +708,7 @@ void ospf_abr_announce_network_to_area(struct prefix_ipv4 *p, uint32_t cost, else full_cost = cost; - old = ospf_lsa_lookup_by_prefix(area->lsdb, OSPF_SUMMARY_LSA, - (struct prefix_ipv4 *)p, + old = ospf_lsa_lookup_by_prefix(area->lsdb, OSPF_SUMMARY_LSA, p, area->ospf->router_id); if (old) { if (IS_DEBUG_OSPF_EVENT) @@ -761,8 +760,7 @@ void ospf_abr_announce_network_to_area(struct prefix_ipv4 *p, uint32_t cost, zlog_debug( "ospf_abr_announce_network_to_area(): " "creating new summary"); - lsa = ospf_summary_lsa_originate((struct prefix_ipv4 *)p, - full_cost, area); + lsa = ospf_summary_lsa_originate(p, full_cost, area); /* This will flood through area. 
*/ if (!lsa) { diff --git a/ospfd/ospf_ase.c b/ospfd/ospf_ase.c index 2c80d485a3..30940cf010 100644 --- a/ospfd/ospf_ase.c +++ b/ospfd/ospf_ase.c @@ -691,7 +691,7 @@ static int ospf_ase_calculate_timer(struct thread *t) if (IS_DEBUG_OSPF_EVENT) zlog_info( - "SPF Processing Time(usecs): External Routes: %lld\n", + "SPF Processing Time(usecs): External Routes: %lld", (stop_time.tv_sec - start_time.tv_sec) * 1000000LL + (stop_time.tv_usec diff --git a/ospfd/ospf_ext.c b/ospfd/ospf_ext.c index df64fca883..47883d5f39 100644 --- a/ospfd/ospf_ext.c +++ b/ospfd/ospf_ext.c @@ -1684,7 +1684,7 @@ static uint16_t show_vty_link_info(struct vty *vty, struct tlv_header *ext) /* Extended Link TLVs */ static void ospf_ext_link_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh; uint16_t length = 0, sum = 0; @@ -1758,7 +1758,7 @@ static uint16_t show_vty_pref_info(struct vty *vty, struct tlv_header *ext) /* Extended Prefix TLVs */ static void ospf_ext_pref_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh; uint16_t length = 0, sum = 0; diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index 55ec638522..088f7f31c7 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -123,7 +123,7 @@ int get_age(struct ospf_lsa *lsa) one-based. */ uint16_t ospf_lsa_checksum(struct lsa_header *lsa) { - uint8_t *buffer = (uint8_t *)&lsa->options; + uint8_t *buffer = &lsa->options; int options_offset = buffer - (uint8_t *)&lsa->ls_age; /* should be 2 */ /* Skip the AGE field */ @@ -138,7 +138,7 @@ uint16_t ospf_lsa_checksum(struct lsa_header *lsa) int ospf_lsa_checksum_valid(struct lsa_header *lsa) { - uint8_t *buffer = (uint8_t *)&lsa->options; + uint8_t *buffer = &lsa->options; int options_offset = buffer - (uint8_t *)&lsa->ls_age; /* should be 2 */ /* Skip the AGE field */ @@ -2805,7 +2805,7 @@ static int ospf_maxage_lsa_remover(struct thread *thread) if (CHECK_FLAG(lsa->flags, OSPF_LSA_PREMATURE_AGE)) { if (IS_DEBUG_OSPF(lsa, LSA_FLOODING)) zlog_debug( - "originating new lsa for lsa 0x%p\n", + "originating new lsa for lsa 0x%p", (void *)lsa); ospf_lsa_refresh(ospf, lsa); } @@ -2845,8 +2845,7 @@ void ospf_lsa_maxage_delete(struct ospf *ospf, struct ospf_lsa *lsa) lsa_prefix.prefixlen = sizeof(lsa_prefix.u.ptr) * CHAR_BIT; lsa_prefix.u.ptr = (uintptr_t)lsa; - if ((rn = route_node_lookup(ospf->maxage_lsa, - (struct prefix *)&lsa_prefix))) { + if ((rn = route_node_lookup(ospf->maxage_lsa, &lsa_prefix))) { if (rn->info == lsa) { UNSET_FLAG(lsa->flags, OSPF_LSA_IN_MAXAGE); ospf_lsa_unlock(&lsa); /* maxage_lsa */ @@ -2888,7 +2887,7 @@ void ospf_lsa_maxage(struct ospf *ospf, struct ospf_lsa *lsa) lsa_prefix.prefixlen = sizeof(lsa_prefix.u.ptr) * CHAR_BIT; lsa_prefix.u.ptr = (uintptr_t)lsa; - rn = route_node_get(ospf->maxage_lsa, (struct prefix *)&lsa_prefix); + rn = route_node_get(ospf->maxage_lsa, &lsa_prefix); if (rn->info != NULL) { if (IS_DEBUG_OSPF(lsa, LSA_FLOODING)) zlog_debug( diff --git a/ospfd/ospf_nsm.c b/ospfd/ospf_nsm.c index 58f087ca4f..9cd83c245c 100644 --- a/ospfd/ospf_nsm.c +++ b/ospfd/ospf_nsm.c @@ -731,7 +731,7 @@ static void nsm_change_state(struct ospf_neighbor *nbr, int state) OSPF_DD_FLAG_I | OSPF_DD_FLAG_M | OSPF_DD_FLAG_MS; if (CHECK_FLAG(oi->ospf->config, OSPF_LOG_ADJACENCY_DETAIL)) zlog_info( - "%s: Intializing [DD]: %s with seqnum:%x , flags:%x", + "%s: 
Initializing [DD]: %s with seqnum:%x , flags:%x", (oi->ospf->name) ? oi->ospf->name : VRF_DEFAULT_NAME, inet_ntoa(nbr->router_id), nbr->dd_seqnum, diff --git a/ospfd/ospf_opaque.c b/ospfd/ospf_opaque.c index b042a06372..35fa5da74b 100644 --- a/ospfd/ospf_opaque.c +++ b/ospfd/ospf_opaque.c @@ -1161,7 +1161,7 @@ void ospf_opaque_config_write_debug(struct vty *vty) void show_opaque_info_detail(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; uint32_t lsid = ntohl(lsah->id.s_addr); uint8_t opaque_type = GET_OPAQUE_TYPE(lsid); uint32_t opaque_id = GET_OPAQUE_ID(lsid); diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c index d87f02627b..aa50aeacbc 100644 --- a/ospfd/ospf_packet.c +++ b/ospfd/ospf_packet.c @@ -611,7 +611,7 @@ static void ospf_write_frags(int fd, struct ospf_packet *op, struct ip *iph, if (IS_DEBUG_OSPF_PACKET(type - 1, SEND)) { zlog_debug( - "ospf_write_frags: sent id %d, off %d, len %d to %s\n", + "ospf_write_frags: sent id %d, off %d, len %d to %s", iph->ip_id, iph->ip_off, iph->ip_len, inet_ntoa(iph->ip_dst)); } @@ -2335,7 +2335,7 @@ static struct stream *ospf_recv_packet(struct ospf *ospf, int fd, ip_len = iph->ip_len; -#if !defined(GNU_LINUX) && (OpenBSD < 200311) && (__FreeBSD_version < 1000000) +#if defined(__FreeBSD__) && (__FreeBSD_version < 1000000) /* * Kernel network code touches incoming IP header parameters, * before protocol specific processing. diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c index 5f01edfbdf..c3d53ad5ed 100644 --- a/ospfd/ospf_ri.c +++ b/ospfd/ospf_ri.c @@ -1382,9 +1382,8 @@ static uint16_t show_vty_sr_algorithm(struct vty *vty, struct tlv_header *tlvh) zlog_debug(" Algorithm %d: Strict SPF", i); break; default: - zlog_debug( - " Algorithm %d: Unknown value %d\n", - i, algo->value[i]); + zlog_debug(" Algorithm %d: Unknown value %d", + i, algo->value[i]); break; } } @@ -1439,7 +1438,7 @@ static uint16_t show_vty_sr_msd(struct vty *vty, struct tlv_header *tlvh) static void ospf_router_info_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh; uint16_t length = 0, sum = 0; diff --git a/ospfd/ospf_route.c b/ospfd/ospf_route.c index b6e8338ee7..a661c80a91 100644 --- a/ospfd/ospf_route.c +++ b/ospfd/ospf_route.c @@ -631,27 +631,15 @@ void ospf_route_table_dump(struct route_table *rt) { struct route_node *rn; struct ospf_route * or ; - char buf1[BUFSIZ]; - char buf2[BUFSIZ]; struct listnode *pnode; struct ospf_path *path; -#if 0 - zlog_debug ("Type Dest Area Path Type Cost Next Adv."); - zlog_debug (" Hop(s) Router(s)"); -#endif /* 0 */ - zlog_debug("========== OSPF routing table =========="); for (rn = route_top(rt); rn; rn = route_next(rn)) if ((or = rn->info) != NULL) { if (or->type == OSPF_DESTINATION_NETWORK) { - zlog_debug("N %s/%d\t%s\t%s\t%d", - inet_ntop(AF_INET, &rn->p.u.prefix4, - buf1, BUFSIZ), - rn->p.prefixlen, - inet_ntop(AF_INET, - & or->u.std.area_id, buf2, - BUFSIZ), + zlog_debug("N %-18pFX %-15pI4 %s %d", &rn->p, + &or->u.std.area_id, ospf_path_type_str[or->path_type], or->cost); for (ALL_LIST_ELEMENTS_RO(or->paths, pnode, @@ -659,12 +647,9 @@ void ospf_route_table_dump(struct route_table *rt) zlog_debug(" -> %s", inet_ntoa(path->nexthop)); } else - zlog_debug("R %s\t%s\t%s\t%d", - inet_ntop(AF_INET, &rn->p.u.prefix4, - buf1, BUFSIZ), - inet_ntop(AF_INET, - & or->u.std.area_id, buf2, - BUFSIZ), + zlog_debug("R %-18pI4 
%-15pI4 %s %d", + &rn->p.u.prefix4, + &or->u.std.area_id, ospf_path_type_str[or->path_type], or->cost); } diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c index 8b605b3bac..ae70a5c789 100644 --- a/ospfd/ospf_spf.c +++ b/ospfd/ospf_spf.c @@ -1401,13 +1401,13 @@ static int ospf_spf_calculate_timer(struct thread *thread) if (IS_DEBUG_OSPF_EVENT) { zlog_info("SPF Processing Time(usecs): %ld", total_spf_time); - zlog_info("\t SPF Time: %ld", spf_time); - zlog_info("\t InterArea: %ld", ia_time); - zlog_info("\t Prune: %ld", prune_time); - zlog_info("\tRouteInstall: %ld", rt_time); + zlog_info(" SPF Time: %ld", spf_time); + zlog_info(" InterArea: %ld", ia_time); + zlog_info(" Prune: %ld", prune_time); + zlog_info(" RouteInstall: %ld", rt_time); if (IS_OSPF_ABR(ospf)) - zlog_info("\t ABR: %ld (%d areas)", abr_time, - areas_processed); + zlog_info(" ABR: %ld (%d areas)", + abr_time, areas_processed); zlog_info("Reason(s) for SPF: %s", rbuf); } diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c index b5a54a0bc4..7a786ba7ab 100644 --- a/ospfd/ospf_sr.c +++ b/ospfd/ospf_sr.c @@ -1035,7 +1035,7 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) { struct sr_node *srn; struct tlv_header *tlvh; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct ri_sr_tlv_sid_label_range *ri_srgb; struct ri_sr_tlv_sr_algorithm *algo; struct sr_srgb srgb; @@ -1156,7 +1156,7 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) void ospf_sr_ri_lsa_delete(struct ospf_lsa *lsa) { struct sr_node *srn; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; if (IS_DEBUG_OSPF_SR) zlog_debug("SR (%s): Remove SR node %s from lsa_id 4.0.0.%u", @@ -1198,7 +1198,7 @@ void ospf_sr_ext_link_lsa_update(struct ospf_lsa *lsa) { struct sr_node *srn; struct tlv_header *tlvh; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct sr_link *srl; uint16_t length, sum; @@ -1308,7 +1308,7 @@ void ospf_sr_ext_prefix_lsa_update(struct ospf_lsa *lsa) { struct sr_node *srn; struct tlv_header *tlvh; - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct sr_prefix *srp; uint16_t length, sum; diff --git a/ospfd/ospf_te.c b/ospfd/ospf_te.c index 8da99843e6..1009c7577e 100644 --- a/ospfd/ospf_te.c +++ b/ospfd/ospf_te.c @@ -1780,7 +1780,7 @@ static uint16_t show_vty_link_subtlv_unrsv_bw(struct vty *vty, i, fval1, i + 1, fval2); else zlog_debug( - " [%d]: %g (Bytes/sec),\t[%d]: %g (Bytes/sec)", + " [%d]: %g (Bytes/sec), [%d]: %g (Bytes/sec)", i, fval1, i + 1, fval2); } @@ -2119,7 +2119,7 @@ static uint16_t ospf_mpls_te_show_link_subtlv(struct vty *vty, static void ospf_mpls_te_show_info(struct vty *vty, struct ospf_lsa *lsa) { - struct lsa_header *lsah = (struct lsa_header *)lsa->data; + struct lsa_header *lsah = lsa->data; struct tlv_header *tlvh, *next; uint16_t sum, total; uint16_t (*subfunc)(struct vty * vty, struct tlv_header * tlvh, diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 75f556e39f..ea73834a66 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -54,8 +54,8 @@ #include "ospfd/ospf_bfd.h" FRR_CFG_DEFAULT_BOOL(OSPF_LOG_ADJACENCY_CHANGES, - { .val_long = true, .match_profile = "datacenter", }, - { .val_long = false }, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, ) static const char *const ospf_network_type_str[] = { diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index 385a7ece7b..c7e6bd9cbf 100644 --- 
a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -1023,8 +1023,8 @@ void ospf_distribute_list_update(struct ospf *ospf, int type, /* Set timer. */ ospf->t_distribute_update = NULL; - thread_add_timer_msec(master, ospf_distribute_list_update_timer, - (void **)args, ospf->min_ls_interval, + thread_add_timer_msec(master, ospf_distribute_list_update_timer, args, + ospf->min_ls_interval, &ospf->t_distribute_update); } diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c index f3fe9e17b2..e9f622d217 100644 --- a/ospfd/ospfd.c +++ b/ospfd/ospfd.c @@ -1787,7 +1787,7 @@ static void ospf_nbr_nbma_add(struct ospf_nbr_nbma *nbr_nbma, p.prefixlen = IPV4_MAX_BITLEN; p.u.prefix4 = nbr_nbma->addr; - rn = route_node_get(oi->nbrs, (struct prefix *)&p); + rn = route_node_get(oi->nbrs, &p); if (rn->info) { nbr = rn->info; nbr->nbr_nbma = nbr_nbma; diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c index e395b7831d..dfc8bec1bc 100644 --- a/pbrd/pbr_vty.c +++ b/pbrd/pbr_vty.c @@ -423,7 +423,6 @@ DEFPY(pbr_map_vrf, pbr_map_vrf_cmd, "Use the interface's VRF for lookup\n") { struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); - int ret = CMD_SUCCESS; if (no) { pbr_map_delete_vrf(pbrms); @@ -434,28 +433,51 @@ DEFPY(pbr_map_vrf, pbr_map_vrf_cmd, pbrms->vrf_lookup = false; pbrms->vrf_unchanged = false; - goto done; + return CMD_SUCCESS; } if (pbrms->nhgrp_name || pbrms->nhg) { vty_out(vty, "A `set nexthop/nexthop-group XX` command already exits, please remove that first\n"); - ret = CMD_WARNING_CONFIG_FAILED; - goto done; + return CMD_WARNING_CONFIG_FAILED; } - if (pbrms->vrf_lookup || pbrms->vrf_unchanged) { - vty_out(vty, SET_VRF_EXISTS_STR); - ret = CMD_WARNING_CONFIG_FAILED; - goto done; + /* + * Determine if a set vrf * command already exists. + * + * If its equivalent, just return success. + * + * Else, return failure, we don't allow atomic swaps yet. + */ + if (vrf_name && pbrms->vrf_lookup) { + /* New vrf specified and one already exists */ + + /* Is this vrf different from one already configured? 
*/ + if (strncmp(pbrms->vrf_name, vrf_name, sizeof(pbrms->vrf_name)) + != 0) + goto vrf_exists; + + return CMD_SUCCESS; + + } else if (!vrf_name && pbrms->vrf_unchanged) { + /* Unchanged specified and unchanged already exists */ + return CMD_SUCCESS; + + } else if (vrf_name && pbrms->vrf_unchanged) { + /* New vrf specified and unchanged is already set */ + goto vrf_exists; + + } else if (!vrf_name && pbrms->vrf_lookup) { + /* Unchanged specified and vrf to lookup already exists */ + goto vrf_exists; } + /* Create new lookup VRF or Unchanged */ if (vrf_name) { if (!pbr_vrf_lookup_by_name(vrf_name)) { vty_out(vty, "Specified: %s is non-existent\n", vrf_name); - ret = CMD_WARNING_CONFIG_FAILED; - goto done; + return CMD_WARNING_CONFIG_FAILED; } pbrms->vrf_lookup = true; @@ -465,8 +487,11 @@ DEFPY(pbr_map_vrf, pbr_map_vrf_cmd, pbr_map_check(pbrms); -done: - return ret; + return CMD_SUCCESS; + +vrf_exists: + vty_out(vty, SET_VRF_EXISTS_STR); + return CMD_WARNING_CONFIG_FAILED; } DEFPY (pbr_policy, diff --git a/pimd/README b/pimd/README index 3d03979a9a..1db0aad83c 100644 --- a/pimd/README +++ b/pimd/README @@ -33,7 +33,7 @@ HOME SITE qpimd lives at: - https://github.com/freerangerouting/frr + https://github.com/frrouting/frr PLATFORMS @@ -57,7 +57,7 @@ SUPPORT Please post comments, questions, patches, bug reports at the support site: - https://freerangerouting/frr + https://frrouting.org/frr RELATED WORK diff --git a/pimd/pim_bfd.c b/pimd/pim_bfd.c index 84f6733eaf..0df8ea6922 100644 --- a/pimd/pim_bfd.c +++ b/pimd/pim_bfd.c @@ -47,7 +47,7 @@ void pim_bfd_write_config(struct vty *vty, struct interface *ifp) if (!pim_ifp) return; - bfd_info = (struct bfd_info *)pim_ifp->bfd_info; + bfd_info = pim_ifp->bfd_info; if (!bfd_info) return; @@ -92,7 +92,7 @@ void pim_bfd_info_nbr_create(struct pim_interface *pim_ifp, if (!neigh->bfd_info) return; - nbr_bfd_info = (struct bfd_info *)neigh->bfd_info; + nbr_bfd_info = neigh->bfd_info; nbr_bfd_info->detect_mult = pim_ifp->bfd_info->detect_mult; nbr_bfd_info->desired_min_tx = pim_ifp->bfd_info->desired_min_tx; nbr_bfd_info->required_min_rx = pim_ifp->bfd_info->required_min_rx; @@ -118,7 +118,7 @@ static void pim_bfd_reg_dereg_nbr(struct pim_neighbor *nbr, int command) if (!nbr) return; pim_ifp = nbr->interface->info; - bfd_info = (struct bfd_info *)pim_ifp->bfd_info; + bfd_info = pim_ifp->bfd_info; if (!bfd_info) return; if (PIM_DEBUG_PIM_TRACE) { @@ -194,8 +194,8 @@ void pim_bfd_if_param_set(struct interface *ifp, uint32_t min_rx, if (!pim_ifp) return; - bfd_set_param((struct bfd_info **)&(pim_ifp->bfd_info), min_rx, min_tx, - detect_mult, defaults, &command); + bfd_set_param(&(pim_ifp->bfd_info), min_rx, min_tx, detect_mult, + defaults, &command); if (pim_ifp->bfd_info) { if (PIM_DEBUG_PIM_TRACE) diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index bcf11aedbd..d949c657bd 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -601,7 +601,8 @@ static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr, return true; else if (bsr_prio == pim->global_scope.current_bsr_prio) { - if (bsr.s_addr >= pim->global_scope.current_bsr.s_addr) + if (ntohl(bsr.s_addr) + >= ntohl(pim->global_scope.current_bsr.s_addr)) return true; else return false; @@ -874,6 +875,17 @@ static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf, pim_ifp = ifp->info; if ((!pim_ifp) || (!pim_ifp->bsm_enable)) continue; + + /* + * RFC 5059 Sec 3.4: + * When a Bootstrap message is forwarded, it is forwarded out + * of every multicast-capable interface that has PIM 
neighbors. + * + * So skipping pim interfaces with no neighbors. + */ + if (listcount(pim_ifp->pim_neighbor_list) == 0) + continue; + pim_hello_require(ifp); pim_mtu = ifp->mtu - MAX_IP_HDR_LEN; if (pim_mtu < len) { @@ -1056,13 +1068,13 @@ static bool pim_install_bsm_grp_rp(struct pim_instance *pim, if (listnode_add_sort_nodup(grpnode->partial_bsrp_list, bsm_rpinfo)) { if (PIM_DEBUG_BSM) zlog_debug( - "%s, bs_rpinfo node added to the partial bs_rplist.\r\n", + "%s, bs_rpinfo node added to the partial bs_rplist.", __func__); return true; } if (PIM_DEBUG_BSM) - zlog_debug("%s: list node not added\n", __func__); + zlog_debug("%s: list node not added", __func__); XFREE(MTYPE_PIM_BSRP_NODE, bsm_rpinfo); return false; @@ -1080,7 +1092,7 @@ static void pim_update_pending_rp_cnt(struct bsm_scope *sz, if (bsm_frag_tag != bsgrp->frag_tag) { if (PIM_DEBUG_BSM) zlog_debug( - "%s,Received a new BSM ,so clear the pending bs_rpinfo list.\r\n", + "%s,Received a new BSM ,so clear the pending bs_rpinfo list.", __func__); list_delete_all_node(bsgrp->partial_bsrp_list); bsgrp->pend_rp_cnt = total_rp_count; @@ -1120,7 +1132,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str, sizeof(grp_str)); zlog_debug( - "%s, Group %s Rpcount:%d Fragment-Rp-count:%d\r\n", + "%s, Group %s Rpcount:%d Fragment-Rp-count:%d", __func__, grp_str, grpinfo.rp_count, grpinfo.frag_rp_count); } @@ -1134,9 +1146,8 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str, sizeof(grp_str)); - zlog_debug( - "%s, Rp count is zero for group: %s\r\n", - __func__, grp_str); + zlog_debug("%s, Rp count is zero for group: %s", + __func__, grp_str); } return false; } @@ -1157,9 +1168,8 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, if (!bsgrp) { if (PIM_DEBUG_BSM) - zlog_debug( - "%s, Create new BSM Group node.\r\n", - __func__); + zlog_debug("%s, Create new BSM Group node.", + __func__); /* create a new node to be added to the tree. 
*/ bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table, @@ -1167,7 +1177,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, if (!bsgrp) { zlog_debug( - "%s, Failed to get the BSM group node.\r\n", + "%s, Failed to get the BSM group node.", __func__); continue; } @@ -1202,7 +1212,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr, rp_str, sizeof(rp_str)); zlog_debug( - "%s, Rp address - %s; pri:%d hold:%d\r\n", + "%s, Rp address - %s; pri:%d hold:%d", __func__, rp_str, rpinfo.rp_pri, rpinfo.rp_holdtime); } @@ -1272,6 +1282,13 @@ int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf, bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN); pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str, sizeof(bsr_str)); + if (bshdr->hm_len > 32) { + zlog_warn("Bad hashmask length for IPv4; got %" PRIu8 + ", expected value in range 0-32", + bshdr->hm_len); + pim->bsm_dropped++; + return -1; + } pim->global_scope.hashMasklen = bshdr->hm_len; frag_tag = ntohs(bshdr->frag_tag); @@ -1366,7 +1383,7 @@ int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf, (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN), frag_tag)) { if (PIM_DEBUG_BSM) { - zlog_debug("%s, Parsing BSM failed.\r\n", __func__); + zlog_debug("%s, Parsing BSM failed.", __func__); } pim->bsm_dropped++; return -1; diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index abc1548967..e94d15effd 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -1520,6 +1520,10 @@ static void pim_show_interface_traffic(struct pim_instance *pim, pim_ifp->pim_ifstat_join_recv); json_object_int_add(json_row, "joinTx", pim_ifp->pim_ifstat_join_send); + json_object_int_add(json_row, "pruneTx", + pim_ifp->pim_ifstat_prune_send); + json_object_int_add(json_row, "pruneRx", + pim_ifp->pim_ifstat_prune_recv); json_object_int_add(json_row, "registerRx", pim_ifp->pim_ifstat_reg_recv); json_object_int_add(json_row, "registerTx", @@ -1706,7 +1710,10 @@ static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp, pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags)); if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags)) json_object_int_add(json_row, "SGRpt", 1); - + if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags)) + json_object_int_add(json_row, "protocolPim", 1); + if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) + json_object_int_add(json_row, "protocolIgmp", 1); json_object_object_get_ex(json_iface, ch_grp_str, &json_grp); if (!json_grp) { json_grp = json_object_new_object(); @@ -3016,7 +3023,7 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj) } for (ALL_LIST_ELEMENTS_RO(pim->global_scope.bsm_list, bsmnode, bsm)) { - char grp_str[INET_ADDRSTRLEN]; + char grp_str[PREFIX_STRLEN]; char rp_str[INET_ADDRSTRLEN]; char bsr_str[INET_ADDRSTRLEN]; struct bsmmsg_grpinfo *group; @@ -3185,7 +3192,7 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim, if (!bsgrp) continue; - char grp_str[INET_ADDRSTRLEN]; + char grp_str[PREFIX_STRLEN]; prefix2str(&bsgrp->group, grp_str, sizeof(grp_str)); @@ -3312,12 +3319,9 @@ static void pim_show_statistics(struct pim_instance *pim, struct vty *vty, if (uj) { json = json_object_new_object(); - json_object_int_add(json, "Number of Received BSMs", - pim->bsm_rcvd); - json_object_int_add(json, "Number of Forwared BSMs", - pim->bsm_sent); - json_object_int_add(json, "Number of Dropped BSMs", - pim->bsm_dropped); + json_object_int_add(json, "bsmRx", pim->bsm_rcvd); + 
json_object_int_add(json, "bsmTx", pim->bsm_sent); + json_object_int_add(json, "bsmDropped", pim->bsm_dropped); } else { vty_out(vty, "BSM Statistics :\n"); vty_out(vty, "----------------\n"); @@ -3359,15 +3363,13 @@ static void pim_show_statistics(struct pim_instance *pim, struct vty *vty, json_row = json_object_new_object(); json_object_string_add(json_row, "If Name", ifp->name); + json_object_int_add(json_row, "bsmDroppedConfig", + pim_ifp->pim_ifstat_bsm_cfg_miss); json_object_int_add( - json_row, - "Number of BSMs dropped due to config miss", - pim_ifp->pim_ifstat_bsm_cfg_miss); - json_object_int_add( - json_row, "Number of unicast BSMs dropped", + json_row, "bsmDroppedUnicast", pim_ifp->pim_ifstat_ucast_bsm_cfg_miss); json_object_int_add(json_row, - "Number of BSMs dropped due to invalid scope zone", + "bsmDroppedInvalidScopeZone", pim_ifp->pim_ifstat_bsm_invalid_sz); json_object_object_add(json, ifp->name, json_row); } @@ -3732,8 +3734,6 @@ static void pim_show_bsr(struct pim_instance *pim, char bsr_str[PREFIX_STRLEN]; json_object *json = NULL; - vty_out(vty, "PIMv2 Bootstrap information\n"); - if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) { strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str)); pim_time_uptime(uptime, sizeof(uptime), @@ -3771,14 +3771,15 @@ static void pim_show_bsr(struct pim_instance *pim, json_object_string_add(json, "bsr", bsr_str); json_object_int_add(json, "priority", pim->global_scope.current_bsr_prio); - json_object_int_add(json, "fragment_tag", + json_object_int_add(json, "fragmentTag", pim->global_scope.bsm_frag_tag); json_object_string_add(json, "state", bsr_state); json_object_string_add(json, "upTime", uptime); - json_object_string_add(json, "last_bsm_seen", last_bsm_seen); + json_object_string_add(json, "lastBsmSeen", last_bsm_seen); } else { + vty_out(vty, "PIMv2 Bootstrap information\n"); vty_out(vty, "Current preferred BSR address: %s\n", bsr_str); vty_out(vty, "Priority Fragment-Tag State UpTime\n"); @@ -5790,13 +5791,18 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty, int oif_vif_index; struct interface *ifp_in; char proto[100]; + char state_str[PIM_REG_STATE_STR_LEN]; char mroute_uptime[10]; if (uj) { json = json_object_new_object(); } else { + vty_out(vty, "IP Multicast Routing Table\n"); + vty_out(vty, "Flags: S- Sparse, C - Connected, P - Pruned\n"); vty_out(vty, - "Source Group Proto Input Output TTL Uptime\n"); + " R - RP-bit set, F - Register flag, T - SPT-bit set\n"); + vty_out(vty, + "\nSource Group Flags Proto Input Output TTL Uptime\n"); } now = pim_time_monotonic_sec(); @@ -5819,6 +5825,23 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty, sizeof(grp_str)); pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, src_str, sizeof(src_str)); + + strlcpy(state_str, "S", sizeof(state_str)); + /* When a non DR receives a igmp join, it creates a (*,G) + * channel_oil without any upstream creation */ + if (c_oil->up) { + if (PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(c_oil->up->flags)) + strlcat(state_str, "C", sizeof(state_str)); + if (pim_upstream_is_sg_rpt(c_oil->up)) + strlcat(state_str, "R", sizeof(state_str)); + if (PIM_UPSTREAM_FLAG_TEST_FHR(c_oil->up->flags)) + strlcat(state_str, "F", sizeof(state_str)); + if (c_oil->up->sptbit == PIM_UPSTREAM_SPTBIT_TRUE) + strlcat(state_str, "T", sizeof(state_str)); + } + if (pim_channel_oil_empty(c_oil)) + strlcat(state_str, "P", sizeof(state_str)); + ifp_in = pim_if_find_by_vif_index(pim, c_oil->oil.mfcc_parent); if (ifp_in) @@ -5842,7 +5865,8 @@ static void 
show_mroute(struct pim_instance *pim, struct vty *vty, } /* Find the source nested under the group, create it if - * it doesn't exist */ + * it doesn't exist + */ json_object_object_get_ex(json_group, src_str, &json_source); @@ -5965,14 +5989,16 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty, } vty_out(vty, - "%-15s %-15s %-6s %-16s %-16s %-3d %8s\n", - src_str, grp_str, proto, in_ifname, - out_ifname, ttl, mroute_uptime); + "%-15s %-15s %-15s %-6s %-16s %-16s %-3d %8s\n", + src_str, grp_str, state_str, proto, + in_ifname, out_ifname, ttl, + mroute_uptime); if (first) { src_str[0] = '\0'; grp_str[0] = '\0'; in_ifname[0] = '\0'; + state_str[0] = '\0'; mroute_uptime[0] = '\0'; first = 0; } @@ -5980,9 +6006,10 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty, } if (!uj && !found_oif) { - vty_out(vty, "%-15s %-15s %-6s %-16s %-16s %-3d %8s\n", - src_str, grp_str, "none", in_ifname, "none", 0, - "--:--:--"); + vty_out(vty, + "%-15s %-15s %-15s %-6s %-16s %-16s %-3d %8s\n", + src_str, grp_str, state_str, "none", in_ifname, + "none", 0, "--:--:--"); } } @@ -6600,18 +6627,18 @@ static int pim_cmd_spt_switchover(struct pim_instance *pim, switch (pim->spt.switchover) { case PIM_SPT_IMMEDIATE: - XFREE(MTYPE_PIM_SPT_PLIST_NAME, pim->spt.plist); + XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist); pim_upstream_add_lhr_star_pimreg(pim); break; case PIM_SPT_INFINITY: pim_upstream_remove_lhr_star_pimreg(pim, plist); - XFREE(MTYPE_PIM_SPT_PLIST_NAME, pim->spt.plist); + XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist); if (plist) pim->spt.plist = - XSTRDUP(MTYPE_PIM_SPT_PLIST_NAME, plist); + XSTRDUP(MTYPE_PIM_PLIST_NAME, plist); break; } @@ -6672,6 +6699,26 @@ DEFUN (no_ip_pim_spt_switchover_infinity_plist, return pim_cmd_spt_switchover(pim, PIM_SPT_IMMEDIATE, NULL); } +DEFPY (pim_register_accept_list, + pim_register_accept_list_cmd, + "[no] ip pim register-accept-list WORD$word", + NO_STR + IP_STR + PIM_STR + "Only accept registers from a specific source prefix list\n" + "Prefix-List name\n") +{ + PIM_DECLVAR_CONTEXT(vrf, pim); + + if (no) + XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist); + else { + XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist); + pim->register_plist = XSTRDUP(MTYPE_PIM_PLIST_NAME, word); + } + return CMD_SUCCESS; +} + DEFUN (ip_pim_joinprune_time, ip_pim_joinprune_time_cmd, "ip pim join-prune-interval (60-600)", @@ -7237,11 +7284,20 @@ DEFUN (no_ip_pim_ecmp_rebalance, static int pim_cmd_igmp_start(struct vty *vty, struct interface *ifp) { struct pim_interface *pim_ifp; + struct pim_instance *pim; uint8_t need_startup = 0; pim_ifp = ifp->info; if (!pim_ifp) { + pim = pim_get_pim_instance(ifp->vrf_id); + /* Limit mcast interfaces to number of vifs available */ + if (pim->mcast_if_count == MAXVIFS) { + vty_out(vty, + "Max multicast interfaces(%d) Reached. 
Could not enable IGMP on interface %s\n", + MAXVIFS, ifp->name); + return CMD_WARNING_CONFIG_FAILED; + } (void)pim_if_new(ifp, true, false, false, false); need_startup = 1; } else { @@ -7991,13 +8047,21 @@ DEFPY_HIDDEN (interface_ip_igmp_query_generate, return CMD_SUCCESS; } -static int pim_cmd_interface_add(struct interface *ifp) +static int pim_cmd_interface_add(struct vty *vty, struct interface *ifp) { struct pim_interface *pim_ifp = ifp->info; + struct pim_instance *pim; - if (!pim_ifp) + if (!pim_ifp) { + pim = pim_get_pim_instance(ifp->vrf_id); + /* Limiting mcast interfaces to number of VIFs */ + if (pim->mcast_if_count == MAXVIFS) { + vty_out(vty, "Max multicast interfaces(%d) reached.", + MAXVIFS); + return 0; + } pim_ifp = pim_if_new(ifp, false, true, false, false); - else + } else PIM_IF_DO_PIM(pim_ifp->options); pim_if_addr_add_all(ifp); @@ -8068,15 +8132,17 @@ DEFPY (interface_ip_pim_activeactive, VTY_DECLVAR_CONTEXT(interface, ifp); struct pim_interface *pim_ifp; - if (!no && !pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM active-active on interface\n"); + if (!no && !pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, + "Could not enable PIM SM active-active on interface %s\n", + ifp->name); return CMD_WARNING_CONFIG_FAILED; } - if (PIM_DEBUG_MLAG) - zlog_debug("%sConfiguring PIM active-active on Interface: %s", - no ? "Un-":" ", ifp->name); + if (PIM_DEBUG_MLAG) + zlog_debug("%sConfiguring PIM active-active on Interface: %s", + no ? "Un-" : " ", ifp->name); pim_ifp = ifp->info; if (no) @@ -8096,8 +8162,9 @@ DEFUN_HIDDEN (interface_ip_pim_ssm, { VTY_DECLVAR_CONTEXT(interface, ifp); - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, "Could not enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING_CONFIG_FAILED; } @@ -8113,8 +8180,9 @@ static int interface_ip_pim_helper(struct vty *vty) VTY_DECLVAR_CONTEXT(interface, ifp); - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, "Could not enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING_CONFIG_FAILED; } @@ -8402,8 +8470,10 @@ DEFUN (interface_ip_pim_hello, struct pim_interface *pim_ifp = ifp->info; if (!pim_ifp) { - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, + "Could not enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING_CONFIG_FAILED; } } @@ -9145,8 +9215,10 @@ DEFUN (ip_pim_bfd, struct bfd_info *bfd_info = NULL; if (!pim_ifp) { - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, + "Could not enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING; } } @@ -9196,8 +9268,10 @@ DEFUN (ip_pim_bsm, struct pim_interface *pim_ifp = ifp->info; if (!pim_ifp) { - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, + "Could not enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING; } } @@ -9240,8 +9314,10 @@ DEFUN (ip_pim_ucast_bsm, struct pim_interface *pim_ifp = ifp->info; if (!pim_ifp) { - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, + "Could not 
enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING; } } @@ -9308,8 +9384,10 @@ DEFUN( struct pim_interface *pim_ifp = ifp->info; if (!pim_ifp) { - if (!pim_cmd_interface_add(ifp)) { - vty_out(vty, "Could not enable PIM SM on interface\n"); + if (!pim_cmd_interface_add(vty, ifp)) { + vty_out(vty, + "Could not enable PIM SM on interface %s\n", + ifp->name); return CMD_WARNING; } } @@ -10770,6 +10848,8 @@ void pim_cmd_init(void) install_element(CONFIG_NODE, &no_ip_pim_spt_switchover_infinity_plist_cmd); install_element(VRF_NODE, &no_ip_pim_spt_switchover_infinity_plist_cmd); + install_element(CONFIG_NODE, &pim_register_accept_list_cmd); + install_element(VRF_NODE, &pim_register_accept_list_cmd); install_element(CONFIG_NODE, &ip_pim_joinprune_time_cmd); install_element(VRF_NODE, &ip_pim_joinprune_time_cmd); install_element(CONFIG_NODE, &no_ip_pim_joinprune_time_cmd); diff --git a/pimd/pim_hello.c b/pimd/pim_hello.c index ecdb3a34a7..e50504ec10 100644 --- a/pimd/pim_hello.c +++ b/pimd/pim_hello.c @@ -484,7 +484,7 @@ int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf, return -1; } if (can_disable_join_suppression) { - *((uint8_t *)(curr) + 4) |= 0x80; /* enable T bit */ + *(curr + 4) |= 0x80; /* enable T bit */ } curr = tmp; diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index cb31878e01..95b81d5dcb 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -186,6 +186,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim, pim_sock_reset(ifp); pim_if_add_vif(ifp, ispimreg, is_vxlan_term); + pim_ifp->pim->mcast_if_count++; return pim_ifp; } @@ -209,6 +210,7 @@ void pim_if_delete(struct interface *ifp) pim_neighbor_delete_all(ifp, "Interface removed from configuration"); pim_if_del_vif(ifp); + pim_ifp->pim->mcast_if_count--; list_delete(&pim_ifp->igmp_socket_list); list_delete(&pim_ifp->pim_neighbor_list); @@ -275,7 +277,7 @@ static void pim_addr_change(struct interface *ifp) 1) Before an interface goes down or changes primary IP address, a Hello message with a zero HoldTime should be sent immediately (with the old IP address if the IP address changed). - -- FIXME See CAVEAT C13 + -- Done at the caller of the function as new ip already updated here 2) After an interface has changed its IP address, it MUST send a Hello message with its new IP address. @@ -320,6 +322,10 @@ static int detect_primary_address_change(struct interface *ifp, } if (changed) { + /* Before updating pim_ifp send Hello time with 0 hold time */ + if (PIM_IF_TEST_PIM(pim_ifp->options)) { + pim_hello_send(ifp, 0 /* zero-sec holdtime */); + } pim_ifp->primary_address = new_prim_addr; } @@ -894,15 +900,16 @@ struct in_addr pim_find_primary_addr(struct interface *ifp) * So let's grab the loopbacks v4 address * and use that as the primary address */ - if (!v4_addrs && v6_addrs && !if_is_loopback(ifp)) { + if (!v4_addrs && v6_addrs) { struct interface *lo_ifp; + // DBS - Come back and check here if (ifp->vrf_id == VRF_DEFAULT) lo_ifp = if_lookup_by_name("lo", vrf->vrf_id); else lo_ifp = if_lookup_by_name(vrf->name, vrf->vrf_id); - if (lo_ifp) + if (lo_ifp && (lo_ifp != ifp)) return pim_find_primary_addr(lo_ifp); } @@ -1583,8 +1590,14 @@ int pim_ifp_create(struct interface *ifp) } if (!strncmp(ifp->name, PIM_VXLAN_TERM_DEV_NAME, - sizeof(PIM_VXLAN_TERM_DEV_NAME))) - pim_vxlan_add_term_dev(pim, ifp); + sizeof(PIM_VXLAN_TERM_DEV_NAME))) { + if (pim->mcast_if_count < MAXVIFS) + pim_vxlan_add_term_dev(pim, ifp); + else + zlog_warn( + "%s: Cannot enable pim on %s. MAXVIFS(%d) reached. 
Deleting and readding the vxlan termimation device after unconfiguring pim from other interfaces may succeed.", + __func__, ifp->name, MAXVIFS); + } return 0; } diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c index 44d4ee7192..70e06ccc4e 100644 --- a/pimd/pim_ifchannel.c +++ b/pimd/pim_ifchannel.c @@ -628,6 +628,12 @@ struct pim_ifchannel *pim_ifchannel_add(struct interface *ifp, up->dualactive_ifchannel_count, up->flags); } + if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_PIM) + PIM_IF_FLAG_SET_PROTO_PIM(ch->flags); + + if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_IGMP) + PIM_IF_FLAG_SET_PROTO_IGMP(ch->flags); + if (PIM_DEBUG_PIM_TRACE) zlog_debug("%s: ifchannel %s(%s) is created ", __func__, ch->sg_str, ch->interface->name); diff --git a/pimd/pim_ifchannel.h b/pimd/pim_ifchannel.h index 3d5cbd8ecf..425622b79e 100644 --- a/pimd/pim_ifchannel.h +++ b/pimd/pim_ifchannel.h @@ -69,7 +69,7 @@ struct pim_assert_metric { #define PIM_IF_FLAG_UNSET_ASSERT_TRACKING_DESIRED(flags) ((flags) &= ~PIM_IF_FLAG_MASK_ASSERT_TRACKING_DESIRED) /* - * Flat to tell us if the ifchannel is (S,G,rpt) + * Flag to tell us if the ifchannel is (S,G,rpt) */ #define PIM_IF_FLAG_MASK_S_G_RPT (1 << 2) #define PIM_IF_FLAG_TEST_S_G_RPT(flags) ((flags) & PIM_IF_FLAG_MASK_S_G_RPT) @@ -77,6 +77,23 @@ struct pim_assert_metric { #define PIM_IF_FLAG_UNSET_S_G_RPT(flags) ((flags) &= ~PIM_IF_FLAG_MASK_S_G_RPT) /* + * Flag to tell us if the ifchannel is proto PIM + */ +#define PIM_IF_FLAG_MASK_PROTO_PIM (1 << 3) +#define PIM_IF_FLAG_TEST_PROTO_PIM(flags) ((flags)&PIM_IF_FLAG_MASK_PROTO_PIM) +#define PIM_IF_FLAG_SET_PROTO_PIM(flags) ((flags) |= PIM_IF_FLAG_MASK_PROTO_PIM) +#define PIM_IF_FLAG_UNSET_PROTO_PIM(flags) \ + ((flags) &= ~PIM_IF_FLAG_MASK_PROTO_PIM) +/* + * Flag to tell us if the ifchannel is proto IGMP + */ +#define PIM_IF_FLAG_MASK_PROTO_IGMP (1 << 4) +#define PIM_IF_FLAG_TEST_PROTO_IGMP(flags) ((flags)&PIM_IF_FLAG_MASK_PROTO_IGMP) +#define PIM_IF_FLAG_SET_PROTO_IGMP(flags) \ + ((flags) |= PIM_IF_FLAG_MASK_PROTO_IGMP) +#define PIM_IF_FLAG_UNSET_PROTO_IGMP(flags) \ + ((flags) &= ~PIM_IF_FLAG_MASK_PROTO_IGMP) +/* Per-interface (S,G) state */ struct pim_ifchannel { diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index 347b0fc284..b4c2dd28cc 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -69,6 +69,8 @@ static void pim_instance_terminate(struct pim_instance *pim) pim_msdp_exit(pim); + XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist); + XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist); XFREE(MTYPE_PIM_PIM_INSTANCE, pim); } @@ -81,6 +83,7 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf) pim_if_init(pim); + pim->mcast_if_count = 0; pim->keep_alive_time = PIM_KEEPALIVE_PERIOD; pim->rp_keep_alive_time = PIM_RP_KEEPALIVE_PERIOD; diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index 7b1fd2e172..71bd7c1089 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -135,6 +135,9 @@ struct pim_instance { char *plist; } spt; + /* The name of the register-accept prefix-list */ + char *register_plist; + struct hash *rpf_hash; void *ssm_info; /* per-vrf SSM configuration */ @@ -165,6 +168,7 @@ struct pim_instance { struct route_table *rp_table; int iface_vif_index[MAXVIFS]; + int mcast_if_count; struct rb_pim_oil_head channel_oil_head; diff --git a/pimd/pim_join.c b/pimd/pim_join.c index 62bd2360c3..3a88de2070 100644 --- a/pimd/pim_join.c +++ b/pimd/pim_join.c @@ -140,18 +140,19 @@ static void recv_prune(struct interface *ifp, struct pim_neighbor *neigh, if ((source_flags & PIM_RPT_BIT_MASK) 
&& (source_flags & PIM_WILDCARD_BIT_MASK)) { - struct pim_rpf *rp = RP(pim_ifp->pim, sg->grp); + /* + * RFC 4601 Section 4.5.2: + * Received Prune(*,G) messages are processed even if the + * RP in the message does not match RP(G). + */ + if (PIM_DEBUG_PIM_TRACE) { + char received_rp[INET_ADDRSTRLEN]; - if (!rp) { - if (PIM_DEBUG_PIM_TRACE) - zlog_debug( - "%s: RP for %pSG4 completely failed lookup", - __func__, sg); - return; + pim_inet4_dump("<received?>", sg->src, received_rp, + sizeof(received_rp)); + zlog_debug("%s: Prune received with RP(%s) for %pSG4", + __func__, received_rp, sg); } - // Ignoring Prune *,G's at the moment. - if (sg->src.s_addr != rp->rpf_addr.u.prefix4.s_addr) - return; sg->src.s_addr = INADDR_ANY; } diff --git a/pimd/pim_memory.c b/pimd/pim_memory.c index 2bbab67e45..6bc8062c4b 100644 --- a/pimd/pim_memory.c +++ b/pimd/pim_memory.c @@ -51,5 +51,5 @@ DEFINE_MTYPE(PIMD, PIM_JP_AGG_SOURCE, "PIM JP AGG Source") DEFINE_MTYPE(PIMD, PIM_PIM_INSTANCE, "PIM global state") DEFINE_MTYPE(PIMD, PIM_NEXTHOP_CACHE, "PIM nexthop cache state") DEFINE_MTYPE(PIMD, PIM_SSM_INFO, "PIM SSM configuration") -DEFINE_MTYPE(PIMD, PIM_SPT_PLIST_NAME, "PIM SPT Prefix List Name") +DEFINE_MTYPE(PIMD, PIM_PLIST_NAME, "PIM Prefix List Names") DEFINE_MTYPE(PIMD, PIM_VXLAN_SG, "PIM VxLAN mroute cache") diff --git a/pimd/pim_memory.h b/pimd/pim_memory.h index e5ca57a15d..6beeb60075 100644 --- a/pimd/pim_memory.h +++ b/pimd/pim_memory.h @@ -50,7 +50,7 @@ DECLARE_MTYPE(PIM_JP_AGG_SOURCE) DECLARE_MTYPE(PIM_PIM_INSTANCE) DECLARE_MTYPE(PIM_NEXTHOP_CACHE) DECLARE_MTYPE(PIM_SSM_INFO) -DECLARE_MTYPE(PIM_SPT_PLIST_NAME); +DECLARE_MTYPE(PIM_PLIST_NAME); DECLARE_MTYPE(PIM_VXLAN_SG) #endif /* _QUAGGA_PIM_MEMORY_H */ diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c index f366fdbe79..9060b6a95a 100644 --- a/pimd/pim_mroute.c +++ b/pimd/pim_mroute.c @@ -232,6 +232,8 @@ static int pim_mroute_msg_nocache(int fd, struct interface *ifp, pim_upstream_mroute_iif_update(up->channel_oil, __func__); } pim_register_join(up); + /* if we have receiver, inherit from parent */ + pim_upstream_inherited_olist_decide(pim_ifp->pim, up); return 0; } @@ -484,7 +486,10 @@ static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, struct pim_upstream *parent; struct pim_nexthop source; struct pim_rpf *rpf = RP(pim_ifp->pim, sg.grp); - if (!rpf || !rpf->source_nexthop.interface) + + /* No RPF or No RPF interface or No mcast on RPF interface */ + if (!rpf || !rpf->source_nexthop.interface + || !rpf->source_nexthop.interface->info) return 0; /* @@ -961,7 +966,7 @@ static inline void pim_mroute_copy(struct mfcctl *oil, static int pim_mroute_add(struct channel_oil *c_oil, const char *name) { struct pim_instance *pim = c_oil->pim; - struct mfcctl tmp_oil; + struct mfcctl tmp_oil = { {0} }; int err; pim->mroute_add_last = pim_time_monotonic_sec(); @@ -1017,8 +1022,10 @@ static int pim_mroute_add(struct channel_oil *c_oil, const char *name) pim_channel_oil_dump(c_oil, buf, sizeof(buf))); } - c_oil->installed = 1; - c_oil->mroute_creation = pim_time_monotonic_sec(); + if (!c_oil->installed) { + c_oil->installed = 1; + c_oil->mroute_creation = pim_time_monotonic_sec(); + } return 0; } diff --git a/pimd/pim_neighbor.c b/pimd/pim_neighbor.c index ca7ca11402..d8a797f980 100644 --- a/pimd/pim_neighbor.c +++ b/pimd/pim_neighbor.c @@ -779,6 +779,7 @@ void pim_neighbor_update(struct pim_neighbor *neigh, uint32_t dr_priority, struct list *addr_list) { struct pim_interface *pim_ifp = neigh->interface->info; + uint32_t old, new; /* Received 
holdtime ? */ if (PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_HOLDTIME)) { @@ -818,6 +819,16 @@ void pim_neighbor_update(struct pim_neighbor *neigh, neigh->prefix_list = addr_list; update_dr_priority(neigh, hello_options, dr_priority); + new = PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_LAN_PRUNE_DELAY); + old = PIM_OPTION_IS_SET(neigh->hello_options, + PIM_OPTION_MASK_LAN_PRUNE_DELAY); + + if (old != new) { + if (old) + ++pim_ifp->pim_number_of_nonlandelay_neighbors; + else + --pim_ifp->pim_number_of_nonlandelay_neighbors; + } /* Copy flags */ diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c index 8d7a921cf4..f37c140bf2 100644 --- a/pimd/pim_pim.c +++ b/pimd/pim_pim.c @@ -42,7 +42,6 @@ #include "pim_bsm.h" static int on_pim_hello_send(struct thread *t); -static int pim_hello_send(struct interface *ifp, uint16_t holdtime); static const char *pim_pim_msgtype2str(enum pim_msg_type type) { @@ -137,6 +136,18 @@ void pim_sock_delete(struct interface *ifp, const char *delete_message) sock_close(ifp); } +/* For now check dst address for hello, assrt and join/prune is all pim rtr */ +static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, in_addr_t addr) +{ + if ((type == PIM_MSG_TYPE_HELLO) || (type == PIM_MSG_TYPE_ASSERT) + || (type == PIM_MSG_TYPE_JOIN_PRUNE)) { + if (addr != qpim_all_pim_routers_addr.s_addr) + return false; + } + + return true; +} + int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len) { struct ip *ip_hdr; @@ -237,6 +248,21 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len) } } + if (!pim_pkt_dst_addr_ok(header->type, ip_hdr->ip_dst.s_addr)) { + char dst_str[INET_ADDRSTRLEN]; + char src_str[INET_ADDRSTRLEN]; + + pim_inet4_dump("<dst?>", ip_hdr->ip_dst, dst_str, + sizeof(dst_str)); + pim_inet4_dump("<src?>", ip_hdr->ip_src, src_str, + sizeof(src_str)); + zlog_warn( + "%s: Ignoring Pkt. 
Unexpected IP destination %s for %s (Expected: all_pim_routers_addr) from %s", + __func__, dst_str, pim_pim_msgtype2str(header->type), + src_str); + return -1; + } + switch (header->type) { case PIM_MSG_TYPE_HELLO: return pim_hello_recv(ifp, ip_hdr->ip_src, @@ -662,7 +688,7 @@ static int hello_send(struct interface *ifp, uint16_t holdtime) return 0; } -static int pim_hello_send(struct interface *ifp, uint16_t holdtime) +int pim_hello_send(struct interface *ifp, uint16_t holdtime) { struct pim_interface *pim_ifp = ifp->info; diff --git a/pimd/pim_pim.h b/pimd/pim_pim.h index e930ab7c2d..b9fdb14dc0 100644 --- a/pimd/pim_pim.h +++ b/pimd/pim_pim.h @@ -59,4 +59,5 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len); int pim_msg_send(int fd, struct in_addr src, struct in_addr dst, uint8_t *pim_msg, int pim_msg_size, const char *ifname); +int pim_hello_send(struct interface *ifp, uint16_t holdtime); #endif /* PIM_PIM_H */ diff --git a/pimd/pim_register.c b/pimd/pim_register.c index 19baecb9c2..cb6aae7fae 100644 --- a/pimd/pim_register.c +++ b/pimd/pim_register.c @@ -186,7 +186,7 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src, if (!pinfo) { if (PIM_DEBUG_PIM_REG) zlog_debug( - "%s: Interface: %s not configured for pim to trasmit on!\n", + "%s: Interface: %s not configured for pim to transmit on!", __func__, ifp->name); return; } @@ -324,14 +324,13 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr, struct prefix_sg sg; uint32_t *bits; int i_am_rp = 0; - struct pim_interface *pim_ifp = NULL; - - pim_ifp = ifp->info; + struct pim_interface *pim_ifp = ifp->info; + struct pim_instance *pim = pim_ifp->pim; #define PIM_MSG_REGISTER_BIT_RESERVED_LEN 4 ip_hdr = (struct ip *)(tlv_buf + PIM_MSG_REGISTER_BIT_RESERVED_LEN); - if (!pim_rp_check_is_my_ip_address(pim_ifp->pim, dest_addr)) { + if (!pim_rp_check_is_my_ip_address(pim, dest_addr)) { if (PIM_DEBUG_PIM_REG) { char dest[INET_ADDRSTRLEN]; @@ -375,7 +374,7 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr, sg.src = ip_hdr->ip_src; sg.grp = ip_hdr->ip_dst; - i_am_rp = I_am_RP(pim_ifp->pim, sg.grp); + i_am_rp = I_am_RP(pim, sg.grp); if (PIM_DEBUG_PIM_REG) { char src_str[INET_ADDRSTRLEN]; @@ -387,9 +386,36 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr, if (i_am_rp && (dest_addr.s_addr - == ((RP(pim_ifp->pim, sg.grp))->rpf_addr.u.prefix4.s_addr))) { + == ((RP(pim, sg.grp))->rpf_addr.u.prefix4.s_addr))) { sentRegisterStop = 0; + if (pim->register_plist) { + struct prefix_list *plist; + struct prefix src; + + plist = prefix_list_lookup(AFI_IP, pim->register_plist); + + src.family = AF_INET; + src.prefixlen = IPV4_MAX_PREFIXLEN; + src.u.prefix4 = sg.src; + + if (prefix_list_apply(plist, &src) == PREFIX_DENY) { + pim_register_stop_send(ifp, &sg, dest_addr, + src_addr); + if (PIM_DEBUG_PIM_PACKETS) { + char src_str[INET_ADDRSTRLEN]; + + pim_inet4_dump("<src?>", src_addr, + src_str, + sizeof(src_str)); + zlog_debug("%s: Sending register-stop to %s for %pSG4 due to prefix-list denial, dropping packet", + __func__, src_str, &sg); + } + + return 0; + } + } + if (*bits & PIM_REGISTER_BORDER_BIT) { struct in_addr pimbr = pim_br_get_pmbr(&sg); if (PIM_DEBUG_PIM_PACKETS) @@ -411,14 +437,13 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr, } } - struct pim_upstream *upstream = - pim_upstream_find(pim_ifp->pim, &sg); + struct pim_upstream *upstream = pim_upstream_find(pim, &sg); /* * If we don't have a place to send ignore the 
packet */ if (!upstream) { upstream = pim_upstream_add( - pim_ifp->pim, &sg, ifp, + pim, &sg, ifp, PIM_UPSTREAM_FLAG_MASK_SRC_STREAM, __func__, NULL); if (!upstream) { @@ -452,9 +477,8 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr, } if ((upstream->sptbit == PIM_UPSTREAM_SPTBIT_TRUE) - || ((SwitchToSptDesiredOnRp(pim_ifp->pim, &sg)) - && pim_upstream_inherited_olist(pim_ifp->pim, upstream) - == 0)) { + || ((SwitchToSptDesiredOnRp(pim, &sg)) + && pim_upstream_inherited_olist(pim, upstream) == 0)) { pim_register_stop_send(ifp, &sg, dest_addr, src_addr); sentRegisterStop = 1; } else { @@ -463,15 +487,13 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr, upstream->sptbit); } if ((upstream->sptbit == PIM_UPSTREAM_SPTBIT_TRUE) - || (SwitchToSptDesiredOnRp(pim_ifp->pim, &sg))) { + || (SwitchToSptDesiredOnRp(pim, &sg))) { if (sentRegisterStop) { pim_upstream_keep_alive_timer_start( - upstream, - pim_ifp->pim->rp_keep_alive_time); + upstream, pim->rp_keep_alive_time); } else { pim_upstream_keep_alive_timer_start( - upstream, - pim_ifp->pim->keep_alive_time); + upstream, pim->keep_alive_time); } } diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index 8799134edd..a9f1d9335a 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -248,13 +248,14 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim, if (PIM_DEBUG_PIM_TRACE) { char buf[PREFIX_STRLEN]; - route_unlock_node(rn); zlog_debug("Lookedup: %p for rp_info: %p(%s) Lock: %d", rn, rp_info, prefix2str(&rp_info->group, buf, sizeof(buf)), rn->lock); } + route_unlock_node(rn); + if (!best) return rp_info; @@ -1293,10 +1294,16 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj) json_row, "outboundInterface", rp_info->rp.source_nexthop .interface->name); - + else + json_object_string_add( + json_row, "outboundInterface", + "Unknown"); if (rp_info->i_am_rp) json_object_boolean_true_add(json_row, "iAmRP"); + else + json_object_boolean_false_add(json_row, + "iAmRP"); if (rp_info->plist) json_object_string_add(json_row, diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c index ae5781f0cc..f0a71c96ce 100644 --- a/pimd/pim_sock.c +++ b/pimd/pim_sock.c @@ -375,8 +375,7 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len, struct in_pktinfo *i = (struct in_pktinfo *)CMSG_DATA(cmsg); if (to) - ((struct sockaddr_in *)to)->sin_addr = - i->ipi_addr; + to->sin_addr = i->ipi_addr; if (tolen) *tolen = sizeof(struct sockaddr_in); if (ifindex) @@ -391,7 +390,7 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len, && (cmsg->cmsg_type == IP_RECVDSTADDR)) { struct in_addr *i = (struct in_addr *)CMSG_DATA(cmsg); if (to) - ((struct sockaddr_in *)to)->sin_addr = *i; + to->sin_addr = *i; if (tolen) *tolen = sizeof(struct sockaddr_in); diff --git a/pimd/pim_tlv.c b/pimd/pim_tlv.c index 5a751ac929..633bb207bd 100644 --- a/pimd/pim_tlv.c +++ b/pimd/pim_tlv.c @@ -121,20 +121,19 @@ int pim_encode_addr_ucast(uint8_t *buf, struct prefix *p) { switch (p->family) { case AF_INET: - *(uint8_t *)buf = - PIM_MSG_ADDRESS_FAMILY_IPV4; /* notice: AF_INET != - PIM_MSG_ADDRESS_FAMILY_IPV4 - */ + *buf = PIM_MSG_ADDRESS_FAMILY_IPV4; /* notice: AF_INET != + PIM_MSG_ADDRESS_FAMILY_IPV4 + */ ++buf; - *(uint8_t *)buf = 0; /* ucast IPv4 native encoding type (RFC + *buf = 0; /* ucast IPv4 native encoding type (RFC 4601: 4.9.1) */ ++buf; memcpy(buf, &p->u.prefix4, sizeof(struct in_addr)); return ucast_ipv4_encoding_len; case AF_INET6: - *(uint8_t *)buf = PIM_MSG_ADDRESS_FAMILY_IPV6; + *buf = 
PIM_MSG_ADDRESS_FAMILY_IPV6; ++buf; - *(uint8_t *)buf = 0; + *buf = 0; ++buf; memcpy(buf, &p->u.prefix6, sizeof(struct in6_addr)); return ucast_ipv6_encoding_len; @@ -198,13 +197,13 @@ int pim_encode_addr_group(uint8_t *buf, afi_t afi, int bidir, int scope, switch (afi) { case AFI_IP: - *(uint8_t *)buf = PIM_MSG_ADDRESS_FAMILY_IPV4; + *buf = PIM_MSG_ADDRESS_FAMILY_IPV4; ++buf; - *(uint8_t *)buf = 0; + *buf = 0; ++buf; - *(uint8_t *)buf = flags; + *buf = flags; ++buf; - *(uint8_t *)buf = 32; + *buf = 32; ++buf; memcpy(buf, &group, sizeof(struct in_addr)); return group_ipv4_encoding_len; @@ -473,7 +472,7 @@ int pim_parse_addr_ucast(struct prefix *p, const uint8_t *buf, int buf_size) case PIM_MSG_ADDRESS_FAMILY_IPV4: if ((addr + sizeof(struct in_addr)) > pastend) { zlog_warn( - "%s: IPv4 unicast address overflow: left=%zd needed=%zu", + "%s: IPv4 unicast address overflow: left=%td needed=%zu", __func__, pastend - addr, sizeof(struct in_addr)); return -3; @@ -489,7 +488,7 @@ int pim_parse_addr_ucast(struct prefix *p, const uint8_t *buf, int buf_size) case PIM_MSG_ADDRESS_FAMILY_IPV6: if ((addr + sizeof(struct in6_addr)) > pastend) { zlog_warn( - "%s: IPv6 unicast address overflow: left=%zd needed %zu", + "%s: IPv6 unicast address overflow: left=%td needed %zu", __func__, pastend - addr, sizeof(struct in6_addr)); return -3; @@ -548,7 +547,7 @@ int pim_parse_addr_group(struct prefix_sg *sg, const uint8_t *buf, int buf_size) if ((addr + sizeof(struct in_addr)) > pastend) { zlog_warn( - "%s: IPv4 group address overflow: left=%zd needed=%zu from", + "%s: IPv4 group address overflow: left=%td needed=%zu from", __func__, pastend - addr, sizeof(struct in_addr)); return -3; @@ -607,7 +606,7 @@ int pim_parse_addr_source(struct prefix_sg *sg, uint8_t *flags, case PIM_MSG_ADDRESS_FAMILY_IPV4: if ((addr + sizeof(struct in_addr)) > pastend) { zlog_warn( - "%s: IPv4 source address overflow: left=%zd needed=%zu", + "%s: IPv4 source address overflow: left=%td needed=%zu", __func__, pastend - addr, sizeof(struct in_addr)); return -3; diff --git a/pimd/pim_tlv.h b/pimd/pim_tlv.h index 657675b312..ef764656d3 100644 --- a/pimd/pim_tlv.h +++ b/pimd/pim_tlv.h @@ -48,8 +48,18 @@ typedef uint32_t pim_hello_options; #define PIM_OPTION_UNSET(options, option_mask) ((options) &= ~(option_mask)) #define PIM_OPTION_IS_SET(options, option_mask) ((options) & (option_mask)) -#define PIM_TLV_GET_UINT16(buf) ntohs(*(const uint16_t *)(buf)) -#define PIM_TLV_GET_UINT32(buf) ntohl(*(const uint32_t *)(buf)) +#define PIM_TLV_GET_UINT16(buf) \ + ({ \ + uint16_t _tmp; \ + memcpy(&_tmp, (buf), sizeof(uint16_t)); \ + ntohs(_tmp); \ + }) +#define PIM_TLV_GET_UINT32(buf) \ + ({ \ + uint32_t _tmp; \ + memcpy(&_tmp, (buf), sizeof(uint32_t)); \ + ntohl(_tmp); \ + }) #define PIM_TLV_GET_TYPE(buf) PIM_TLV_GET_UINT16(buf) #define PIM_TLV_GET_LENGTH(buf) PIM_TLV_GET_UINT16(buf) #define PIM_TLV_GET_HOLDTIME(buf) PIM_TLV_GET_UINT16(buf) diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c index fb7840ee76..982fb7e5a5 100644 --- a/pimd/pim_upstream.c +++ b/pimd/pim_upstream.c @@ -1713,10 +1713,12 @@ static int pim_upstream_register_stop_timer(struct thread *t) case PIM_REG_JOIN: break; case PIM_REG_PRUNE: + /* This is equalent to Couldreg -> False */ if (!up->rpf.source_nexthop.interface) { if (PIM_DEBUG_PIM_TRACE) zlog_debug("%s: up %s RPF is not present", __func__, up->sg_str); + up->reg_state = PIM_REG_NOINFO; return 0; } @@ -1810,10 +1812,16 @@ int pim_upstream_inherited_olist_decide(struct pim_instance *pim, continue; if 
(pim_upstream_evaluate_join_desired_interface(up, ch, starch)) { - int flag = PIM_OIF_FLAG_PROTO_PIM; + int flag = 0; if (!ch) flag = PIM_OIF_FLAG_PROTO_STAR; + else { + if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) + flag = PIM_OIF_FLAG_PROTO_IGMP; + if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags)) + flag |= PIM_OIF_FLAG_PROTO_PIM; + } pim_channel_add_oif(up->channel_oil, ifp, flag, __func__); diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index 8a87dfbb55..72540903be 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -216,6 +216,11 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty) ssm->plist_name); ++writes; } + if (pim->register_plist) { + vty_out(vty, "%sip pim register-accept-list %s\n", spaces, + pim->register_plist); + ++writes; + } if (pim->spt.switchover == PIM_SPT_INFINITY) { if (pim->spt.plist) vty_out(vty, diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c index 569b04d278..380c97a97c 100644 --- a/pimd/pim_vxlan.c +++ b/pimd/pim_vxlan.c @@ -253,8 +253,17 @@ static void pim_vxlan_orig_mr_up_del(struct pim_vxlan_sg *vxlan_sg) /* if there are other references register the source * for nht */ - if (up) - pim_rpf_update(vxlan_sg->pim, up, NULL, __func__); + if (up) { + enum pim_rpf_result r; + + r = pim_rpf_update(vxlan_sg->pim, up, NULL, __func__); + if (r == PIM_RPF_FAILURE) { + if (PIM_DEBUG_VXLAN) + zlog_debug( + "vxlan SG %s rpf_update failure", + vxlan_sg->sg_str); + } + } } } @@ -788,8 +797,8 @@ void pim_vxlan_sg_del(struct pim_instance *pim, struct prefix_sg *sg) if (!vxlan_sg) return; - pim_vxlan_sg_del_item(vxlan_sg); hash_release(pim->vxlan.sg_hash, vxlan_sg); + pim_vxlan_sg_del_item(vxlan_sg); } /******************************* MLAG handling *******************************/ diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c index e791500ede..8355c2099d 100644 --- a/pimd/pim_zebra.c +++ b/pimd/pim_zebra.c @@ -835,7 +835,7 @@ void igmp_source_forward_stop(struct igmp_source *source) void pim_forward_start(struct pim_ifchannel *ch) { struct pim_upstream *up = ch->upstream; - uint32_t mask = PIM_OIF_FLAG_PROTO_PIM; + uint32_t mask = 0; if (PIM_DEBUG_PIM_TRACE) { char source_str[INET_ADDRSTRLEN]; @@ -853,9 +853,12 @@ void pim_forward_start(struct pim_ifchannel *ch) inet_ntoa(up->upstream_addr)); } - if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_IGMP) + if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) mask = PIM_OIF_FLAG_PROTO_IGMP; + if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags)) + mask |= PIM_OIF_FLAG_PROTO_PIM; + pim_channel_add_oif(up->channel_oil, ch->interface, mask, __func__); } diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index fc486f4998..84fac4f951 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -61,6 +61,14 @@ static int zclient_lookup_connect(struct thread *t) zlookup->fail = 0; /* reset counter on connection */ } + if (zclient_send_hello(zlookup) < 0) { + if (close(zlookup->sock)) { + zlog_warn("%s: closing fd=%d: errno=%d %s", __func__, + zlookup->sock, errno, safe_strerror(errno)); + } + zlookup->sock = -1; + } + if (zlookup->sock < 0) { /* Since last connect failed, retry within 10 secs */ zclient_lookup_sched(zlookup, 10); @@ -125,7 +133,10 @@ void zclient_lookup_free(void) void zclient_lookup_new(void) { - zlookup = zclient_new(router->master, &zclient_options_default); + struct zclient_options options = zclient_options_default; + options.synchronous = true; + + zlookup = zclient_new(router->master, &options); if (!zlookup) { flog_err(EC_LIB_ZAPI_SOCKET, "%s: zclient_new() failure", __func__); @@ -161,6 +172,7 @@ static int 
zclient_read_nexthop(struct pim_instance *pim, if (PIM_DEBUG_PIM_NHT_DETAIL) { char addr_str[INET_ADDRSTRLEN]; + pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str)); zlog_debug("%s: addr=%s(%s)", __func__, addr_str, pim->vrf->name); diff --git a/ripd/rip_peer.c b/ripd/rip_peer.c index 55dafd7c1f..77c73ab398 100644 --- a/ripd/rip_peer.c +++ b/ripd/rip_peer.c @@ -131,7 +131,6 @@ void rip_peer_bad_packet(struct rip *rip, struct sockaddr_in *from) static char *rip_peer_uptime(struct rip_peer *peer, char *buf, size_t len) { time_t uptime; - struct tm tm; /* If there is no connection has been done before print `never'. */ if (peer->uptime == 0) { @@ -142,17 +141,9 @@ static char *rip_peer_uptime(struct rip_peer *peer, char *buf, size_t len) /* Get current time. */ uptime = time(NULL); uptime -= peer->uptime; - gmtime_r(&uptime, &tm); - - if (uptime < ONE_DAY_SECOND) - snprintf(buf, len, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - snprintf(buf, len, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - snprintf(buf, len, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), tm.tm_hour); + + frrtime_to_interval(uptime, buf, len); + return buf; } diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c index 6a30c07d99..25d9ed2b9e 100644 --- a/ripngd/ripng_interface.c +++ b/ripngd/ripng_interface.c @@ -867,17 +867,12 @@ int ripng_network_write(struct vty *vty, struct ripng *ripng) unsigned int i; const char *ifname; struct agg_node *node; - char buf[BUFSIZ]; /* Write enable network. */ for (node = agg_route_top(ripng->enable_network); node; node = agg_route_next(node)) - if (node->info) { - struct prefix *p = &node->p; - vty_out(vty, " %s/%d\n", - inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ), - p->prefixlen); - } + if (node->info) + vty_out(vty, " %pRN\n", node); /* Write enable interface. */ for (i = 0; i < vector_active(ripng->enable_if); i++) diff --git a/ripngd/ripng_nb_state.c b/ripngd/ripng_nb_state.c index 167077ea29..75dec3cb3e 100644 --- a/ripngd/ripng_nb_state.c +++ b/ripngd/ripng_nb_state.c @@ -158,7 +158,8 @@ int ripngd_instance_state_routes_route_get_keys(const void *list_entry, const struct agg_node *rn = list_entry; keys->num = 1; - (void)prefix2str(&rn->p, keys->key[0], sizeof(keys->key[0])); + (void)prefix2str(agg_node_get_prefix(rn), keys->key[0], + sizeof(keys->key[0])); return NB_OK; } @@ -191,7 +192,7 @@ ripngd_instance_state_routes_route_prefix_get_elem(const char *xpath, const struct agg_node *rn = list_entry; const struct ripng_info *rinfo = listnode_head(rn->info); - return yang_data_new_ipv6p(xpath, &rinfo->rp->p); + return yang_data_new_ipv6p(xpath, agg_node_get_prefix(rinfo->rp)); } /* diff --git a/ripngd/ripng_peer.c b/ripngd/ripng_peer.c index c038bfccf0..e6ff58dd0c 100644 --- a/ripngd/ripng_peer.c +++ b/ripngd/ripng_peer.c @@ -141,7 +141,6 @@ void ripng_peer_bad_packet(struct ripng *ripng, struct sockaddr_in6 *from) static char *ripng_peer_uptime(struct ripng_peer *peer, char *buf, size_t len) { time_t uptime; - struct tm tm; /* If there is no connection has been done before print `never'. */ if (peer->uptime == 0) { @@ -152,17 +151,9 @@ static char *ripng_peer_uptime(struct ripng_peer *peer, char *buf, size_t len) /* Get current time. 
*/ uptime = time(NULL); uptime -= peer->uptime; - gmtime_r(&uptime, &tm); - - if (uptime < ONE_DAY_SECOND) - snprintf(buf, len, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - snprintf(buf, len, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - snprintf(buf, len, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), tm.tm_hour); + + frrtime_to_interval(uptime, buf, len); + return buf; } diff --git a/ripngd/ripng_zebra.c b/ripngd/ripng_zebra.c index f9bd56d1df..baf7f00961 100644 --- a/ripngd/ripng_zebra.c +++ b/ripngd/ripng_zebra.c @@ -46,12 +46,13 @@ static void ripng_zebra_ipv6_send(struct ripng *ripng, struct agg_node *rp, struct listnode *listnode = NULL; struct ripng_info *rinfo = NULL; int count = 0; + const struct prefix *p = agg_node_get_prefix(rp); memset(&api, 0, sizeof(api)); api.vrf_id = ripng->vrf->vrf_id; api.type = ZEBRA_ROUTE_RIPNG; api.safi = SAFI_UNICAST; - api.prefix = rp->p; + api.prefix = *p; SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP); for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) { @@ -85,18 +86,17 @@ static void ripng_zebra_ipv6_send(struct ripng *ripng, struct agg_node *rp, if (IS_RIPNG_DEBUG_ZEBRA) { if (ripng->ecmp) - zlog_debug("%s: %s/%d nexthops %d", + zlog_debug("%s: %pRN nexthops %d", (cmd == ZEBRA_ROUTE_ADD) ? "Install into zebra" : "Delete from zebra", - inet6_ntoa(rp->p.u.prefix6), rp->p.prefixlen, - count); + rp, count); else - zlog_debug( - "%s: %s/%d", - (cmd == ZEBRA_ROUTE_ADD) ? "Install into zebra" - : "Delete from zebra", - inet6_ntoa(rp->p.u.prefix6), rp->p.prefixlen); + zlog_debug("%s: %pRN", + (cmd == ZEBRA_ROUTE_ADD) + ? "Install into zebra" + : "Delete from zebra", + rp); } } diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c index bb33abdb2c..1ea006abd6 100644 --- a/ripngd/ripngd.c +++ b/ripngd/ripngd.c @@ -1087,7 +1087,8 @@ void ripng_redistribute_withdraw(struct ripng *ripng, int type) if (IS_RIPNG_DEBUG_EVENT) { struct prefix_ipv6 *p = - (struct prefix_ipv6 *)&rp->p; + (struct prefix_ipv6 *) + agg_node_get_prefix(rp); zlog_debug( "Poisone %s/%d on the interface %s [withdraw]", @@ -1619,7 +1620,7 @@ void ripng_output_process(struct interface *ifp, struct sockaddr_in6 *to, * following * information. */ - p = (struct prefix_ipv6 *)&rp->p; + p = (struct prefix_ipv6 *)agg_node_get_prefix(rp); rinfo->metric_out = rinfo->metric; rinfo->tag_out = rinfo->tag; memset(&rinfo->nexthop_out, 0, @@ -1761,7 +1762,7 @@ void ripng_output_process(struct interface *ifp, struct sockaddr_in6 *to, * following * information. 
*/ - p = (struct prefix_ipv6 *)&rp->p; + p = (struct prefix_ipv6 *)agg_node_get_prefix(rp); aggregate->metric_set = 0; aggregate->metric_out = aggregate->metric; aggregate->tag_out = aggregate->tag; @@ -2053,7 +2054,6 @@ DEFUN (show_ipv6_ripng, struct agg_node *rp; struct ripng_info *rinfo; struct ripng_aggregate *aggregate; - struct prefix_ipv6 *p; struct list *list = NULL; struct listnode *listnode = NULL; int len; @@ -2085,15 +2085,11 @@ DEFUN (show_ipv6_ripng, for (rp = agg_route_top(ripng->table); rp; rp = agg_route_next(rp)) { if ((aggregate = rp->aggregate) != NULL) { - p = (struct prefix_ipv6 *)&rp->p; - #ifdef DEBUG - vty_out(vty, "R(a) %d/%d %s/%d ", aggregate->count, - aggregate->suppress, inet6_ntoa(p->prefix), - p->prefixlen); + vty_out(vty, "R(a) %d/%d %pRN ", aggregate->count, + aggregate->suppress, rp); #else - vty_out(vty, "R(a) %s/%d ", inet6_ntoa(p->prefix), - p->prefixlen); + vty_out(vty, "R(a) %pRN ", rp); #endif /* DEBUG */ vty_out(vty, "\n"); vty_out(vty, "%*s", 18, " "); @@ -2105,19 +2101,15 @@ DEFUN (show_ipv6_ripng, if ((list = rp->info) != NULL) for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) { - p = (struct prefix_ipv6 *)&rp->p; - #ifdef DEBUG - vty_out(vty, "%c(%s) 0/%d %s/%d ", + vty_out(vty, "%c(%s) 0/%d %pRN ", zebra_route_char(rinfo->type), ripng_route_subtype_print(rinfo), - rinfo->suppress, inet6_ntoa(p->prefix), - p->prefixlen); + rinfo->suppress, rp); #else - vty_out(vty, "%c(%s) %s/%d ", + vty_out(vty, "%c(%s) %pRN ", zebra_route_char(rinfo->type), - ripng_route_subtype_print(rinfo), - inet6_ntoa(p->prefix), p->prefixlen); + ripng_route_subtype_print(rinfo), rp); #endif /* DEBUG */ vty_out(vty, "\n"); vty_out(vty, "%*s", 18, " "); diff --git a/sharpd/sharp_globals.h b/sharpd/sharp_globals.h index 4e5c933667..8eba57f4dd 100644 --- a/sharpd/sharp_globals.h +++ b/sharpd/sharp_globals.h @@ -28,9 +28,11 @@ struct sharp_routes { /* The original prefix for route installation */ struct prefix orig_prefix; - /* The nexthop group we are using for installation */ + /* The nexthop info we are using for installation */ struct nexthop nhop; + struct nexthop backup_nhop; struct nexthop_group nhop_group; + struct nexthop_group backup_nhop_group; uint32_t total_routes; uint32_t installed_routes; diff --git a/sharpd/sharp_logpump.c b/sharpd/sharp_logpump.c new file mode 100644 index 0000000000..d07e2d273f --- /dev/null +++ b/sharpd/sharp_logpump.c @@ -0,0 +1,153 @@ +/* + * testing log message generator + * Copyright (C) 2019-2020 David Lamparter for NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "vty.h" +#include "command.h" +#include "prefix.h" +#include "nexthop.h" +#include "log.h" +#include "thread.h" +#include "vrf.h" +#include "zclient.h" +#include "frr_pthread.h" + +#include "sharpd/sharp_vty.h" + +/* this is quite hacky, but then again it's a test tool and it does its job. */ +static struct frr_pthread *lpt; + +static unsigned long lp_duration; +static unsigned lp_frequency; +static unsigned lp_burst; +static size_t lp_ctr, lp_expect; +static struct rusage lp_rusage; +static struct vty *lp_vty; + +extern struct thread_master *master; + +static int logpump_done(struct thread *thread) +{ + double x; + + vty_out(lp_vty, "\nlogpump done\n"); + vty_out(lp_vty, "%9zu messages written\n", lp_ctr); + x = (double)lp_ctr / (double)lp_expect * 100.; + vty_out(lp_vty, "%9zu messages targeted = %5.1lf%%\n", lp_expect, x); + + x = lp_rusage.ru_utime.tv_sec * 1000000 + lp_rusage.ru_utime.tv_usec; + x /= (double)lp_ctr; + vty_out(lp_vty, "%6llu.%06u usr %9.1lfns/msg\n", + (unsigned long long)lp_rusage.ru_utime.tv_sec, + (unsigned)lp_rusage.ru_utime.tv_usec, x * 1000.); + + x = lp_rusage.ru_stime.tv_sec * 1000000 + lp_rusage.ru_stime.tv_usec; + x /= (double)lp_ctr; + vty_out(lp_vty, "%6llu.%06u sys %9.1lfns/msg\n", + (unsigned long long)lp_rusage.ru_stime.tv_sec, + (unsigned)lp_rusage.ru_stime.tv_usec, x * 1000.); + + frr_pthread_stop(lpt, NULL); + frr_pthread_destroy(lpt); + lpt = NULL; + return 0; +} + +static void *logpump_run(void *arg) +{ + struct timespec start, next, now; + unsigned long delta, period; + + period = 1000000000L / lp_frequency; + + clock_gettime(CLOCK_MONOTONIC, &start); + next = start; + do { + for (size_t inburst = 0; inburst < lp_burst; inburst++) + zlog_debug("log pump: %zu (burst %zu)", + lp_ctr++, inburst); + + clock_gettime(CLOCK_MONOTONIC, &now); + delta = (now.tv_sec - start.tv_sec) * 1000000000L + + (now.tv_nsec - start.tv_nsec); + + next.tv_nsec += period; + if (next.tv_nsec > 1000000000L) { + next.tv_sec++; + next.tv_nsec -= 1000000000L; + } +#ifdef HAVE_CLOCK_NANOSLEEP + clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL); +#else + struct timespec slpdur; + + slpdur.tv_sec = next.tv_sec - now.tv_sec; + slpdur.tv_nsec = next.tv_nsec - now.tv_nsec; + if (slpdur.tv_nsec < 0) { + slpdur.tv_sec--; + slpdur.tv_nsec += 1000000000L; + } + + nanosleep(&slpdur, NULL); +#endif + } while (delta < lp_duration); + +#ifdef RUSAGE_THREAD + getrusage(RUSAGE_THREAD, &lp_rusage); +#else + getrusage(RUSAGE_SELF, &lp_rusage); +#endif + + thread_add_timer_msec(master, logpump_done, NULL, 0, NULL); + return NULL; +} + +static int logpump_halt(struct frr_pthread *fpt, void **res) +{ + return 0; +} + +/* default frr_pthread attributes */ +static const struct frr_pthread_attr attr = { + .start = logpump_run, + .stop = logpump_halt, +}; + +void sharp_logpump_run(struct vty *vty, unsigned duration, unsigned frequency, + unsigned burst) +{ + if (lpt != NULL) { + vty_out(vty, "logpump already running\n"); + return; + } + + vty_out(vty, "starting logpump...\n"); + vty_out(vty, "keep this VTY open and press Enter to see results\n"); + + lp_vty = vty; + lp_duration = duration * 1000000000UL; + lp_frequency = frequency; + lp_burst = burst; + lp_expect = duration * frequency * burst; + lp_ctr = 0; + + lpt = 
frr_pthread_new(&attr, "logpump", "logpump"); + frr_pthread_run(lpt, NULL); +} diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c index 486ccf6bfe..e2ea773055 100644 --- a/sharpd/sharp_vty.c +++ b/sharpd/sharp_vty.c @@ -162,7 +162,12 @@ DEFPY (install_routes_data_dump, DEFPY (install_routes, install_routes_cmd, - "sharp install routes [vrf NAME$vrf_name] <A.B.C.D$start4|X:X::X:X$start6> <nexthop <A.B.C.D$nexthop4|X:X::X:X$nexthop6>|nexthop-group NHGNAME$nexthop_group> (1-1000000)$routes [instance (0-255)$instance] [repeat (2-1000)$rpt]", + "sharp install routes [vrf NAME$vrf_name]\ + <A.B.C.D$start4|X:X::X:X$start6>\ + <nexthop <A.B.C.D$nexthop4|X:X::X:X$nexthop6>|\ + nexthop-group NHGNAME$nexthop_group>\ + [backup$backup <A.B.C.D$backup_nexthop4|X:X::X:X$backup_nexthop6>] \ + (1-1000000)$routes [instance (0-255)$instance] [repeat (2-1000)$rpt]", "Sharp routing Protocol\n" "install some routes\n" "Routes to install\n" @@ -175,6 +180,9 @@ DEFPY (install_routes, "V6 Nexthop address to use\n" "Nexthop-Group to use\n" "The Name of the nexthop-group\n" + "Backup nexthop to use(Can be an IPv4 or IPv6 address)\n" + "Backup V4 Nexthop address to use\n" + "Backup V6 Nexthop address to use\n" "How many to create\n" "Instance to use\n" "Instance\n" @@ -197,6 +205,8 @@ DEFPY (install_routes, memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix)); memset(&sg.r.nhop, 0, sizeof(sg.r.nhop)); memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group)); + memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop)); + memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group)); if (start4.s_addr != 0) { prefix.family = AF_INET; @@ -219,6 +229,12 @@ DEFPY (install_routes, return CMD_WARNING; } + /* Explicit backup not available with named nexthop-group */ + if (backup && nexthop_group) { + vty_out(vty, "%% Invalid: cannot specify both nexthop-group and backup\n"); + return CMD_WARNING; + } + if (nexthop_group) { struct nexthop_group_cmd *nhgc = nhgc_find(nexthop_group); if (!nhgc) { @@ -229,6 +245,22 @@ DEFPY (install_routes, } sg.r.nhop_group.nexthop = nhgc->nhg.nexthop; + + /* Use group's backup nexthop info if present */ + if (nhgc->backup_list_name[0]) { + struct nexthop_group_cmd *bnhgc = + nhgc_find(nhgc->backup_list_name); + + if (!bnhgc) { + vty_out(vty, "%% Backup group %s not found for group %s\n", + nhgc->backup_list_name, + nhgc->name); + return CMD_WARNING; + } + + sg.r.backup_nhop.vrf_id = vrf->vrf_id; + sg.r.backup_nhop_group.nexthop = bnhgc->nhg.nexthop; + } } else { if (nexthop4.s_addr != INADDR_ANY) { sg.r.nhop.gate.ipv4 = nexthop4; @@ -242,11 +274,30 @@ DEFPY (install_routes, sg.r.nhop_group.nexthop = &sg.r.nhop; } + /* Use single backup nexthop if specified */ + if (backup) { + /* Set flag and index in primary nexthop */ + SET_FLAG(sg.r.nhop.flags, NEXTHOP_FLAG_HAS_BACKUP); + sg.r.nhop.backup_idx = 0; + + if (backup_nexthop4.s_addr != INADDR_ANY) { + sg.r.backup_nhop.gate.ipv4 = backup_nexthop4; + sg.r.backup_nhop.type = NEXTHOP_TYPE_IPV4; + } else { + sg.r.backup_nhop.gate.ipv6 = backup_nexthop6; + sg.r.backup_nhop.type = NEXTHOP_TYPE_IPV6; + } + + sg.r.backup_nhop.vrf_id = vrf->vrf_id; + sg.r.backup_nhop_group.nexthop = &sg.r.backup_nhop; + } + sg.r.inst = instance; sg.r.vrf_id = vrf->vrf_id; rts = routes; - sharp_install_routes_helper(&prefix, sg.r.vrf_id, - sg.r.inst, &sg.r.nhop_group, rts); + sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, + &sg.r.nhop_group, &sg.r.backup_nhop_group, + rts); return CMD_SUCCESS; } @@ -337,8 +388,142 @@ DEFUN_NOSH (show_debugging_sharpd, DEBUG_STR "Sharp 
Information\n") { - vty_out(vty, "Sharp debugging status\n"); + vty_out(vty, "Sharp debugging status:\n"); + + return CMD_SUCCESS; +} + +DEFPY(sharp_lsp_prefix_v4, sharp_lsp_prefix_v4_cmd, + "sharp lsp (0-100000)$inlabel\ + nexthop-group NHGNAME$nhgname\ + [prefix A.B.C.D/M$pfx\ + " FRR_IP_REDIST_STR_ZEBRA "$type_str [instance (0-255)$instance]]", + "Sharp Routing Protocol\n" + "Add an LSP\n" + "The ingress label to use\n" + "Use nexthops from a nexthop-group\n" + "The nexthop-group name\n" + "Label a prefix\n" + "The v4 prefix to label\n" + FRR_IP_REDIST_HELP_STR_ZEBRA + "Instance to use\n" + "Instance\n") +{ + struct nexthop_group_cmd *nhgc = NULL; + struct prefix p = {}; + int type = 0; + + /* We're offered a v4 prefix */ + if (pfx->family > 0 && type_str) { + p.family = pfx->family; + p.prefixlen = pfx->prefixlen; + p.u.prefix4 = pfx->prefix; + + type = proto_redistnum(AFI_IP, type_str); + if (type < 0) { + vty_out(vty, "%% Unknown route type '%s'\n", type_str); + return CMD_WARNING; + } + } else if (pfx->family > 0 || type_str) { + vty_out(vty, "%% Must supply both prefix and type\n"); + return CMD_WARNING; + } + + nhgc = nhgc_find(nhgname); + if (!nhgc) { + vty_out(vty, "%% Nexthop-group '%s' does not exist\n", + nhgname); + return CMD_WARNING; + } + + if (nhgc->nhg.nexthop == NULL) { + vty_out(vty, "%% Nexthop-group '%s' is empty\n", nhgname); + return CMD_WARNING; + } + + if (sharp_install_lsps_helper(true, pfx->family > 0 ? &p : NULL, + type, instance, inlabel, + &(nhgc->nhg)) == 0) + return CMD_SUCCESS; + else { + vty_out(vty, "%% LSP install failed!\n"); + return CMD_WARNING; + } +} + +DEFPY(sharp_remove_lsp_prefix_v4, sharp_remove_lsp_prefix_v4_cmd, + "sharp remove lsp \ + (0-100000)$inlabel\ + nexthop-group NHGNAME$nhgname\ + [prefix A.B.C.D/M$pfx\ + " FRR_IP_REDIST_STR_SHARPD "$type_str [instance (0-255)$instance]]", + "Sharp Routing Protocol\n" + "Remove data\n" + "Remove an LSP\n" + "The ingress label\n" + "Use nexthops from a nexthop-group\n" + "The nexthop-group name\n" + "Specify a v4 prefix\n" + "The v4 prefix to label\n" + FRR_IP_REDIST_HELP_STR_SHARPD + "Routing instance\n" + "Instance to use\n") +{ + struct nexthop_group_cmd *nhgc = NULL; + struct prefix p = {}; + int type = 0; + + /* We're offered a v4 prefix */ + if (pfx->family > 0 && type_str) { + p.family = pfx->family; + p.prefixlen = pfx->prefixlen; + p.u.prefix4 = pfx->prefix; + + type = proto_redistnum(AFI_IP, type_str); + if (type < 0) { + vty_out(vty, "%% Unknown route type '%s'\n", type_str); + return CMD_WARNING; + } + } else if (pfx->family > 0 || type_str) { + vty_out(vty, "%% Must supply both prefix and type\n"); + return CMD_WARNING; + } + nhgc = nhgc_find(nhgname); + if (!nhgc) { + vty_out(vty, "%% Nexthop-group '%s' does not exist\n", + nhgname); + return CMD_WARNING; + } + + if (nhgc->nhg.nexthop == NULL) { + vty_out(vty, "%% Nexthop-group '%s' is empty\n", nhgname); + return CMD_WARNING; + } + + if (sharp_install_lsps_helper(false, pfx->family > 0 ? 
&p : NULL, + type, instance, inlabel, + &(nhgc->nhg)) == 0) + return CMD_SUCCESS; + else { + vty_out(vty, "%% LSP remove failed!\n"); + return CMD_WARNING; + } +} + +DEFPY (logpump, + logpump_cmd, + "sharp logpump duration (1-60) frequency (1-1000000) burst (1-1000)", + "Sharp Routing Protocol\n" + "Generate bulk log messages for testing\n" + "Duration of run (s)\n" + "Duration of run (s)\n" + "Frequency of bursts (s^-1)\n" + "Frequency of bursts (s^-1)\n" + "Number of log messages per each burst\n" + "Number of log messages per each burst\n") +{ + sharp_logpump_run(vty, duration, frequency, burst); return CMD_SUCCESS; } @@ -351,6 +536,9 @@ void sharp_vty_init(void) install_element(ENABLE_NODE, &sharp_nht_data_dump_cmd); install_element(ENABLE_NODE, &watch_nexthop_v6_cmd); install_element(ENABLE_NODE, &watch_nexthop_v4_cmd); + install_element(ENABLE_NODE, &sharp_lsp_prefix_v4_cmd); + install_element(ENABLE_NODE, &sharp_remove_lsp_prefix_v4_cmd); + install_element(ENABLE_NODE, &logpump_cmd); install_element(VIEW_NODE, &show_debugging_sharpd_cmd); diff --git a/sharpd/sharp_vty.h b/sharpd/sharp_vty.h index d4af095e89..0d1327259c 100644 --- a/sharpd/sharp_vty.h +++ b/sharpd/sharp_vty.h @@ -23,4 +23,10 @@ #define __SHARP_VTY_H__ extern void sharp_vty_init(void); + +struct vty; + +extern void sharp_logpump_run(struct vty *vty, unsigned duration, + unsigned frequency, unsigned burst); + #endif diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c index 5baa74fe96..e1bd6f5722 100644 --- a/sharpd/sharp_zebra.c +++ b/sharpd/sharp_zebra.c @@ -87,8 +87,65 @@ static int sharp_ifp_down(struct interface *ifp) return 0; } +int sharp_install_lsps_helper(bool install_p, const struct prefix *p, + uint8_t type, int instance, uint32_t in_label, + const struct nexthop_group *nhg) +{ + struct zapi_labels zl = {}; + struct zapi_nexthop *znh; + const struct nexthop *nh; + int i, ret; + + zl.type = ZEBRA_LSP_SHARP; + zl.local_label = in_label; + + if (p) { + SET_FLAG(zl.message, ZAPI_LABELS_FTN); + prefix_copy(&zl.route.prefix, p); + zl.route.type = type; + zl.route.instance = instance; + } + + i = 0; + for (ALL_NEXTHOPS_PTR(nhg, nh)) { + znh = &zl.nexthops[i]; + + /* Must have labels to be useful */ + if (nh->nh_label == NULL || nh->nh_label->num_labels == 0) + continue; + + if (nh->type == NEXTHOP_TYPE_IFINDEX || + nh->type == NEXTHOP_TYPE_BLACKHOLE) + /* Hmm - can't really deal with these types */ + continue; + + ret = zapi_nexthop_from_nexthop(znh, nh); + if (ret < 0) + return -1; + + i++; + } + + /* Whoops - no nexthops isn't very useful */ + if (i == 0) + return -1; + + zl.nexthop_num = i; + + if (install_p) + ret = zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_ADD, + &zl); + else + ret = zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_DELETE, + &zl); + + return ret; +} + void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id, - uint8_t instance, struct nexthop_group *nhg, + uint8_t instance, + const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg, uint32_t routes) { uint32_t temp, i; @@ -102,9 +159,13 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id, } else temp = ntohl(p->u.val32[3]); + /* Only use backup route/nexthops if present */ + if (backup_nhg && (backup_nhg->nexthop == NULL)) + backup_nhg = NULL; + monotime(&sg.r.t_start); for (i = 0; i < routes; i++) { - route_add(p, vrf_id, (uint8_t)instance, nhg); + route_add(p, vrf_id, (uint8_t)instance, nhg, backup_nhg); if (v4) p->u.prefix4.s_addr = htonl(++temp); else @@ -154,6 +215,7 @@ static 
void handle_repeated(bool installed) sg.r.installed_routes = 0; sharp_install_routes_helper(&p, sg.r.vrf_id, sg.r.inst, &sg.r.nhop_group, + &sg.r.backup_nhop_group, sg.r.total_routes); } } @@ -221,8 +283,9 @@ void vrf_label_add(vrf_id_t vrf_id, afi_t afi, mpls_label_t label) zclient_send_vrf_label(zclient, vrf_id, afi, label, ZEBRA_LSP_SHARP); } -void route_add(struct prefix *p, vrf_id_t vrf_id, - uint8_t instance, struct nexthop_group *nhg) +void route_add(const struct prefix *p, vrf_id_t vrf_id, + uint8_t instance, const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg) { struct zapi_route api; struct zapi_nexthop *api_nh; @@ -241,46 +304,28 @@ void route_add(struct prefix *p, vrf_id_t vrf_id, for (ALL_NEXTHOPS_PTR(nhg, nh)) { api_nh = &api.nexthops[i]; - api_nh->vrf_id = nh->vrf_id; - api_nh->type = nh->type; - api_nh->weight = nh->weight; - switch (nh->type) { - case NEXTHOP_TYPE_IPV4: - api_nh->gate = nh->gate; - break; - case NEXTHOP_TYPE_IPV4_IFINDEX: - api_nh->gate = nh->gate; - api_nh->ifindex = nh->ifindex; - break; - case NEXTHOP_TYPE_IFINDEX: - api_nh->ifindex = nh->ifindex; - break; - case NEXTHOP_TYPE_IPV6: - memcpy(&api_nh->gate.ipv6, &nh->gate.ipv6, 16); - break; - case NEXTHOP_TYPE_IPV6_IFINDEX: - api_nh->ifindex = nh->ifindex; - memcpy(&api_nh->gate.ipv6, &nh->gate.ipv6, 16); - break; - case NEXTHOP_TYPE_BLACKHOLE: - api_nh->bh_type = nh->bh_type; - break; - } + zapi_nexthop_from_nexthop(api_nh, nh); - if (nh->nh_label && nh->nh_label->num_labels > 0) { - int j; + i++; + } + api.nexthop_num = i; - SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL); + /* Include backup nexthops, if present */ + if (backup_nhg && backup_nhg->nexthop) { + SET_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS); - api_nh->label_num = nh->nh_label->num_labels; - for (j = 0; j < nh->nh_label->num_labels; j++) - api_nh->labels[j] = nh->nh_label->label[j]; + i = 0; + for (ALL_NEXTHOPS_PTR(backup_nhg, nh)) { + api_nh = &api.backup_nexthops[i]; + + zapi_backup_nexthop_from_nexthop(api_nh, nh); + + i++; } - i++; + api.backup_nexthop_num = i; } - api.nexthop_num = i; zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); } @@ -333,7 +378,7 @@ static int sharp_debug_nexthops(struct zapi_route *api) case NEXTHOP_TYPE_IPV4_IFINDEX: case NEXTHOP_TYPE_IPV4: zlog_debug( - "\tNexthop %s, type: %d, ifindex: %d, vrf: %d, label_num: %d", + " Nexthop %s, type: %d, ifindex: %d, vrf: %d, label_num: %d", inet_ntop(AF_INET, &znh->gate.ipv4.s_addr, buf, sizeof(buf)), znh->type, znh->ifindex, znh->vrf_id, @@ -342,18 +387,18 @@ static int sharp_debug_nexthops(struct zapi_route *api) case NEXTHOP_TYPE_IPV6_IFINDEX: case NEXTHOP_TYPE_IPV6: zlog_debug( - "\tNexthop %s, type: %d, ifindex: %d, vrf: %d, label_num: %d", + " Nexthop %s, type: %d, ifindex: %d, vrf: %d, label_num: %d", inet_ntop(AF_INET6, &znh->gate.ipv6, buf, sizeof(buf)), znh->type, znh->ifindex, znh->vrf_id, znh->label_num); break; case NEXTHOP_TYPE_IFINDEX: - zlog_debug("\tNexthop IFINDEX: %d, ifindex: %d", + zlog_debug(" Nexthop IFINDEX: %d, ifindex: %d", znh->type, znh->ifindex); break; case NEXTHOP_TYPE_BLACKHOLE: - zlog_debug("\tNexthop blackhole"); + zlog_debug(" Nexthop blackhole"); break; } } diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h index 57ffcc7690..926bff676b 100644 --- a/sharpd/sharp_zebra.h +++ b/sharpd/sharp_zebra.h @@ -25,16 +25,22 @@ extern void sharp_zebra_init(void); extern void vrf_label_add(vrf_id_t vrf_id, afi_t afi, mpls_label_t label); -extern void route_add(struct prefix *p, vrf_id_t, uint8_t instance, - 
struct nexthop_group *nhg); +extern void route_add(const struct prefix *p, vrf_id_t, uint8_t instance, + const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg); extern void route_delete(struct prefix *p, vrf_id_t vrf_id, uint8_t instance); extern void sharp_zebra_nexthop_watch(struct prefix *p, vrf_id_t vrf_id, bool import, bool watch, bool connected); extern void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id, uint8_t instance, - struct nexthop_group *nhg, + const struct nexthop_group *nhg, + const struct nexthop_group *backup_nhg, uint32_t routes); extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id, uint8_t instance, uint32_t routes); + +int sharp_install_lsps_helper(bool install_p, const struct prefix *p, + uint8_t type, int instance, uint32_t in_label, + const struct nexthop_group *nhg); #endif diff --git a/sharpd/subdir.am b/sharpd/subdir.am index 89b183d832..8b32b2370c 100644 --- a/sharpd/subdir.am +++ b/sharpd/subdir.am @@ -14,6 +14,7 @@ sharpd_libsharp_a_SOURCES = \ sharpd/sharp_nht.c \ sharpd/sharp_zebra.c \ sharpd/sharp_vty.c \ + sharpd/sharp_logpump.c \ # end noinst_HEADERS += \ diff --git a/staticd/static_debug.c b/staticd/static_debug.c index 9906e805a7..e43d4e79ff 100644 --- a/staticd/static_debug.c +++ b/staticd/static_debug.c @@ -3,7 +3,7 @@ * Copyright (C) 2019 Volta Networks Inc. * Mark Stapp * - * This file is part of Free Range Routing (FRR). + * This file is part of FRRouting (FRR). * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/staticd/static_debug.h b/staticd/static_debug.h index 8932e2d429..481c266e14 100644 --- a/staticd/static_debug.h +++ b/staticd/static_debug.h @@ -3,7 +3,7 @@ * Copyright (C) 2019 Volta Networks Inc. * Mark Stapp * - * This file is part of Free Range Routing (FRR). + * This file is part of FRRouting (FRR). 
* * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/staticd/static_vty.c b/staticd/static_vty.c index 40bcf2b5d3..a950b0473e 100644 --- a/staticd/static_vty.c +++ b/staticd/static_vty.c @@ -867,7 +867,7 @@ DEFPY(ip_route_address_interface, "Table to configure\n" "The table number to configure\n" VRF_CMD_HELP_STR - "Treat the nexthop as directly attached to the interface") + "Treat the nexthop as directly attached to the interface\n") { struct static_vrf *svrf; struct static_vrf *nh_svrf; @@ -935,7 +935,7 @@ DEFPY(ip_route_address_interface_vrf, "Table to configure\n" "The table number to configure\n" VRF_CMD_HELP_STR - "Treat the nexthop as directly attached to the interface") + "Treat the nexthop as directly attached to the interface\n") { VTY_DECLVAR_CONTEXT(vrf, vrf); const char *flag = NULL; @@ -1211,7 +1211,7 @@ DEFPY(ipv6_route_address_interface, "Table to configure\n" "The table number to configure\n" VRF_CMD_HELP_STR - "Treat the nexthop as directly attached to the interface") + "Treat the nexthop as directly attached to the interface\n") { struct static_vrf *svrf; struct static_vrf *nh_svrf; @@ -1279,7 +1279,7 @@ DEFPY(ipv6_route_address_interface_vrf, "Table to configure\n" "The table number to configure\n" VRF_CMD_HELP_STR - "Treat the nexthop as directly attached to the interface") + "Treat the nexthop as directly attached to the interface\n") { VTY_DECLVAR_CONTEXT(vrf, vrf); struct static_vrf *svrf = vrf->info; diff --git a/tests/bgpd/test_aspath.c b/tests/bgpd/test_aspath.c index 9feec7156a..b94355e8b8 100644 --- a/tests/bgpd/test_aspath.c +++ b/tests/bgpd/test_aspath.c @@ -474,6 +474,20 @@ static struct test_segment { 14, {NULL, NULL, 0, 0, 0, 0, 0, 0}, }, + { + /* 28 */ + "BGP_AS_ZERO", + "seq(8466,3,52737,0,4096)", + {0x2, 0x5, + 0x21, 0x12, + 0x00, 0x03, + 0xce, 0x01, + 0x00, 0x00, + 0x10, 0x00}, + 12, + {"8466 3 52737 0 4096", "8466 3 52737 0 4096", 5, 0, + NOT_ALL_PRIVATE, 4096, 4, 8466}, + }, {NULL, NULL, {0}, 0, {NULL, 0, 0}}}; #define COMMON_ATTRS \ @@ -678,6 +692,21 @@ static struct aspath_tests { COMMON_ATTR_SIZE + 3, &test_segments[0], }, + /* 13 */ + { + "4b AS4_PATH: BGP_AS_ZERO", + &test_segments[28], + "8466 3 52737 0 4096", + AS4_DATA, + -1, + PEER_CAP_AS4_RCV | PEER_CAP_AS4_ADV, + { + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, + BGP_ATTR_AS4_PATH, 22, + }, + COMMON_ATTR_SIZE + 3, + }, {NULL, NULL, NULL, 0, 0, 0, {0}, 0}, }; diff --git a/tests/bgpd/test_aspath.py b/tests/bgpd/test_aspath.py index 15ae514c87..5fa1f11629 100644 --- a/tests/bgpd/test_aspath.py +++ b/tests/bgpd/test_aspath.py @@ -52,6 +52,7 @@ TestAspath.parsertest("redundantset2") TestAspath.parsertest("zero-size overflow") TestAspath.parsertest("zero-size overflow + valid segment") TestAspath.parsertest("invalid segment type") +TestAspath.parsertest("BGP_AS_ZERO") for i in range(10): TestAspath.okfail("prepend test %d" % i) @@ -77,3 +78,4 @@ TestAspath.attrtest("4b AS_PATH: too long2") TestAspath.attrtest("4b AS_PATH: bad flags") TestAspath.attrtest("4b AS4_PATH w/o AS_PATH") TestAspath.attrtest("4b AS4_PATH: confed") +TestAspath.attrtest("4b AS4_PATH: BGP_AS_ZERO") diff --git a/tests/bgpd/test_bgp_table.c b/tests/bgpd/test_bgp_table.c index 819c2d7282..79a8bb4408 100644 --- a/tests/bgpd/test_bgp_table.c +++ b/tests/bgpd/test_bgp_table.c @@ -82,7 +82,7 @@ static void print_range_result(struct list *list) for (ALL_LIST_ELEMENTS_RO(list, listnode, bnode)) { char 
buf[PREFIX2STR_BUFFER]; - prefix2str(&bnode->p, buf, PREFIX2STR_BUFFER); + prefix2str(bgp_node_get_prefix(bnode), buf, PREFIX2STR_BUFFER); printf("%s\n", buf); } } @@ -106,7 +106,7 @@ static void check_lookup_result(struct list *list, va_list arglist) assert(0); for (ALL_LIST_ELEMENTS_RO(list, listnode, bnode)) { - if (prefix_same(&bnode->p, &p)) + if (prefix_same(bgp_node_get_prefix(bnode), &p)) found = true; } diff --git a/tests/bgpd/test_mp_attr.c b/tests/bgpd/test_mp_attr.c index c97ea57150..7fabaad7fa 100644 --- a/tests/bgpd/test_mp_attr.c +++ b/tests/bgpd/test_mp_attr.c @@ -951,12 +951,19 @@ static struct test_segment mp_prefix_sid[] = { "PREFIX-SID", "PREFIX-SID Test 1", { - 0x01, 0x00, 0x07, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x02, - 0x03, 0x00, 0x08, 0x00, - 0x00, 0x0a, 0x1b, 0xfe, - 0x00, 0x00, 0x0a + /* TLV[0] Latel-Index TLV */ + 0x01, /* Type 0x01:Label-Index */ + 0x00, 0x07, /* Length */ + 0x00, /* RESERVED */ + 0x00, 0x00, /* Flags */ + 0x00, 0x00, 0x00, 0x02, /* Label Index */ + + /* TLV[1] SRGB TLV */ + 0x03, /* Type 0x03:SRGB */ + 0x00, 0x08, /* Length */ + 0x00, 0x00, /* Flags */ + 0x0a, 0x1b, 0xfe, /* SRGB[0] first label */ + 0x00, 0x00, 0x0a /* SRBG[0] nb-labels in range */ }, .len = 21, .parses = SHOULD_PARSE, @@ -1027,7 +1034,7 @@ static void parse_test(struct peer *peer, struct test_segment *t, int type) parse_ret = bgp_mp_unreach_parse(&attr_args, &nlri); break; case BGP_ATTR_PREFIX_SID: - parse_ret = bgp_attr_prefix_sid(&attr_args, &nlri); + parse_ret = bgp_attr_prefix_sid(&attr_args); break; default: printf("unknown type"); diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c index 422d397479..490b0ee73b 100644 --- a/tests/bgpd/test_peer_attr.c +++ b/tests/bgpd/test_peer_attr.c @@ -1387,7 +1387,7 @@ static void bgp_startup(void) zprivs_init(&bgpd_privs); master = thread_master_create(NULL); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); bgp_option_set(BGP_OPT_NO_LISTEN); diff --git a/tests/helpers/c/main.c b/tests/helpers/c/main.c index 2de29cbdbc..68ed16d513 100644 --- a/tests/helpers/c/main.c +++ b/tests/helpers/c/main.c @@ -155,7 +155,7 @@ int main(int argc, char **argv) cmd_init(1); vty_init(master, false); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); /* OSPF vty inits. 
*/ diff --git a/tests/isisd/test_fuzz_isis_tlv_tests.h.gz b/tests/isisd/test_fuzz_isis_tlv_tests.h.gz Binary files differindex 46e45e5ee0..4f59d1d7c0 100644 --- a/tests/isisd/test_fuzz_isis_tlv_tests.h.gz +++ b/tests/isisd/test_fuzz_isis_tlv_tests.h.gz diff --git a/tests/lib/cli/common_cli.c b/tests/lib/cli/common_cli.c index e091372ab8..bd81656ca9 100644 --- a/tests/lib/cli/common_cli.c +++ b/tests/lib/cli/common_cli.c @@ -84,7 +84,7 @@ int main(int argc, char **argv) vty_init(master, false); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); test_init(argc, argv); diff --git a/tests/lib/cli/test_commands.c b/tests/lib/cli/test_commands.c index bbdc8b238d..779a7539e9 100644 --- a/tests/lib/cli/test_commands.c +++ b/tests/lib/cli/test_commands.c @@ -142,7 +142,7 @@ static void test_init(void) struct cmd_element *cmd; cmd_init(1); - yang_init(); + yang_init(true); nb_init(master, NULL, 0); install_node(&bgp_node, NULL); diff --git a/tests/lib/northbound/test_oper_data.c b/tests/lib/northbound/test_oper_data.c index 18d3180889..786fce33f9 100644 --- a/tests/lib/northbound/test_oper_data.c +++ b/tests/lib/northbound/test_oper_data.c @@ -413,7 +413,7 @@ int main(int argc, char **argv) cmd_hostname_set("test"); vty_init(master, false); lib_cmd_init(); - yang_init(); + yang_init(true); nb_init(master, modules, array_size(modules)); /* Create artificial data. */ diff --git a/tests/lib/test_atomlist.c b/tests/lib/test_atomlist.c index 238ee9539e..40837b4722 100644 --- a/tests/lib/test_atomlist.c +++ b/tests/lib/test_atomlist.c @@ -29,6 +29,7 @@ #include "atomlist.h" #include "seqlock.h" #include "monotime.h" +#include "printfrr.h" /* * maybe test: @@ -288,7 +289,7 @@ static void run_tr(struct testrun *tr) size_t c = 0, s = 0, n = 0; struct item *item, *prev, dummy; - printf("[%02u] %35s %s\n", seqlock_cur(&sqlo) >> 2, "", desc); + printfrr("[%02u] %35s %s\n", seqlock_cur(&sqlo) >> 2, "", desc); fflush(stdout); if (tr->prefill != NOCLEAR) @@ -324,7 +325,7 @@ static void run_tr(struct testrun *tr) } assert(c == alist_count(&ahead)); } - printf("\033[1A[%02u] %9"PRId64"us c=%5zu s=%5zu n=%5zu %s\n", + printfrr("\033[1A[%02u] %9"PRId64"us c=%5zu s=%5zu n=%5zu %s\n", sv >> 2, delta, c, s, n, desc); } @@ -334,9 +335,9 @@ static void dump(const char *lbl) struct item *item, *safe; size_t ctr = 0; - printf("dumping %s:\n", lbl); + printfrr("dumping %s:\n", lbl); frr_each_safe(alist, &ahead, item) { - printf("%s %3zu %p %3"PRIu64" %3"PRIu64"\n", lbl, ctr++, + printfrr("%s %3zu %p %3"PRIu64" %3"PRIu64"\n", lbl, ctr++, (void *)item, item->val1, item->val2); } } @@ -362,12 +363,12 @@ static void basic_tests(void) dump(""); alist_del(&ahead, &itm[1]); dump(""); - printf("POP: %p\n", alist_pop(&ahead)); + printfrr("POP: %p\n", alist_pop(&ahead)); dump(""); - printf("POP: %p\n", alist_pop(&ahead)); - printf("POP: %p\n", alist_pop(&ahead)); - printf("POP: %p\n", alist_pop(&ahead)); - printf("POP: %p\n", alist_pop(&ahead)); + printfrr("POP: %p\n", alist_pop(&ahead)); + printfrr("POP: %p\n", alist_pop(&ahead)); + printfrr("POP: %p\n", alist_pop(&ahead)); + printfrr("POP: %p\n", alist_pop(&ahead)); dump(""); } #else diff --git a/tests/lib/test_srcdest_table.c b/tests/lib/test_srcdest_table.c index 0fca571d28..9d395bee89 100644 --- a/tests/lib/test_srcdest_table.c +++ b/tests/lib/test_srcdest_table.c @@ -4,7 +4,7 @@ * Copyright (C) 2017 by David Lamparter & Christian Franke, * Open Source Routing / NetDEF Inc. 
* - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -391,8 +391,7 @@ static void test_state_del_one_route(struct test_state *test, struct prng *prng) } assert(rn); - srcdest_rnode_prefixes(rn, (const struct prefix **)&dst_p, - (const struct prefix **)&src_p); + srcdest_rnode_prefixes(rn, &dst_p, &src_p); memcpy(&dst6_p, dst_p, sizeof(dst6_p)); if (src_p) memcpy(&src6_p, src_p, sizeof(src6_p)); diff --git a/tests/lib/test_stream.c b/tests/lib/test_stream.c index 2ecfc87942..a45c2b4d54 100644 --- a/tests/lib/test_stream.c +++ b/tests/lib/test_stream.c @@ -23,6 +23,8 @@ #include <stream.h> #include <thread.h> +#include "printfrr.h" + static unsigned long long ham = 0xdeadbeefdeadbeef; struct thread_master *master; @@ -30,15 +32,15 @@ static void print_stream(struct stream *s) { size_t getp = stream_get_getp(s); - printf("endp: %zu, readable: %zu, writeable: %zu\n", stream_get_endp(s), - STREAM_READABLE(s), STREAM_WRITEABLE(s)); + printfrr("endp: %zu, readable: %zu, writeable: %zu\n", + stream_get_endp(s), STREAM_READABLE(s), STREAM_WRITEABLE(s)); while (STREAM_READABLE(s)) { - printf("0x%x ", *stream_pnt(s)); + printfrr("0x%x ", *stream_pnt(s)); stream_forward_getp(s, 1); } - printf("\n"); + printfrr("\n"); /* put getp back to where it was */ stream_set_getp(s, getp); @@ -61,10 +63,10 @@ int main(void) print_stream(s); - printf("c: 0x%hhx\n", stream_getc(s)); - printf("w: 0x%hx\n", stream_getw(s)); - printf("l: 0x%x\n", stream_getl(s)); - printf("q: 0x%" PRIx64 "\n", stream_getq(s)); + printfrr("c: 0x%hhx\n", stream_getc(s)); + printfrr("w: 0x%hx\n", stream_getw(s)); + printfrr("l: 0x%x\n", stream_getl(s)); + printfrr("q: 0x%" PRIx64 "\n", stream_getq(s)); return 0; } diff --git a/tests/lib/test_typelist.c b/tests/lib/test_typelist.c index 2438fb5f08..607e29e56b 100644 --- a/tests/lib/test_typelist.c +++ b/tests/lib/test_typelist.c @@ -35,6 +35,7 @@ #include "monotime.h" #include "jhash.h" #include "sha256.h" +#include "printfrr.h" #include "tests/helpers/c/prng.h" @@ -90,14 +91,14 @@ static void ts_ref(const char *text) { int64_t us; us = monotime_since(&ref, NULL); - printf("%7"PRId64"us %s\n", us, text); + printfrr("%7"PRId64"us %s\n", us, text); monotime(&ref); } static void ts_end(void) { int64_t us; us = monotime_since(&ref0, NULL); - printf("%7"PRId64"us total\n", us); + printfrr("%7"PRId64"us total\n", us); } #define TYPE LIST diff --git a/tests/lib/test_typelist.h b/tests/lib/test_typelist.h index 9039fa8a46..da3530e9c0 100644 --- a/tests/lib/test_typelist.h +++ b/tests/lib/test_typelist.h @@ -123,10 +123,10 @@ static void ts_hash(const char *text, const char *expect) for (i = 0; i < sizeof(hash); i++) sprintf(hashtext + i * 2, "%02x", hash[i]); - printf("%7"PRId64"us %-25s %s%s\n", us, text, + printfrr("%7"PRId64"us %-25s %s%s\n", us, text, expect ? 
" " : "*", hashtext); if (expect && strcmp(expect, hashtext)) { - printf("%-21s %s\n", "EXPECTED:", expect); + printfrr("%-21s %s\n", "EXPECTED:", expect); assert(0); } monotime(&ref); @@ -149,7 +149,7 @@ static void concat(test_, TYPE)(void) for (i = 0; i < NITEM; i++) itm[i].val = i; - printf("%s start\n", str(TYPE)); + printfrr("%s start\n", str(TYPE)); ts_start(); list_init(&head); @@ -530,7 +530,7 @@ static void concat(test_, TYPE)(void) list_fini(&head); ts_ref("fini"); ts_end(); - printf("%s end\n", str(TYPE)); + printfrr("%s end\n", str(TYPE)); } #undef ts_hashx diff --git a/tests/lib/test_zlog.c b/tests/lib/test_zlog.c index 790e65cfe9..07885d9847 100644 --- a/tests/lib/test_zlog.c +++ b/tests/lib/test_zlog.c @@ -34,12 +34,14 @@ static bool test_zlog_hexdump(void) unsigned int nl = 1; do { - long d[nl]; + uint8_t d[nl]; for (unsigned int i = 0; i < nl; i++) d[i] = random(); - zlog_hexdump(d, nl * sizeof(long)); - } while (++nl * sizeof(long) <= MAXDATA); + zlog_hexdump(d, nl - 1); + + nl += 1 + (nl / 2); + } while (nl <= MAXDATA); return true; } diff --git a/tests/subdir.am b/tests/subdir.am index d87d348949..5efdcbbd4c 100644 --- a/tests/subdir.am +++ b/tests/subdir.am @@ -38,8 +38,11 @@ else TESTS_OSPF6D = endif +tests/lib/cli/test_cli_clippy.c: $(CLIPPY_DEPS) tests/lib/cli/tests_lib_cli_test_cli-test_cli.$(OBJEXT): tests/lib/cli/test_cli_clippy.c tests/lib/cli/test_cli-test_cli.$(OBJEXT): tests/lib/cli/test_cli_clippy.c + +tests/ospf6d/test_lsdb_clippy.c: $(CLIPPY_DEPS) tests/ospf6d/tests_ospf6d_test_lsdb-test_lsdb.$(OBJEXT): tests/ospf6d/test_lsdb_clippy.c tests/ospf6d/test_lsdb-test_lsdb.$(OBJEXT): tests/ospf6d/test_lsdb_clippy.c @@ -87,6 +90,7 @@ check_PROGRAMS += \ endif tests/lib/cli/test_commands_defun.c: vtysh/vtysh_cmd.c + mkdir -p tests/lib/cli sed \ -e 's%"vtysh/vtysh\.h"%"tests/helpers/c/tests.h"%' \ -e 's/vtysh_init_cmd/test_init_cmd/' \ @@ -96,6 +100,7 @@ tests/lib/cli/test_commands_defun.c: vtysh/vtysh_cmd.c CLEANFILES += tests/lib/cli/test_commands_defun.c tests/isisd/test_fuzz_isis_tlv_tests.h: $(top_srcdir)/tests/isisd/test_fuzz_isis_tlv_tests.h.gz + mkdir -p tests/isisd gzip -d < $(top_srcdir)/tests/isisd/test_fuzz_isis_tlv_tests.h.gz > "$@" CLEANFILES += tests/isisd/test_fuzz_isis_tlv_tests.h diff --git a/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref b/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref index 61d17a61b3..993b4df6a2 100644 --- a/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref +++ b/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref @@ -8,16 +8,19 @@ C>* 192.168.6.0/26 is directly connected, r1-eth6, XX:XX:XX C>* 192.168.7.0/26 is directly connected, r1-eth7, XX:XX:XX C>* 192.168.8.0/26 is directly connected, r1-eth8, XX:XX:XX C>* 192.168.9.0/26 is directly connected, r1-eth9, XX:XX:XX -O 192.168.0.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX -O 192.168.3.0/26 [110/10] is directly connected, r1-eth3, XX:XX:XX -S>* 1.1.1.1/32 [1/0] is directly connected, r1-eth0, XX:XX:XX -S>* 1.1.1.2/32 [1/0] is directly connected, r1-eth1, XX:XX:XX -S>* 4.5.6.10/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX -S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX -S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, XX:XX:XX -S>* 4.5.6.13/32 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4.5.6.14/32 [1/0] unreachable (blackhole), XX:XX:XX -S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, XX:XX:XX -S>* 4.5.6.7/32 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4.5.6.8/32 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4.5.6.9/32 
[1/0] unreachable (ICMP unreachable), XX:XX:XX +O 192.168.0.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX +O 192.168.3.0/26 [110/10] is directly connected, r1-eth3, weight 1, XX:XX:XX +S>* 1.1.1.1/32 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX +S>* 1.1.1.2/32 [1/0] is directly connected, r1-eth1, weight 1, XX:XX:XX +S>* 4.5.6.10/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.13/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4.5.6.14/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S 4.5.6.16/32 [10/0] via 192.168.0.4, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.16/32 [5/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.17/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX +S>* 4.5.6.7/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4.5.6.8/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4.5.6.9/32 [1/0] unreachable (ICMP unreachable), weight 1, XX:XX:XX diff --git a/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref b/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref index d5bc16a2bf..ef12d615dc 100644 --- a/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref +++ b/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref @@ -19,11 +19,11 @@ C * fe80::/64 is directly connected, r1-eth6, XX:XX:XX C * fe80::/64 is directly connected, r1-eth7, XX:XX:XX C * fe80::/64 is directly connected, r1-eth8, XX:XX:XX C * fe80::/64 is directly connected, r1-eth9, XX:XX:XX -O fc00:0:0:4::/64 [110/10] is directly connected, r1-eth4, XX:XX:XX -S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX -S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX -S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, XX:XX:XX -S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, XX:XX:XX -S>* 4:5::6:7/128 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4:5::6:8/128 [1/0] unreachable (blackhole), XX:XX:XX -S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), XX:XX:XX +O fc00:0:0:4::/64 [110/10] is directly connected, r1-eth4, weight 1, XX:XX:XX +S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX +S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX +S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX +S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX +S>* 4:5::6:7/128 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4:5::6:8/128 [1/0] unreachable (blackhole), weight 1, XX:XX:XX +S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), weight 1, XX:XX:XX diff --git a/tests/topotests/all-protocol-startup/r1/zebra.conf b/tests/topotests/all-protocol-startup/r1/zebra.conf index fbf827604f..f283590ddf 100644 --- a/tests/topotests/all-protocol-startup/r1/zebra.conf +++ b/tests/topotests/all-protocol-startup/r1/zebra.conf @@ -26,11 +26,20 @@ ipv6 route 4:5::6:12/128 r1-eth0 # by zebra but not installed. 
ip route 4.5.6.15/32 192.168.0.2 255 ipv6 route 4:5::6:15/128 fc00:0:0:0::2 255 - # Routes to put into a nexthop-group ip route 1.1.1.1/32 r1-eth0 ip route 1.1.1.2/32 r1-eth1 +# Create a route that has overlapping distance +# so we have backups +ip route 4.5.6.16/32 192.168.0.2 5 +ip route 4.5.6.16/32 192.168.0.4 10 + +# Create routes that have different tags +# and how we handle it +ip route 4.5.6.17/32 192.168.0.2 tag 9000 +ip route 4.5.6.17/32 192.168.0.2 tag 10000 + ! interface r1-eth0 description to sw0 - no routing protocol diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py index 16609221c1..a671e14e07 100755 --- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py +++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py @@ -307,7 +307,7 @@ def test_converge_protocols(): expected = open(v4_routesFile).read().rstrip() expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) - actual = net['r%s' %i].cmd('vtysh -c "show ip route" | /usr/bin/tail -n +7 | sort 2> /dev/null').rstrip() + actual = net['r%s' %i].cmd('vtysh -c "show ip route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null').rstrip() # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) @@ -329,7 +329,7 @@ def test_converge_protocols(): expected = open(v6_routesFile).read().rstrip() expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) - actual = net['r%s' %i].cmd('vtysh -c "show ipv6 route" | /usr/bin/tail -n +7 | sort 2> /dev/null').rstrip() + actual = net['r%s' %i].cmd('vtysh -c "show ipv6 route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null').rstrip() # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json b/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json index 54ae57f7be..ac5fd04074 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json +++ b/tests/topotests/bfd-bgp-cbit-topo3/r1/bgp_ipv6_routes_down.json @@ -14,7 +14,6 @@ "prefix": "2001:db8:6::", "prefixLen": 64, "network": "2001:db8:6::\/64", - "med": 0, "metric": 0, "weight": 0, "peerId": "2001:db8:4::1", @@ -37,7 +36,6 @@ "prefix": "2001:db8:7::", "prefixLen": 64, "network": "2001:db8:7::\/64", - "med": 0, "metric": 0, "weight": 0, "peerId": "2001:db8:4::1", @@ -60,7 +58,6 @@ "prefix": "2001:db8:8::", "prefixLen": 64, "network": "2001:db8:8::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", @@ -83,7 +80,6 @@ "prefix": "2001:db8:9::", "prefixLen": 64, "network": "2001:db8:9::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", diff --git a/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json b/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json index a3bb222504..ab42b05e85 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json +++ b/tests/topotests/bfd-bgp-cbit-topo3/r3/bgp_ipv6_routes_down.json @@ -13,7 +13,6 @@ "prefix": "2001:db8:6::", "prefixLen": 64, "network": "2001:db8:6::\/64", - "med": 0, "metric": 0, "weight": 32768, "peerId": "(unspec)", @@ -36,7 +35,6 @@ "prefix": "2001:db8:7::", "prefixLen": 64, "network": "2001:db8:7::\/64", - "med": 0, "metric": 0, "weight": 
32768, "peerId": "(unspec)", diff --git a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py index 59858d6fd3..186dac31a0 100755 --- a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py +++ b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py @@ -33,7 +33,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -44,23 +44,26 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers. for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): "Sets up the pytest environment" @@ -71,16 +74,13 @@ def setup_module(mod): for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)), ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. @@ -90,10 +90,11 @@ def setup_module(mod): # daemon exists. for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -112,17 +113,17 @@ def test_protocols_convergence(): # Check IPv6 routing tables. 
logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=40, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ipv6 route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -133,19 +134,21 @@ def test_bfd_connection(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) - _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=32, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_bfd_loss_intermediate(): """ Assert that BFD notices the bfd link down failure. @@ -155,94 +158,98 @@ def test_bfd_loss_intermediate(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('removing IPv6 address from r2 to simulate loss of connectivity') + logger.info("removing IPv6 address from r2 to simulate loss of connectivity") # Disable r2-eth0 ipv6 address - cmd = 'vtysh -c \"configure terminal\" -c \"interface r2-eth1\" -c "no ipv6 address 2001:db8:4::2/64\"' - tgen.net['r2'].cmd(cmd) - + cmd = 'vtysh -c "configure terminal" -c "interface r2-eth1" -c "no ipv6 address 2001:db8:4::2/64"' + tgen.net["r2"].cmd(cmd) + # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD converge down') + logger.info("waiting for BFD converge down") # Check that BGP converged quickly. 
for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/peers_down.json'.format(CWD, router.name) + json_file = "{}/{}/peers_down.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) - _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=32, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg - logger.info('waiting for BGP entries to become stale') + logger.info("waiting for BGP entries to become stale") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/bgp_ipv6_routes_down.json'.format(CWD, router.name) + json_file = "{}/{}/bgp_ipv6_routes_down.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bgp ipv6 json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bgp ipv6 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=1) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg logger.info("Checking IPv6 routes on r1 should still be present") for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - if router.name == 'r3': + if router.name == "r3": continue - json_file = '{}/r1/ipv6_routes.json'.format(CWD) + json_file = "{}/r1/ipv6_routes.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=30, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ipv6 route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_bfd_comes_back_again(): """ Assert that BFD notices the bfd link up and that ipv6 entries appear back """ tgen = get_topogen() - logger.info('re-adding IPv6 address from r2 to simulate connectivity is back') + logger.info("re-adding IPv6 address from r2 to simulate connectivity is back") # adds back r2-eth0 ipv6 address - cmd = 'vtysh -c \"configure terminal\" -c \"interface r2-eth1\" -c "ipv6 address 2001:db8:4::2/64\"' - tgen.net['r2'].cmd(cmd) + cmd = 'vtysh -c "configure terminal" -c "interface r2-eth1" -c "ipv6 address 2001:db8:4::2/64"' + tgen.net["r2"].cmd(cmd) # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD to converge up') + logger.info("waiting for BFD to converge up") # Check that BGP converged quickly. 
for router in tgen.routers().values(): - if router.name == 'r2': + if router.name == "r2": continue - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) - _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=16, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg - + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-topo1/r1/bgp_prefixes.json b/tests/topotests/bfd-topo1/r1/bgp_prefixes.json index 4b2cc1ad62..1262f5e984 100644 --- a/tests/topotests/bfd-topo1/r1/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r1/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.0.2", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.0.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.0.2", diff --git a/tests/topotests/bfd-topo1/r2/bgp_prefixes.json b/tests/topotests/bfd-topo1/r2/bgp_prefixes.json index 39f3c0a835..0d47c0fc30 100644 --- a/tests/topotests/bfd-topo1/r2/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r2/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "101", + "path": "101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.0.1", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "103", + "path": "103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.1.1", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "104", + "path": "104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.2.1", diff --git a/tests/topotests/bfd-topo1/r3/bgp_prefixes.json b/tests/topotests/bfd-topo1/r3/bgp_prefixes.json index c92d4e052a..36fca17bbf 100644 --- a/tests/topotests/bfd-topo1/r3/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r3/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.1.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.1.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.1.2", diff --git a/tests/topotests/bfd-topo1/r4/bgp_prefixes.json b/tests/topotests/bfd-topo1/r4/bgp_prefixes.json index cc8510dd61..efe7d47b1a 100644 --- a/tests/topotests/bfd-topo1/r4/bgp_prefixes.json +++ b/tests/topotests/bfd-topo1/r4/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": 
"10.254.254.1", "valid": true, "peerId": "192.168.2.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.2.2", @@ -34,7 +34,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.2.2", diff --git a/tests/topotests/bfd-topo1/test_bfd_topo1.py b/tests/topotests/bfd-topo1/test_bfd_topo1.py index 4fd4f97436..e1865dc5a8 100644 --- a/tests/topotests/bfd-topo1/test_bfd_topo1.py +++ b/tests/topotests/bfd-topo1/test_bfd_topo1.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,27 +45,29 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) def setup_module(mod): @@ -76,16 +78,13 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. @@ -95,8 +94,8 @@ def setup_module(mod): # daemon exists. 
for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break @@ -112,14 +111,15 @@ def test_bfd_connection(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -131,15 +131,16 @@ def test_bgp_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers to go up') + logger.info("waiting for bgp peers to go up") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_summary.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp summary json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -149,15 +150,16 @@ def test_bgp_fast_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers converge') + logger.info("waiting for bgp peers converge") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show ip bgp json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -171,29 +173,30 @@ def test_bfd_fast_convergence(): pytest.skip(tgen.errors) # Disable r1-eth0 link. - tgen.gears['r1'].link_enable('r1-eth0', enabled=False) + tgen.gears["r1"].link_enable("r1-eth0", enabled=False) # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD converge') + logger.info("waiting for BFD converge") # Check that BGP converged quickly. for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) # Load the same file as previous test, but expect R1 to be down. 
- if router.name == 'r1': + if router.name == "r1": for peer in expected: - if peer['peer'] == '192.168.0.2': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.2": + peer["status"] = "down" else: for peer in expected: - if peer['peer'] == '192.168.0.1': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.1": + peer["status"] = "down" - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert res is None, assertmsg @@ -205,31 +208,27 @@ def test_bgp_fast_reconvergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for BGP re convergence') + logger.info("waiting for BGP re convergence") # Check that BGP converged quickly. for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) # Load the same file as previous test, but set networks to None # to test absence. - if router.name == 'r1': - expected['routes']['10.254.254.2/32'] = None - expected['routes']['10.254.254.3/32'] = None - expected['routes']['10.254.254.4/32'] = None + if router.name == "r1": + expected["routes"]["10.254.254.2/32"] = None + expected["routes"]["10.254.254.3/32"] = None + expected["routes"]["10.254.254.4/32"] = None else: - expected['routes']['10.254.254.1/32'] = None - - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp json', expected) - _, res = topotest.run_and_expect( - test_func, - None, - count=3, - wait=1 + expected["routes"]["10.254.254.1/32"] = None + + test_func = partial( + topotest.router_json_cmp, router, "show ip bgp json", expected ) - assertmsg = '{}: bgp did not converge'.format(router.name) + _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -237,11 +236,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-topo2/test_bfd_topo2.py b/tests/topotests/bfd-topo2/test_bfd_topo2.py index 773db129f0..3e87e8485a 100644 --- a/tests/topotests/bfd-topo2/test_bfd_topo2.py +++ b/tests/topotests/bfd-topo2/test_bfd_topo2.py @@ -35,7 +35,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -49,25 +49,26 @@ from mininet.topo import Topo class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers. 
for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) def setup_module(mod): @@ -78,24 +79,19 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF6, - os.path.join(CWD, '{}/ospf6d.conf'.format(rname)) + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname)) ) # Initialize all routers. @@ -105,8 +101,8 @@ def setup_module(mod): # daemon exists. for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break @@ -128,32 +124,32 @@ def test_protocols_convergence(): # Check IPv4 routing tables. logger.info("Checking IPv4 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ip route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg # Check IPv6 routing tables. 
logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, router, "show ipv6 route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -164,14 +160,15 @@ def test_bfd_connection(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -181,11 +178,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json index 4b2cc1ad62..1262f5e984 100644 --- a/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r1/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.0.2", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.0.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.0.2", diff --git a/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json index 39f3c0a835..0d47c0fc30 100644 --- a/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r2/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "101", + "path": "101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.0.1", @@ -18,7 +18,7 @@ ], "10.254.254.3/32": [ { - "aspath": "103", + "path": "103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.1.1", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "104", + "path": "104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.2.1", diff --git a/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json 
b/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json index c92d4e052a..36fca17bbf 100644 --- a/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r3/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.1.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.1.2", @@ -34,7 +34,7 @@ ], "10.254.254.4/32": [ { - "aspath": "102 104", + "path": "102 104", "prefix": "10.254.254.4", "valid": true, "peerId": "192.168.1.2", diff --git a/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json b/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json index cc8510dd61..efe7d47b1a 100644 --- a/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json +++ b/tests/topotests/bfd-vrf-topo1/r4/bgp_prefixes.json @@ -2,7 +2,7 @@ "routes": { "10.254.254.1/32": [ { - "aspath": "102 101", + "path": "102 101", "prefix": "10.254.254.1", "valid": true, "peerId": "192.168.2.2", @@ -18,7 +18,7 @@ ], "10.254.254.2/32": [ { - "aspath": "102", + "path": "102", "prefix": "10.254.254.2", "valid": true, "peerId": "192.168.2.2", @@ -34,7 +34,7 @@ ], "10.254.254.3/32": [ { - "aspath": "102 103", + "path": "102 103", "prefix": "10.254.254.3", "valid": true, "peerId": "192.168.2.2", diff --git a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py index e2933820bd..eb4f0d4a83 100755 --- a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py +++ b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py @@ -35,7 +35,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -46,27 +46,29 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class BFDTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) def setup_module(mod): @@ -78,47 +80,50 @@ def setup_module(mod): # check for zebra capability for rname, router in router_list.iteritems(): - if router.check_capability( - TopoRouter.RD_ZEBRA, - '--vrfwnetns' - ) == False: - return pytest.skip('Skipping BFD Topo1 VRF NETNS feature. VRF NETNS backend not available on FRR') - - if os.system('ip netns list') != 0: - return pytest.skip('Skipping BFD Topo1 VRF NETNS Test. 
NETNS not available on System') - - logger.info('Testing with VRF Namespace support') - - cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi', - 'ip netns add {0}-cust1', - 'ip link set dev {0}-eth0 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up'] - cmds2 = ['ip link set dev {0}-eth1 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth1 up', - 'ip link set dev {0}-eth2 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth2 up'] + if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False: + return pytest.skip( + "Skipping BFD Topo1 VRF NETNS feature. VRF NETNS backend not available on FRR" + ) + + if os.system("ip netns list") != 0: + return pytest.skip( + "Skipping BFD Topo1 VRF NETNS Test. NETNS not available on System" + ) + + logger.info("Testing with VRF Namespace support") + + cmds = [ + "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi", + "ip netns add {0}-cust1", + "ip link set dev {0}-eth0 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth0 up", + ] + cmds2 = [ + "ip link set dev {0}-eth1 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth1 up", + "ip link set dev {0}-eth2 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth2 up", + ] for rname, router in router_list.iteritems(): # create VRF rx-cust1 and link rx-eth0 to rx-cust1 for cmd in cmds: output = tgen.net[rname].cmd(cmd.format(rname)) - if rname == 'r2': + if rname == "r2": for cmd in cmds2: output = tgen.net[rname].cmd(cmd.format(rname)) for rname, router in router_list.iteritems(): router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), - '--vrfwnetns' + os.path.join(CWD, "{}/zebra.conf".format(rname)), + "--vrfwnetns", ) router.load_config( - TopoRouter.RD_BFD, - os.path.join(CWD, '{}/bfdd.conf'.format(rname)) + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. @@ -128,42 +133,49 @@ def setup_module(mod): # daemon exists. for router in router_list.values(): # Check for Version - if router.has_version('<', '5.1'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "5.1"): + tgen.set_error("Unsupported FRR version") break + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() # move back rx-eth0 to default VRF # delete rx-vrf - cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1', - 'ip netns delete {0}-cust1'] - cmds2 = ['ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1', - 'ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1'] + cmds = [ + "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1", + "ip netns delete {0}-cust1", + ] + cmds2 = [ + "ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1", + "ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1", + ] router_list = tgen.routers() for rname, router in router_list.iteritems(): - if rname == 'r2': + if rname == "r2": for cmd in cmds2: tgen.net[rname].cmd(cmd.format(rname)) for cmd in cmds: tgen.net[rname].cmd(cmd.format(rname)) tgen.stop_topology() + def test_bfd_connection(): "Assert that the BFD peers can find themselves." 
tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bfd peers to go up') + logger.info("waiting for bfd peers to go up") for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -175,15 +187,19 @@ def test_bgp_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers to go up') + logger.info("waiting for bgp peers to go up") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_summary.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp vrf {}-cust1 summary json'.format(router.name), expected) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip bgp vrf {}-cust1 summary json".format(router.name), + expected, + ) _, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -193,15 +209,19 @@ def test_bgp_fast_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp peers converge') + logger.info("waiting for bgp peers converge") for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp vrf {}-cust1 json'.format(router.name), expected) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip bgp vrf {}-cust1 json".format(router.name), + expected, + ) _, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5) - assertmsg = '{}: bgp did not converge'.format(router.name) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -215,30 +235,33 @@ def test_bfd_fast_convergence(): pytest.skip(tgen.errors) # Disable r2-eth0 link - router2 = tgen.gears['r2'] - topotest.interface_set_status(router2, 'r2-eth0', ifaceaction=False, vrf_name='r2-cust1') + router2 = tgen.gears["r2"] + topotest.interface_set_status( + router2, "r2-eth0", ifaceaction=False, vrf_name="r2-cust1" + ) # Wait the minimum time we can before checking that BGP/BFD # converged. - logger.info('waiting for BFD converge') + logger.info("waiting for BFD converge") # Check that BGP converged quickly. for router in tgen.routers().values(): - json_file = '{}/{}/peers.json'.format(CWD, router.name) + json_file = "{}/{}/peers.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) # Load the same file as previous test, but expect R1 to be down. 
- if router.name == 'r1': + if router.name == "r1": for peer in expected: - if peer['peer'] == '192.168.0.2': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.2": + peer["status"] = "down" else: for peer in expected: - if peer['peer'] == '192.168.0.1': - peer['status'] = 'down' + if peer["peer"] == "192.168.0.1": + peer["status"] = "down" - test_func = partial(topotest.router_json_cmp, - router, 'show bfd peers json', expected) + test_func = partial( + topotest.router_json_cmp, router, "show bfd peers json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert res is None, assertmsg @@ -250,31 +273,30 @@ def test_bgp_fast_reconvergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for BGP re convergence') + logger.info("waiting for BGP re convergence") # Check that BGP converged quickly. for router in tgen.routers().values(): - ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name) + ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) # Load the same file as previous test, but set networks to None # to test absence. - if router.name == 'r1': - expected['routes']['10.254.254.2/32'] = None - expected['routes']['10.254.254.3/32'] = None - expected['routes']['10.254.254.4/32'] = None + if router.name == "r1": + expected["routes"]["10.254.254.2/32"] = None + expected["routes"]["10.254.254.3/32"] = None + expected["routes"]["10.254.254.4/32"] = None else: - expected['routes']['10.254.254.1/32'] = None - - test_func = partial(topotest.router_json_cmp, - router, 'show ip bgp vrf {}-cust1 json'.format(router.name), expected) - _, res = topotest.run_and_expect( - test_func, - None, - count=3, - wait=1 + expected["routes"]["10.254.254.1/32"] = None + + test_func = partial( + topotest.router_json_cmp, + router, + "show ip bgp vrf {}-cust1 json".format(router.name), + expected, ) - assertmsg = '{}: bgp did not converge'.format(router.name) + _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) + assertmsg = "{}: bgp did not converge".format(router.name) assert res is None, assertmsg @@ -282,11 +304,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py index e99111d90b..3441d68731 100755 --- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py @@ -48,8 +48,8 @@ from copy import deepcopy # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../lib/')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) # Required to instantiate the topology builder class. 
@@ -59,26 +59,38 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, create_static_routes, - verify_rib, verify_admin_distance_for_static_routes + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_static_routes, + verify_rib, + verify_admin_distance_for_static_routes, ) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, verify_router_id, - modify_as_number, verify_as_numbers, clear_bgp_and_verify, - verify_bgp_timers_and_functionality + verify_bgp_convergence, + create_router_bgp, + verify_router_id, + modify_as_number, + verify_as_numbers, + clear_bgp_and_verify, + verify_bgp_timers_and_functionality, ) from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology creation jsonFile = "{}/bgp_basic_functionality.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) +# Global Variable +KEEPALIVETIMER = 2 +HOLDDOWNTIMER = 6 + class CreateTopo(Topo): """ @@ -121,8 +133,9 @@ def setup_module(mod): global BGP_CONVERGENCE BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}". \ - format(BGP_CONVERGENCE) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) logger.info("Running setup_module() done") @@ -137,8 +150,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". 
- format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -154,7 +168,7 @@ def test_modify_and_delete_router_id(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -162,59 +176,31 @@ def test_modify_and_delete_router_id(request): # Modify router id input_dict = { - 'r1': { - "bgp": { - 'router_id': '12.12.12.12' - } - }, - 'r2': { - "bgp": { - 'router_id': '22.22.22.22' - } - }, - 'r3': { - "bgp": { - 'router_id': '33.33.33.33' - } - }, + "r1": {"bgp": {"router_id": "12.12.12.12"}}, + "r2": {"bgp": {"router_id": "22.22.22.22"}}, + "r3": {"bgp": {"router_id": "33.33.33.33"}}, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}".\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying router id once modified result = verify_router_id(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}".\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Delete router id input_dict = { - 'r1': { - "bgp": { - 'del_router_id': True - } - }, - 'r2': { - "bgp": { - 'del_router_id': True - } - }, - 'r3': { - "bgp": { - 'del_router_id': True - } - }, + "r1": {"bgp": {"del_router_id": True}}, + "r2": {"bgp": {"del_router_id": True}}, + "r3": {"bgp": {"del_router_id": True}}, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying router id once deleted # Once router-id is deleted, highest interface ip should become # router-id result = verify_router_id(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -226,41 +212,94 @@ def test_bgp_config_with_4byte_as_number(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name write_test_header(tc_name) input_dict = { - "r1": { - "bgp": { - "local_as": 131079 - } - }, - "r2": { - "bgp": { - "local_as": 131079 - } - }, - "r3": { - "bgp": { - "local_as": 131079 - } - }, - "r4": { - "bgp": { - "local_as": 131080 - } - } + "r1": {"bgp": {"local_as": 131079}}, + "r2": {"bgp": {"local_as": 131079}}, + "r3": {"bgp": {"local_as": 131079}}, + "r4": {"bgp": {"local_as": 131080}}, } result = modify_as_number(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) result = verify_as_numbers(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_BGP_config_with_invalid_ASN_p2(request): + """ + Configure BGP with invalid ASN(ex - 0, reserved ASN) and verify test case + ended up with error + """ + + tgen = get_topogen() + global BGP_CONVERGENCE + + if BGP_CONVERGENCE != True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + # Api call to modify AS number + input_dict = { + "r1": {"bgp": {"local_as": 0,}}, + "r2": {"bgp": {"local_as": 0,}}, + "r3": {"bgp": {"local_as": 0,}}, + "r4": {"bgp": {"local_as": 64000,}}, + } + result = modify_as_number(tgen, topo, input_dict) + try: + assert result is True + except AssertionError: + logger.info("Expected behaviour: {}".format(result)) + logger.info("BGP config is not created because of invalid ASNs") + + write_test_footer(tc_name) + + +def test_BGP_config_with_2byteAS_and_4byteAS_number_p1(request): + """ + Configure BGP with 4 byte and 2 byte ASN and verify BGP is converged + """ + + tgen = get_topogen() + global BGP_CONVERGENCE + + if BGP_CONVERGENCE != True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + # Api call to modify AS number + input_dict = { + "r1": {"bgp": {"local_as": 131079}}, + "r2": {"bgp": {"local_as": 131079}}, + "r3": {"bgp": {"local_as": 131079}}, + "r4": {"bgp": {"local_as": 111}}, + } + result = modify_as_number(tgen, topo, input_dict) + if result != True: + assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result) + + result = verify_as_numbers(tgen, topo, input_dict) + if result != True: + assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result) + + # Api call verify whether BGP is converged + result = verify_bgp_convergence(tgen, topo) + if result != True: + assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result) write_test_footer(tc_name) @@ -272,7 +311,7 @@ def test_bgp_timers_functionality(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -290,10 +329,10 @@ def test_bgp_timers_functionality(request): "unicast": { "neighbor": { "r2": { - "dest_link":{ + "dest_link": { "r1": { - "keepalivetimer": 60, - "holddowntimer": 180, + "keepalivetimer": KEEPALIVETIMER, + "holddowntimer": HOLDDOWNTIMER, } } } @@ -305,28 +344,24 @@ def test_bgp_timers_functionality(request): } } result = create_router_bgp(tgen, topo, deepcopy(input_dict)) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so timer modification would take place - clear_bgp_and_verify(tgen, topo, 'r1') + clear_bgp_and_verify(tgen, topo, "r1") # Verifying bgp timers functionality result = verify_bgp_timers_and_functionality(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) - - def test_static_routes(request): """ Test to create and verify static routes. 
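The timers test above swaps the 60/180-second defaults for the KEEPALIVETIMER = 2 / HOLDDOWNTIMER = 6 globals added near the top of this file, keeping the conventional 1:3 keepalive-to-hold ratio so a dead peer is detected within a few seconds instead of minutes, and clears BGP so the new values actually take effect on the re-established session. A tiny illustrative check of that ratio, using only the values visible in this diff:

# Globals introduced at the top of this test file (see above).
KEEPALIVETIMER = 2
HOLDDOWNTIMER = 6

# Both the old defaults (60/180) and the new values (2/6) keep the usual
# BGP relationship: the hold-down timer is three keepalive intervals, and
# the session is torn down once it expires without a keepalive arriving.
assert HOLDDOWNTIMER == 3 * KEEPALIVETIMER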
""" tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -338,17 +373,18 @@ def test_static_routes(request): # Api call to create static routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "admin_distance": 100, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + { + "network": "10.0.20.1/32", + "no_of_ip": 9, + "admin_distance": 100, + "next_hop": "10.0.0.2", + } + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes input_dict_1 = { @@ -359,7 +395,7 @@ def test_static_routes(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -369,17 +405,16 @@ def test_static_routes(request): } result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes - dut = 'r3' - protocol = 'bgp' - next_hop = ['10.0.0.2', '10.0.0.5'] - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop, - protocol=protocol) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + dut = "r3" + protocol = "bgp" + next_hop = ["10.0.0.2", "10.0.0.5"] + result = verify_rib( + tgen, "ipv4", dut, input_dict, next_hop=next_hop, protocol=protocol + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -389,7 +424,7 @@ def test_admin_distance_for_existing_static_routes(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -400,21 +435,21 @@ def test_admin_distance_for_existing_static_routes(request): input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "admin_distance": 10, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + { + "network": "10.0.20.1/32", + "admin_distance": 10, + "next_hop": "10.0.0.2", + } + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying admin distance once modified result = verify_admin_distance_for_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -424,7 +459,7 @@ def test_advertise_network_using_network_command(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -441,14 +476,8 @@ def test_advertise_network_using_network_command(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "20.0.0.0/32", - "no_of_network": 10 - }, - { - "network": "30.0.0.0/32", - "no_of_network": 10 - } + {"network": "20.0.0.0/32", "no_of_network": 10}, + {"network": "30.0.0.0/32", "no_of_network": 10}, ] } } @@ -458,15 +487,13 @@ def test_advertise_network_using_network_command(request): } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes - dut = 'r2' + dut = "r2" protocol = "bgp" - result = verify_rib(tgen, 'ipv4', dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -479,7 +506,7 @@ def test_clear_bgp_and_verify(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -489,9 +516,8 @@ def test_clear_bgp_and_verify(request): reset_config_on_routers(tgen) # clear ip bgp - result = clear_bgp_and_verify(tgen, topo, 'r1') - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + result = clear_bgp_and_verify(tgen, topo, "r1") + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -508,7 +534,7 @@ def test_bgp_with_loopback_interface(request): tgen = get_topogen() if BGP_CONVERGENCE is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -517,79 +543,51 @@ def test_bgp_with_loopback_interface(request): # Creating configuration from JSON reset_config_on_routers(tgen) - for routerN in sorted(topo['routers'].keys()): - for bgp_neighbor in \ - topo['routers'][routerN]['bgp']['address_family']['ipv4'][ - 'unicast']['neighbor'].keys(): + for routerN in sorted(topo["routers"].keys()): + for bgp_neighbor in topo["routers"][routerN]["bgp"]["address_family"]["ipv4"][ + "unicast" + ]["neighbor"].keys(): # Adding ['source_link'] = 'lo' key:value pair - topo['routers'][routerN]['bgp']['address_family']['ipv4'][ - 'unicast']['neighbor'][bgp_neighbor]["dest_link"] = { - 'lo': { - "source_link": "lo", - } - } + topo["routers"][routerN]["bgp"]["address_family"]["ipv4"]["unicast"][ + "neighbor" + ][bgp_neighbor]["dest_link"] = {"lo": {"source_link": "lo",}} # Creating configuration from JSON build_config_from_json(tgen, topo) input_dict = { "r1": { - "static_routes": [{ - "network": "1.0.2.17/32", - "next_hop": "10.0.0.2" - }, - { - "network": "1.0.3.17/32", - "next_hop": "10.0.0.6" - } + "static_routes": [ + {"network": "1.0.2.17/32", "next_hop": "10.0.0.2"}, + {"network": "1.0.3.17/32", "next_hop": "10.0.0.6"}, ] }, "r2": { - "static_routes": [{ - "network": "1.0.1.17/32", - "next_hop": "10.0.0.1" - }, - { - "network": "1.0.3.17/32", - "next_hop": "10.0.0.10" - } + "static_routes": [ + {"network": "1.0.1.17/32", "next_hop": "10.0.0.1"}, + {"network": "1.0.3.17/32", "next_hop": "10.0.0.10"}, ] }, "r3": { - "static_routes": [{ - "network": "1.0.1.17/32", - "next_hop": "10.0.0.5" - }, - { - "network": "1.0.2.17/32", - "next_hop": "10.0.0.9" - }, - { - "network": "1.0.4.17/32", - "next_hop": "10.0.0.14" - } + "static_routes": [ + {"network": "1.0.1.17/32", "next_hop": "10.0.0.5"}, + {"network": "1.0.2.17/32", "next_hop": "10.0.0.9"}, + {"network": "1.0.4.17/32", "next_hop": "10.0.0.14"}, ] }, - "r4": { - "static_routes": [{ - "network": "1.0.3.17/32", - "next_hop": "10.0.0.13" - }] - } + "r4": {"static_routes": [{"network": "1.0.3.17/32", "next_hop": "10.0.0.13"}]}, } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) # Api call verify whether BGP is converged result = verify_bgp_convergence(tgen, topo) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer1/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer1/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 
different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer10/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer10/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # 
Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer11/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer11/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 
]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer12/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer12/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 
10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer13/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer13/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from 
time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer14/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = 
datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer14/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer15/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import 
stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer15/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly 
to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer16/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer16/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 
1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer17/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer17/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - 
stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer18/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer18/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE 
and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer19/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer19/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes 
equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer2/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer2/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 
10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer20/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py 
b/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer20/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer3/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing 
continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer3/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py 
b/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer4/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer4/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + 
"announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer5/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer5/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 
10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer6/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer6/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + 
stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer7/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer7/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % 
(i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer8/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer8/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 
10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py b/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py +++ b/tests/topotests/bgp-ecmp-topo1/peer9/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py b/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py index 647c254250..d9ae3d1906 100755 --- a/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py +++ b/tests/topotests/bgp-ecmp-topo1/peer9/exa-send.py @@ -4,7 +4,7 
@@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -13,37 +13,54 @@ sleep(5) # 2nd arg is number of routes to send peer = int(argv[1]) numRoutes = int(argv[2]) -if (peer <= 10): +if peer <= 10: asnum = 99 else: - asnum = peer+100 + asnum = peer + 100 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n" + % (i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes per PE - different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() # Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS for i in range(0, numRoutes): - stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum)) + stdout.write( + "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum) + ) stdout.flush() # Announce 2 different route per peer -stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100)) -stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum)) +stdout.write( + "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100) +) +stdout.write( + "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n" + % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum) +) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp-ecmp-topo1/r1/summary.txt b/tests/topotests/bgp-ecmp-topo1/r1/summary.txt index bccc483d52..11611d041b 100644 --- a/tests/topotests/bgp-ecmp-topo1/r1/summary.txt +++ b/tests/topotests/bgp-ecmp-topo1/r1/summary.txt @@ -9,121 +9,121 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.102":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.103":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.104":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.105":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, 
"state":"Established" }, "10.0.2.106":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.107":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.108":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.109":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.110":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.111":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.112":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.113":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.114":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.115":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.116":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.117":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.118":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.119":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.120":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" } }, diff --git a/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt b/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt index 73ae256abe..f90aedb1ec 100644 --- a/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt +++ b/tests/topotests/bgp-ecmp-topo1/r1/summary20.txt @@ -8,121 +8,121 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.102":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.103":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.104":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.1.105":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.106":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.107":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.108":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.109":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.2.110":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.111":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.112":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.113":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.114":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.3.115":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.116":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.117":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, 
"state":"Established" }, "10.0.4.118":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.119":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" }, "10.0.4.120":{ "outq":0, "inq":0, - "prefixReceivedCount":42, + "pfxRcd":42, "state":"Established" } }, diff --git a/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py b/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py index d806226dff..c37f818b0f 100755 --- a/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py +++ b/tests/topotests/bgp-ecmp-topo1/test_bgp_ecmp_topo1.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -61,23 +61,24 @@ class BGPECMPTopo1(Topo): tgen = get_topogen(self) # Create the BGP router - router = tgen.add_router('r1') + router = tgen.add_router("r1") # Setup Switches - 1 switch per 5 peering routers for swNum in range(1, (total_ebgp_peers + 4) / 5 + 1): - switch = tgen.add_switch('s{}'.format(swNum)) + switch = tgen.add_switch("s{}".format(swNum)) switch.add_link(router) # Add 'total_ebgp_peers' number of eBGP ExaBGP neighbors - for peerNum in range(1, total_ebgp_peers+1): - swNum = ((peerNum - 1) / 5 + 1) + for peerNum in range(1, total_ebgp_peers + 1): + swNum = (peerNum - 1) / 5 + 1 - peer_ip = '10.0.{}.{}'.format(swNum, peerNum + 100) - peer_route = 'via 10.0.{}.1'.format(swNum) - peer = tgen.add_exabgp_peer('peer{}'.format(peerNum), - ip=peer_ip, defaultRoute=peer_route) + peer_ip = "10.0.{}.{}".format(swNum, peerNum + 100) + peer_route = "via 10.0.{}.1".format(swNum) + peer = tgen.add_exabgp_peer( + "peer{}".format(peerNum), ip=peer_ip, defaultRoute=peer_route + ) - switch = tgen.gears['s{}'.format(swNum)] + switch = tgen.gears["s{}".format(swNum)] switch.add_link(peer) @@ -87,6 +88,7 @@ class BGPECMPTopo1(Topo): # ##################################################### + def setup_module(module): tgen = Topogen(BGPECMPTopo1, module.__name__) tgen.start_topology() @@ -95,21 +97,19 @@ def setup_module(module): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) router.start() # Starting Hosts and init ExaBGP on each of them - topotest.sleep(10, 'starting BGP on all {} peers'.format(total_ebgp_peers)) + topotest.sleep(10, "starting BGP on all {} peers".format(total_ebgp_peers)) peer_list = tgen.exabgp_peers() for pname, peer in peer_list.iteritems(): peer_dir = os.path.join(CWD, pname) - env_file = os.path.join(CWD, 'exabgp.env') + env_file = os.path.join(CWD, "exabgp.env") peer.start(peer_dir, env_file) logger.info(pname) @@ -128,11 +128,11 @@ def test_bgp_convergence(): pytest.skip(tgen.errors) # Expected result - router = tgen.gears['r1'] - if router.has_version('<', '3.0'): - reffile = os.path.join(CWD, 'r1/summary20.txt') + router = tgen.gears["r1"] + if router.has_version("<", "3.0"): + reffile = os.path.join(CWD, "r1/summary20.txt") else: - reffile = os.path.join(CWD, 'r1/summary.txt') + reffile = os.path.join(CWD, "r1/summary.txt") 
expected = json.loads(open(reffile).read()) @@ -142,18 +142,19 @@ def test_bgp_convergence(): with 'json') and compare with `data` contents. """ output = router.vtysh_cmd(cmd, isjson=True) - if 'ipv4Unicast' in output: - output['ipv4Unicast']['vrfName'] = \ - output['ipv4Unicast']['vrfName'].replace( - 'default', 'Default') - elif 'vrfName' in output: - output['vrfName'] = output['vrfName'].replace('default', 'Default') + if "ipv4Unicast" in output: + output["ipv4Unicast"]["vrfName"] = output["ipv4Unicast"]["vrfName"].replace( + "default", "Default" + ) + elif "vrfName" in output: + output["vrfName"] = output["vrfName"].replace("default", "Default") return topotest.json_cmp(output, data) test_func = functools.partial( - _output_summary_cmp, router, 'show ip bgp summary json', expected) + _output_summary_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assertmsg = 'BGP router network did not converge' + assertmsg = "BGP router network did not converge" assert res is None, assertmsg @@ -165,26 +166,26 @@ def test_bgp_ecmp(): pytest.skip(tgen.errors) expect = { - 'routerId': '10.0.255.1', - 'routes': { - }, + "routerId": "10.0.255.1", + "routes": {}, } for net in range(1, 5): for subnet in range(0, 10): - netkey = '10.20{}.{}.0/24'.format(net, subnet) - expect['routes'][netkey] = [] + netkey = "10.20{}.{}.0/24".format(net, subnet) + expect["routes"][netkey] = [] for _ in range(0, 10): - peer = {'multipath': True, 'valid': True} - expect['routes'][netkey].append(peer) + peer = {"multipath": True, "valid": True} + expect["routes"][netkey].append(peer) - test_func = functools.partial(topotest.router_json_cmp, - tgen.gears['r1'], 'show ip bgp json', expect) + test_func = functools.partial( + topotest.router_json_cmp, tgen.gears["r1"], "show ip bgp json", expect + ) _, res = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assertmsg = 'expected multipath routes in "show ip bgp" output' assert res is None, assertmsg -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py index 4b9f419bf2..fd3e7fd7d3 100755 --- a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py @@ -41,10 +41,11 @@ import sys import time import json import pytest + # Save the Current Working Directory to find configuration files. 
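The convergence check above follows the usual topotest polling idiom: build a zero-argument callable with functools.partial, let topotest.run_and_expect retry it, and treat a None result from topotest.json_cmp as success. Stripped of the vrfName normalisation, the pattern can be captured in a small hypothetical helper; this is a sketch only, assuming run_and_expect simply calls whatever callable it is given (as the partial usage above implies) and that topotest is imported as in the other topotests in this tree.

from lib import topotest  # assumed import path, matching the surrounding topotests

def wait_for_json(router, cmd, expected, count=60, wait=0.5):
    # Poll `cmd` until its JSON output matches `expected`; json_cmp returns None on a match.
    def check():
        output = router.vtysh_cmd(cmd, isjson=True)
        return topotest.json_cmp(output, expected)
    _, res = topotest.run_and_expect(check, None, count=count, wait=wait)
    return res is None

# Hypothetical usage, mirroring test_bgp_convergence:
# assert wait_for_json(tgen.gears["r1"], "show ip bgp summary json", expected), \
#     "BGP router network did not converge"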
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -52,15 +53,17 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, + start_topology, + write_test_header, write_test_footer, - verify_rib, create_static_routes, check_address_types, - interface_status, reset_config_on_routers + verify_rib, + create_static_routes, + check_address_types, + interface_status, + reset_config_on_routers, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -130,27 +133,32 @@ def setup_module(mod): ADDR_TYPES = check_address_types() BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:" - " {}".format(BGP_CONVERGENCE)) - - link_data = [val for links, val in - topo["routers"]["r2"]["links"].iteritems() - if "r3" in links] + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + link_data = [ + val + for links, val in topo["routers"]["r2"]["links"].iteritems() + if "r3" in links + ] for adt in ADDR_TYPES: NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data] if adt == "ipv4": - NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) + NEXT_HOPS[adt] = sorted(NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) elif adt == "ipv6": NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16)) + NEXT_HOPS[adt], key=lambda x: int(x.split(":")[-3], 16) + ) INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1])) - link_data = [val for links, val in - topo["routers"]["r3"]["links"].iteritems() - if "r2" in links] + link_data = [ + val + for links, val in topo["routers"]["r3"]["links"].iteritems() + if "r2" in links + ] INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1])) @@ -179,40 +187,27 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): input_dict_static = { dut: { "static_routes": [ - { - "network": NETWORK["ipv4"], - "next_hop": NEXT_HOP_IP["ipv4"] - }, - { - "network": NETWORK["ipv6"], - "next_hop": NEXT_HOP_IP["ipv6"] - } + {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]}, + {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]}, ] } } logger.info("Configuring static route on router %s", dut) result = create_static_routes(tgen, input_dict_static) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { dut: { "bgp": { "address_family": { "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } + "unicast": {"redistribute": [{"redist_type": "static"}]} }, "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "unicast": {"redistribute": [{"redist_type": "static"}]} + }, } } } @@ -221,7 +216,8 @@ def 
static_or_nw(tgen, topo, tc_name, test_type, dut): logger.info("Configuring redistribute static route on router %s", dut) result = create_router_bgp(tgen, topo, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) elif test_type == "advertise_nw": input_dict_nw = { @@ -230,28 +226,29 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv4"]} - ] + "advertise_networks": [{"network": NETWORK["ipv4"]}] } }, "ipv6": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv6"]} - ] + "advertise_networks": [{"network": NETWORK["ipv6"]}] } - } + }, } } } } - logger.info("Advertising networks %s %s from router %s", - NETWORK["ipv4"], NETWORK["ipv6"], dut) + logger.info( + "Advertising networks %s %s from router %s", + NETWORK["ipv4"], + NETWORK["ipv6"], + dut, + ) result = create_router_bgp(tgen, topo, input_dict_nw) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) @pytest.mark.parametrize("ecmp_num", ["8", "16", "32"]) @@ -274,20 +271,8 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): "r3": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "maximum_paths": { - "ebgp": ecmp_num, - } - } - }, - "ipv6": { - "unicast": { - "maximum_paths": { - "ebgp": ecmp_num, - } - } - } + "ipv4": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}}, + "ipv6": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}}, } } } @@ -295,35 +280,33 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num) result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_after_clear_bgp(request): +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_after_clear_bgp(request, test_type): """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors""" tc_name = request.node.name @@ -336,46 +319,41 @@ def test_ecmp_after_clear_bgp(request): dut = "r3" protocol = "bgp" - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + 
next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Clear bgp result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -395,22 +373,20 @@ def test_ecmp_remove_redistribute_static(request): # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r2": { @@ -418,22 +394,14 @@ def test_ecmp_remove_redistribute_static(request): "address_family": { "ipv4": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } }, "ipv6": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } - } + }, } } } @@ -441,88 +409,68 @@ def test_ecmp_remove_redistribute_static(request): logger.info("Remove redistribute static") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3 are deleted", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) logger.info("Enable redistribute static") input_dict_2 = { "r2": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - }, - "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": 
"static" - }] - } - } + "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, + "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, } } } } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_shut_bgp_neighbor(request): - """ - Disable/Shut selected paths nexthops and verify other next are installed in - the RIB of DUT. Enable interfaces and verify RIB count. - - Shut BGP neigbors one by one and verify BGP and routing table updated - accordingly in DUT - """ +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_shut_bgp_neighbor(request, test_type): + """ Shut BGP neigbors one by one and verify BGP and routing table updated + accordingly in DUT """ tc_name = request.node.name write_test_header(tc_name) @@ -534,40 +482,33 @@ def test_ecmp_shut_bgp_neighbor(request): protocol = "bgp" reset_config_on_routers(tgen) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - for intf_num in range(len(INTF_LIST_R2)+1, 16): - intf_val = INTF_LIST_R2[intf_num:intf_num+16] + for intf_num in range(len(INTF_LIST_R2) + 1, 16): + intf_val = INTF_LIST_R2[intf_num : intf_num + 16] - input_dict_1 = { - "r2": { - "interface_list": [intf_val], - "status": "down" - } - } - logger.info("Shutting down neighbor interface {} on r2". 
- format(intf_val)) + input_dict_1 = {"r2": {"interface_list": [intf_val], "status": "down"}} + logger.info("Shutting down neighbor interface {} on r2".format(intf_val)) result = interface_status(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: if intf_num + 16 < 32: @@ -575,52 +516,37 @@ def test_ecmp_shut_bgp_neighbor(request): else: check_hops = [] - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=check_hops, - protocol=protocol) + result = verify_rib( + tgen, addr_type, dut, input_dict, next_hop=check_hops, protocol=protocol + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - input_dict_1 = { - "r2": { - "interface_list": INTF_LIST_R2, - "status": "up" - } - } + input_dict_1 = {"r2": {"interface_list": INTF_LIST_R2, "status": "up"}} logger.info("Enabling all neighbor interface {} on r2") result = interface_status(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -643,22 +569,20 @@ def test_ecmp_remove_static_route(request): static_or_nw(tgen, topo, tc_name, "redist_static", "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) result = verify_rib( - tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], protocol=protocol) + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: input_dict_2 = { @@ -667,7 +591,7 @@ def test_ecmp_remove_static_route(request): { "network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type], - "delete": True + "delete": True, } ] } @@ -676,23 +600,29 @@ def test_ecmp_remove_static_route(request): logger.info("Remove static routes") result = create_static_routes(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3 are removed", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_2, - next_hop=[], protocol=protocol, expected=False) - assert 
result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_2, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) for addr_type in ADDR_TYPES: # Enable static routes input_dict_4 = { "r2": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": NEXT_HOP_IP[addr_type] - } + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} ] } } @@ -700,14 +630,21 @@ def test_ecmp_remove_static_route(request): logger.info("Enable static route") result = create_static_routes(tgen, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_4, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) def test_ecmp_remove_nw_advertise(request): @@ -727,22 +664,20 @@ def test_ecmp_remove_nw_advertise(request): reset_config_on_routers(tgen) static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_3 = { "r2": { @@ -750,64 +685,59 @@ def test_ecmp_remove_nw_advertise(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv4"], - "delete": True - }] - } - }, + "advertise_networks": [ + {"network": NETWORK["ipv4"], "delete": True} + ] + } + }, "ipv6": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv6"], - "delete": True - }] - } + "advertise_networks": [ + {"network": NETWORK["ipv6"], "delete": True} + ] } - } + }, } } } + } logger.info("Withdraw advertised networks") result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) static_or_nw(tgen, topo, tc_name, 
"advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py index a9f18ed1fa..94ffc71ef6 100755 --- a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py @@ -41,10 +41,11 @@ import sys import time import json import pytest + # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -52,15 +53,17 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, + start_topology, + write_test_header, write_test_footer, - verify_rib, create_static_routes, check_address_types, - interface_status, reset_config_on_routers + verify_rib, + create_static_routes, + check_address_types, + interface_status, + reset_config_on_routers, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -131,27 +134,32 @@ def setup_module(mod): for addr_type in ADDR_TYPES: BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:" - " {}".format(BGP_CONVERGENCE)) - - link_data = [val for links, val in - topo["routers"]["r2"]["links"].iteritems() - if "r3" in links] + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + link_data = [ + val + for links, val in topo["routers"]["r2"]["links"].iteritems() + if "r3" in links + ] for adt in ADDR_TYPES: NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data] if adt == "ipv4": - NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) + NEXT_HOPS[adt] = sorted(NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2])) elif adt == "ipv6": NEXT_HOPS[adt] = sorted( - NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16)) + NEXT_HOPS[adt], key=lambda x: int(x.split(":")[-3], 16) + ) INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1])) - link_data = [val for links, val in - topo["routers"]["r3"]["links"].iteritems() - if "r2" in links] + link_data = [ + val + for links, val in topo["routers"]["r3"]["links"].iteritems() + if "r2" in links + ] INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R3 = 
sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1])) @@ -180,40 +188,27 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): input_dict_static = { dut: { "static_routes": [ - { - "network": NETWORK["ipv4"], - "next_hop": NEXT_HOP_IP["ipv4"] - }, - { - "network": NETWORK["ipv6"], - "next_hop": NEXT_HOP_IP["ipv6"] - } + {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]}, + {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]}, ] } } logger.info("Configuring static route on router %s", dut) result = create_static_routes(tgen, input_dict_static) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { dut: { "bgp": { "address_family": { "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } + "unicast": {"redistribute": [{"redist_type": "static"}]} }, "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "unicast": {"redistribute": [{"redist_type": "static"}]} + }, } } } @@ -222,7 +217,8 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): logger.info("Configuring redistribute static route on router %s", dut) result = create_router_bgp(tgen, topo, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) elif test_type == "advertise_nw": input_dict_nw = { @@ -231,28 +227,29 @@ def static_or_nw(tgen, topo, tc_name, test_type, dut): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv4"]} - ] + "advertise_networks": [{"network": NETWORK["ipv4"]}] } }, "ipv6": { "unicast": { - "advertise_networks": [ - {"network": NETWORK["ipv6"]} - ] + "advertise_networks": [{"network": NETWORK["ipv6"]}] } - } + }, } } } } - logger.info("Advertising networks %s %s from router %s", - NETWORK["ipv4"], NETWORK["ipv6"], dut) + logger.info( + "Advertising networks %s %s from router %s", + NETWORK["ipv4"], + NETWORK["ipv6"], + dut, + ) result = create_router_bgp(tgen, topo, input_dict_nw) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) @pytest.mark.parametrize("ecmp_num", ["8", "16", "32"]) @@ -275,20 +272,8 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): "r3": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "maximum_paths": { - "ibgp": ecmp_num, - } - } - }, - "ipv6": { - "unicast": { - "maximum_paths": { - "ibgp": ecmp_num, - } - } - } + "ipv4": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}}, + "ipv6": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}}, } } } @@ -296,35 +281,33 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num) result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + 
protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_after_clear_bgp(request): +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_after_clear_bgp(request, test_type): """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors""" tc_name = request.node.name @@ -337,46 +320,41 @@ def test_ecmp_after_clear_bgp(request): dut = "r3" protocol = "bgp" - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Clear bgp result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -396,22 +374,20 @@ def test_ecmp_remove_redistribute_static(request): # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r2": { @@ -419,22 +395,14 @@ def test_ecmp_remove_redistribute_static(request): "address_family": { "ipv4": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } }, "ipv6": { "unicast": { - "redistribute": [{ - "redist_type": "static", - "delete": True - - }] + "redistribute": [{"redist_type": "static", "delete": True}] } - } + }, } } } @@ -442,81 +410,66 @@ def test_ecmp_remove_redistribute_static(request): logger.info("Remove redistribute static") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : 
Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3 are deleted", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) logger.info("Enable redistribute static") input_dict_2 = { "r2": { "bgp": { "address_family": { - "ipv4": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - }, - "ipv6": { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } - } + "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, + "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}}, } } } } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) -def test_ecmp_shut_bgp_neighbor(request): +@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) +def test_ecmp_shut_bgp_neighbor(request, test_type): """ Shut BGP neigbors one by one and verify BGP and routing table updated accordingly in DUT """ @@ -530,40 +483,33 @@ def test_ecmp_shut_bgp_neighbor(request): protocol = "bgp" reset_config_on_routers(tgen) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - for intf_num in range(len(INTF_LIST_R2)+1, 16): - intf_val = INTF_LIST_R2[intf_num:intf_num+16] + for intf_num in range(len(INTF_LIST_R2) + 1, 16): + intf_val = INTF_LIST_R2[intf_num : intf_num + 16] - input_dict_1 = { - "r2": { - "interface_list": [intf_val], - "status": "down" - } - } - logger.info("Shutting down 
neighbor interface {} on r2". - format(intf_val)) + input_dict_1 = {"r2": {"interface_list": [intf_val], "status": "down"}} + logger.info("Shutting down neighbor interface {} on r2".format(intf_val)) result = interface_status(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: if intf_num + 16 < 32: @@ -571,52 +517,37 @@ def test_ecmp_shut_bgp_neighbor(request): else: check_hops = [] - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=check_hops, - protocol=protocol) + result = verify_rib( + tgen, addr_type, dut, input_dict, next_hop=check_hops, protocol=protocol + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - input_dict_1 = { - "r2": { - "interface_list": INTF_LIST_R2, - "status": "up" - } - } + input_dict_1 = {"r2": {"interface_list": INTF_LIST_R2, "status": "up"}} logger.info("Enabling all neighbor interface {} on r2") result = interface_status(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - static_or_nw(tgen, topo, tc_name, "redist_static", "r2") + static_or_nw(tgen, topo, tc_name, test_type, "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -639,22 +570,20 @@ def test_ecmp_remove_static_route(request): static_or_nw(tgen, topo, tc_name, "redist_static", "r2") for addr_type in ADDR_TYPES: - input_dict_1 = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) result = verify_rib( - tgen, addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], protocol=protocol) + tgen, + addr_type, + dut, + input_dict_1, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: input_dict_2 = { @@ -663,7 +592,7 @@ def test_ecmp_remove_static_route(request): { "network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type], - "delete": True + "delete": True, } ] } @@ -672,23 +601,29 @@ def test_ecmp_remove_static_route(request): logger.info("Remove static routes") result = create_static_routes(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3 are removed", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_2, - next_hop=[], 
protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_2, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) for addr_type in ADDR_TYPES: # Enable static routes input_dict_4 = { "r2": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": NEXT_HOP_IP[addr_type] - } + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} ] } } @@ -696,14 +631,21 @@ def test_ecmp_remove_static_route(request): logger.info("Enable static route") result = create_static_routes(tgen, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict_4, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -725,22 +667,20 @@ def test_ecmp_remove_nw_advertise(request): reset_config_on_routers(tgen) static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_3 = { "r2": { @@ -748,64 +688,59 @@ def test_ecmp_remove_nw_advertise(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv4"], - "delete": True - }] - } - }, + "advertise_networks": [ + {"network": NETWORK["ipv4"], "delete": True} + ] + } + }, "ipv6": { "unicast": { - "advertise_networks": [{ - "network": NETWORK["ipv6"], - "delete": True - }] - } + "advertise_networks": [ + {"network": NETWORK["ipv6"], "delete": True} + ] } - } + }, } } } + } logger.info("Withdraw advertised networks") result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=[], protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=[], + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name) 
static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2") for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "static_routes": [ - { - "network": NETWORK[addr_type] - } - ] - } - } + input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}} logger.info("Verifying %s routes on r3", addr_type) - result = verify_rib(tgen, addr_type, dut, input_dict, - next_hop=NEXT_HOPS[addr_type], - protocol=protocol) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict, + next_hop=NEXT_HOPS[addr_type], + protocol=protocol, + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) if __name__ == "__main__": diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py index 3b2d9c25d7..b0ff3ac437 100755 --- a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py +++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py @@ -72,18 +72,26 @@ from lib.topogen import Topogen, TopoRouter, get_topogen # Required to instantiate the topology builder class. from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - verify_rib, create_static_routes, - create_prefix_lists, verify_prefix_lists, - create_route_maps, check_address_types + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + create_prefix_lists, + verify_prefix_lists, + create_route_maps, + check_address_types, ) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify, verify_best_path_as_per_bgp_attribute, - verify_best_path_as_per_admin_distance, modify_as_number, - verify_as_numbers + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_best_path_as_per_bgp_attribute, + verify_best_path_as_per_admin_distance, + modify_as_number, + verify_as_numbers, ) from lib.topojson import build_topo_from_json, build_config_from_json @@ -147,8 +155,7 @@ def setup_module(mod): # Checking BGP convergence result = verify_bgp_convergence(tgen, topo) - assert result is True, ("setup_module :Failed \n Error:" - " {}".format(result)) + assert result is True, "setup_module :Failed \n Error:" " {}".format(result) logger.info("Running setup_module() done") @@ -165,8 +172,7 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: %s", - time.asctime(time.localtime(time.time()))) + logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time()))) logger.info("=" * 40) @@ -176,6 +182,7 @@ def teardown_module(): ## ##################################################### + def test_next_hop_attribute(request): """ Verifying route are not getting installed in, as next_hop is @@ -204,44 +211,38 @@ def test_next_hop_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is 
True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r1" protocol = "bgp" # Verification should fail as nexthop-self is not enabled for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: "\ + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n Error: " "{} routes are not present in RIB".format(addr_type, tc_name) + ) # Configure next-hop-self to bgp neighbor input_dict_1 = { @@ -251,25 +252,17 @@ def test_next_hop_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -279,42 +272,33 @@ def test_next_hop_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r1" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -343,27 +327,19 @@ def test_aspath_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -373,25 +349,17 @@ def test_aspath_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -401,42 +369,34 @@ def test_aspath_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, 
result) # Verifying best path dut = "r1" - attribute = "aspath" + attribute = "path" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify AS-Path and verify best path is changed # Create Prefix list @@ -445,66 +405,52 @@ def test_aspath_attribute(request): "r3": { "prefix_lists": { "ipv4": { - "pf_ls_1_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_1_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_1_ipv6": [{ - "seqid": 10, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_1_ipv6": [ + { + "seqid": 10, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r3": { "route_maps": { - "RMAP_AS_PATH": [{ - "action": "permit", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_AS_PATH": [ + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"path": {"as_num": "111 222", "as_action": "prepend"}}, }, - "set": { - "aspath": { - "as_num": "111 222", - "as_action": "prepend" - } - } - }, - { - "action": "permit", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"path": {"as_num": "111 222", "as_action": "prepend"}}, }, - "set": { - "aspath": { - "as_num": "111 222", - "as_action": "prepend" - } - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -518,8 +464,10 @@ def test_aspath_attribute(request): "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_AS_PATH", - "direction": "in"} + { + "name": "RMAP_AS_PATH", + "direction": "in", + } ] } } @@ -534,32 +482,34 @@ def test_aspath_attribute(request): "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_AS_PATH", - "direction": "in"} + { + "name": "RMAP_AS_PATH", + "direction": "in", + } ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "aspath" + attribute = "path" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -588,27 +538,19 @@ def test_localpref_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - 
"network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -618,25 +560,17 @@ def test_localpref_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -646,95 +580,78 @@ def test_localpref_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Prefix list input_dict_2 = { "r2": { "prefix_lists": { "ipv4": { - "pf_ls_1_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_1_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_1_ipv6": [{ - "seqid": 10, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_1_ipv6": [ + { + "seqid": 10, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r2": { "route_maps": { - "RMAP_LOCAL_PREF": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_LOCAL_PREF": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"locPrf": 1111}, }, - "set": { - "localpref": 1111 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"locPrf": 1111}, }, - "set": { - "localpref": 1111 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -748,8 +665,10 @@ def test_localpref_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_LOCAL_PREF", - "direction": "in"} + { + "name": "RMAP_LOCAL_PREF", + "direction": "in", + } ] } } @@ -764,77 +683,69 @@ def test_localpref_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_LOCAL_PREF", - "direction": "in"} + { + "name": "RMAP_LOCAL_PREF", + "direction": "in", + } ] } } } } } - } + }, } } } } result = 
create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "localpref" + attribute = "locPrf" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify route map input_dict_3 = { "r2": { "route_maps": { - "RMAP_LOCAL_PREF": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_LOCAL_PREF": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"locPrf": 50}, }, - "set": { - "localpref": 50 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"locPrf": 50}, }, - "set": { - "localpref": 50 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "localpref" + attribute = "locPrf" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -866,27 +777,19 @@ def test_weight_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -896,25 +799,17 @@ def test_weight_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -924,94 +819,77 @@ def test_weight_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Prefix list input_dict_2 = { "r1": { 
"prefix_lists": { "ipv4": { - "pf_ls_1_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_1_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_1_ipv6": [{ - "seqid": 10, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_1_ipv6": [ + { + "seqid": 10, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r1": { "route_maps": { - "RMAP_WEIGHT": [{ - "action": "permit", - "seq_id": "5", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_WEIGHT": [ + { + "action": "permit", + "seq_id": "5", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"weight": 500}, }, - "set": { - "weight": 500 - } - }, - { - "action": "permit", - "seq_id": "10", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "10", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"weight": 500}, }, - "set": { - "weight": 500 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -1025,8 +903,10 @@ def test_weight_attribute(request): "dest_link": { "r1": { "route_maps": [ - {"name": "RMAP_WEIGHT", - "direction": "in"} + { + "name": "RMAP_WEIGHT", + "direction": "in", + } ] } } @@ -1041,77 +921,69 @@ def test_weight_attribute(request): "dest_link": { "r1": { "route_maps": [ - {"name": "RMAP_WEIGHT", - "direction": "in"} + { + "name": "RMAP_WEIGHT", + "direction": "in", + } ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "weight" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify route map input_dict_3 = { "r1": { "route_maps": { - "RMAP_WEIGHT": [{ - "action": "permit", - "seq_id": "5", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_1_ipv4" - } + "RMAP_WEIGHT": [ + { + "action": "permit", + "seq_id": "5", + "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}}, + "set": {"weight": 1000}, }, - "set": { - "weight": 1000 - } - }, - { - "action": "permit", - "seq_id": "10", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_1_ipv6" - } + { + "action": "permit", + "seq_id": "10", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"weight": 1000}, }, - "set": { - "weight": 1000 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, 
result) # Verifying best path dut = "r1" attribute = "weight" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r7": input_dict["r7"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1143,27 +1015,19 @@ def test_origin_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -1173,25 +1037,17 @@ def test_origin_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}} } } - } + }, } } }, @@ -1201,25 +1057,17 @@ def test_origin_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r3": {"next_hop_self": True}}} } } - } + }, } } }, @@ -1230,7 +1078,7 @@ def test_origin_attribute(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -1238,54 +1086,41 @@ def test_origin_attribute(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to create static routes input_dict_3 = { "r5": { "static_routes": [ - { - "network": "200.50.2.0/32", - "next_hop": "Null0" - }, - { - "network": "200.60.2.0/32", - "next_hop": "Null0" - }, - { - "network": "200:50:2::/128", - "next_hop": "Null0" - }, - { - "network": "200:60:2::/128", - "next_hop": "Null0" - } + {"network": "200.50.2.0/32", "next_hop": "Null0"}, + {"network": "200.60.2.0/32", "next_hop": "Null0"}, + {"network": "200:50:2::/128", "next_hop": "Null0"}, + {"network": "200:60:2::/128", "next_hop": "Null0"}, ] } } result = create_static_routes(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "origin" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - {"r4": input_dict["r4"]}, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, {"r4": input_dict["r4"]}, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) 
@@ -1317,27 +1152,19 @@ def test_med_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } }, @@ -1347,145 +1174,122 @@ def test_med_attribute(request): "ipv4": { "unicast": { "advertise_networks": [ - { - "network": "200.50.2.0/32" - }, - { - "network": "200.60.2.0/32" - } + {"network": "200.50.2.0/32"}, + {"network": "200.60.2.0/32"}, ] } }, "ipv6": { "unicast": { "advertise_networks": [ - { - "network": "200:50:2::/128" - }, - { - "network": "200:60:2::/128" - } + {"network": "200:50:2::/128"}, + {"network": "200:60:2::/128"}, ] } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Prefix list input_dict_2 = { "r2": { "prefix_lists": { "ipv4": { - "pf_ls_r2_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_r2_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_r2_ipv6": [{ - "seqid": 20, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_r2_ipv6": [ + { + "seqid": 20, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } }, "r3": { "prefix_lists": { "ipv4": { - "pf_ls_r3_ipv4": [{ - "seqid": 10, - "network": "200.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_ls_r3_ipv4": [ + { + "seqid": 10, + "network": "200.0.0.0/8", + "le": "32", + "action": "permit", + } + ] }, "ipv6": { - "pf_ls_r3_ipv6": [{ - "seqid": 20, - "network": "200::/8", - "le": "128", - "action": "permit" - }] - } + "pf_ls_r3_ipv6": [ + { + "seqid": 20, + "network": "200::/8", + "le": "128", + "action": "permit", + } + ] + }, } - } + }, } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { "r2": { "route_maps": { - "RMAP_MED_R2": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_r2_ipv4" - } + "RMAP_MED_R2": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_r2_ipv4"}}, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_r2_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_r2_ipv6"}}, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - }] + ] } }, "r3": { "route_maps": { - "RMAP_MED_R3": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_r3_ipv4" - } + "RMAP_MED_R3": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}}, + "set": {"metric": 10}, }, - "set": { - "med": 10 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_r3_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": 
{"prefix_lists": "pf_ls_r3_ipv6"}}, + "set": {"metric": 10}, }, - "set": { - "med": 10 - } - }] + ] } - } + }, } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_4 = { @@ -1499,17 +1303,15 @@ def test_med_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_MED_R2", - "direction": "in"} + { + "name": "RMAP_MED_R2", + "direction": "in", + } ] } } }, - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}}, } } }, @@ -1520,20 +1322,18 @@ def test_med_attribute(request): "dest_link": { "r2-link1": { "route_maps": [ - {"name": "RMAP_MED_R2", - "direction": "in"} + { + "name": "RMAP_MED_R2", + "direction": "in", + } ] } } }, - "r1": { - "dest_link": { - "r2": {"next_hop_self": True} - } - } + "r1": {"dest_link": {"r2": {"next_hop_self": True}}}, } } - } + }, } } }, @@ -1543,107 +1343,95 @@ def test_med_attribute(request): "ipv4": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - }, + "r1": {"dest_link": {"r3": {"next_hop_self": True}}}, "r5": { "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_MED_R3", - "direction": "in"} + { + "name": "RMAP_MED_R3", + "direction": "in", + } ] } } - } + }, } } }, "ipv6": { "unicast": { "neighbor": { - "r1": { - "dest_link": { - "r3": {"next_hop_self": True} - } - }, + "r1": {"dest_link": {"r3": {"next_hop_self": True}}}, "r5": { "dest_link": { "r3": { "route_maps": [ - {"name": "RMAP_MED_R3", - "direction": "in"} + { + "name": "RMAP_MED_R3", + "direction": "in", + } ] } } - } + }, } } - } + }, } } - } + }, } - result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "med" + attribute = "metric" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - input_dict, attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_dict, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify route-map to set med value input_dict_3 = { "r3": { "route_maps": { - "RMAP_MED_R3": [{ - "action": "permit", - "seq_id": "10", - "match": { - "ipv4": { - "prefix_lists": "pf_ls_r3_ipv4" - } + "RMAP_MED_R3": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}}, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - }, - { - "action": "permit", - "seq_id": "20", - "match": { - "ipv6": { - "prefix_lists": "pf_ls_r3_ipv6" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}}, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" - attribute = "med" + attribute = "metric" for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_bgp_attribute(tgen, 
addr_type, dut, - input_dict, attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_dict, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1674,29 +1462,28 @@ def test_admin_distance(request): { "network": "200.50.2.0/32", "admin_distance": 80, - "next_hop": "10.0.0.14" + "next_hop": "10.0.0.14", }, { "network": "200.50.2.0/32", "admin_distance": 60, - "next_hop": "10.0.0.18" + "next_hop": "10.0.0.18", }, { "network": "200:50:2::/128", "admin_distance": 80, - "next_hop": "fd00::1" + "next_hop": "fd00::1", }, { "network": "200:50:2::/128", "admin_distance": 60, - "next_hop": "fd00::1" - } + "next_hop": "fd00::1", + }, ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes input_dict_2 = { @@ -1707,7 +1494,7 @@ def test_admin_distance(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -1715,60 +1502,63 @@ def test_admin_distance(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying best path dut = "r1" attribute = "admin_distance" input_dict = { - "ipv4": { - "r2": { - "static_routes": [{ - "network": "200.50.2.0/32", - "admin_distance": 80, - "next_hop": "10.0.0.14" - }, - { - "network": "200.50.2.0/32", - "admin_distance": 60, - "next_hop": "10.0.0.18" - } - ] - } - }, - "ipv6": { - "r2": { - "static_routes": [{ - "network": "200:50:2::/128", - "admin_distance": 80, - "next_hop": "fd00::1" - }, - { - "network": "200:50:2::/128", - "admin_distance": 60, - "next_hop": "fd00::1" - }] + "ipv4": { + "r2": { + "static_routes": [ + { + "network": "200.50.2.0/32", + "admin_distance": 80, + "next_hop": "10.0.0.14", + }, + { + "network": "200.50.2.0/32", + "admin_distance": 60, + "next_hop": "10.0.0.18", + }, + ] } - } + }, + "ipv6": { + "r2": { + "static_routes": [ + { + "network": "200:50:2::/128", + "admin_distance": 80, + "next_hop": "fd00::1", + }, + { + "network": "200:50:2::/128", + "admin_distance": 60, + "next_hop": "fd00::1", + }, + ] + } + }, } for addr_type in ADDR_TYPES: - result = verify_best_path_as_per_admin_distance(tgen, addr_type, dut, - input_dict[addr_type], - attribute) + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py index b8975997ea..22952f645c 100755 --- a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py +++ b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py @@ -60,16 +60,17 @@ from lib.topogen import Topogen, get_topogen # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - 
write_test_footer, reset_config_on_routers, - verify_rib, create_static_routes, - create_prefix_lists, verify_prefix_lists + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + create_prefix_lists, + verify_prefix_lists, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify -) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology creation @@ -109,7 +110,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -133,8 +134,9 @@ def setup_module(mod): # Api call verify whether BGP is converged BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:" - " {}".format(BGP_CONVERGENCE)) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) logger.info("Running setup_module() done") @@ -153,9 +155,11 @@ def teardown_module(mod): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". - format(time.asctime(time.localtime(time.time())))) - logger.info("="*40) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + ##################################################### # @@ -180,34 +184,26 @@ def test_ip_prefix_lists_in_permit(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1": [{"seqid": 10, "network": "any", "action": "permit"}] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure bgp neighbor with prefix list input_dict_3 = { @@ -218,7 +214,7 @@ def test_ip_prefix_lists_in_permit(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -235,10 +231,7 @@ def test_ip_prefix_lists_in_permit(request): "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -248,18 +241,16 @@ def test_ip_prefix_lists_in_permit(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, 
protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -283,43 +274,34 @@ def test_ip_prefix_lists_out_permit(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Static routes input_dict_1 = { "r1": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) input_dict_5 = { "r3": { - "static_routes": [{ - "network": "10.0.0.2/30", - "no_of_ip": 1, - "next_hop": "10.0.0.9" - }] + "static_routes": [ + {"network": "10.0.0.2/30", "no_of_ip": 1, "next_hop": "10.0.0.9"} + ] } } result = create_static_routes(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -328,18 +310,15 @@ def test_ip_prefix_lists_out_permit(request): "r1": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": 10, - "network": "20.0.20.1/32", - "action": "permit" - }] + "pf_list_1": [ + {"seqid": 10, "network": "20.0.20.1/32", "action": "permit"} + ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor # Configure bgp neighbor with prefix list @@ -356,7 +335,7 @@ def test_ip_prefix_lists_out_permit(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -365,8 +344,8 @@ def test_ip_prefix_lists_out_permit(request): }, "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} - ] + {"redist_type": "connected"}, + ], } } } @@ -375,19 +354,20 @@ def test_ip_prefix_lists_out_permit(request): } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) - - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not 
True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -410,16 +390,13 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 1, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes # Create ip prefix list @@ -428,24 +405,15 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): "prefix_lists": { "ipv4": { "pf_list_1": [ - { - "seqid": "10", - "network": "10.0.20.1/32", - "action": "deny" - }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"}, + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure bgp neighbor with prefix list input_dict_3 = { @@ -456,7 +424,7 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -473,10 +441,7 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -486,19 +451,21 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request): } } } - } + }, } # Configure prefix list to bgp neighbor result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -525,23 +492,19 @@ def test_delete_prefix_lists(request): "prefix_lists": { "ipv4": { "pf_list_1": [ - { - "seqid": "10", - "network": "10.0.20.1/32", - "action": "deny" - } + {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"} ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, input_dict_2) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Delete prefix list input_dict_2 = { @@ -553,7 +516,7 @@ def test_delete_prefix_lists(request): "seqid": "10", "network": "10.0.20.1/32", "action": "deny", - "delete": True + "delete": True, } ] } @@ -561,12 +524,10 @@ def 
test_delete_prefix_lists(request): } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -590,30 +551,24 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Static Routes input_dict_1 = { "r2": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.1" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"} + ] } } result = create_static_routes(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -627,21 +582,16 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_4 = { @@ -652,7 +602,7 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -666,7 +616,7 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -685,7 +635,7 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -696,25 +646,26 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: 
Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -737,16 +688,13 @@ def test_modify_prefix_lists_in_permit_to_deny(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -755,19 +703,20 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_3 = { @@ -778,7 +727,7 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -792,13 +741,10 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "unicast": { "neighbor": { "r1": { - "dest_link":{ + "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -808,18 +754,16 @@ def test_modify_prefix_lists_in_permit_to_deny(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Modify prefix list input_dict_1 = { @@ -831,34 +775,31 @@ def test_modify_prefix_lists_in_permit_to_deny(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, 
protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -882,16 +823,13 @@ def test_modify_prefix_lists_in_deny_to_permit(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -905,21 +843,16 @@ def test_modify_prefix_lists_in_deny_to_permit(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_2 = { @@ -930,7 +863,7 @@ def test_modify_prefix_lists_in_deny_to_permit(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -947,10 +880,7 @@ def test_modify_prefix_lists_in_deny_to_permit(request): "dest_link": { "r3": { "prefix_lists": [ - { - "name": "pf_list_1", - "direction": "in" - } + {"name": "pf_list_1", "direction": "in"} ] } } @@ -960,51 +890,51 @@ def test_modify_prefix_lists_in_deny_to_permit(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) # Modify ip prefix list input_dict_1 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: 
{}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1028,16 +958,13 @@ def test_modify_prefix_lists_out_permit_to_deny(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes @@ -1046,20 +973,20 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_2 = { @@ -1070,7 +997,7 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1089,7 +1016,7 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -1100,18 +1027,16 @@ def test_modify_prefix_lists_out_permit_to_deny(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Modify ip prefix list input_dict_1 = { @@ -1123,35 +1048,31 @@ def test_modify_prefix_lists_out_permit_to_deny(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - 
assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) @@ -1175,16 +1096,13 @@ def test_modify_prefix_lists_out_deny_to_permit(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes # Create ip prefix list @@ -1197,22 +1115,16 @@ def test_modify_prefix_lists_out_deny_to_permit(request): "seqid": "10", "network": "10.0.0.0/8", "le": "32", - "action": "deny" + "action": "deny", }, - { - "seqid": "11", - "network": "any", - "action": "permit" - } + {"seqid": "11", "network": "any", "action": "permit"}, ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_2 = { @@ -1223,7 +1135,7 @@ def test_modify_prefix_lists_out_deny_to_permit(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1237,12 +1149,12 @@ def test_modify_prefix_lists_out_deny_to_permit(request): "unicast": { "neighbor": { "r4": { - "dest_link":{ + "dest_link": { "r3": { "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -1253,51 +1165,51 @@ def test_modify_prefix_lists_out_deny_to_permit(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) # Modify ip prefix list input_dict_1 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase 
{} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1321,30 +1233,24 @@ def test_ip_prefix_lists_implicit_deny(request): # Create Static Routes input_dict = { "r1": { - "static_routes": [{ - "network": "10.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.2" - }] + "static_routes": [ + {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create Static Routes input_dict_1 = { "r2": { - "static_routes": [{ - "network": "20.0.20.1/32", - "no_of_ip": 9, - "next_hop": "10.0.0.1" - }] + "static_routes": [ + {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"} + ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to redistribute static routes # Create ip prefix list @@ -1352,20 +1258,20 @@ def test_ip_prefix_lists_implicit_deny(request): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1": [{ - "seqid": "10", - "network": "10.0.0.0/8", - "le": "32", - "action": "permit" - }] + "pf_list_1": [ + { + "seqid": "10", + "network": "10.0.0.0/8", + "le": "32", + "action": "permit", + } + ] } } } - } result = create_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Configure prefix list to bgp neighbor input_dict_4 = { @@ -1376,7 +1282,7 @@ def test_ip_prefix_lists_implicit_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1390,7 +1296,7 @@ def test_ip_prefix_lists_implicit_deny(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } } @@ -1409,7 +1315,7 @@ def test_ip_prefix_lists_implicit_deny(request): "prefix_lists": [ { "name": "pf_list_1", - "direction": "out" + "direction": "out", } ] } @@ -1420,25 +1326,26 @@ def test_ip_prefix_lists_implicit_deny(request): } } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r4" protocol = "bgp" - result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: Routes still" \ - " present in RIB".format(tc_name) + result = verify_rib( + tgen, "ipv4", dut, input_dict_1, protocol=protocol, 
expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name) write_test_footer(tc_name) diff --git a/tests/topotests/bgp-route-map/test_route_map_topo1.py b/tests/topotests/bgp-route-map/test_route_map_topo1.py index 22dd3a6380..1aa951edaa 100755 --- a/tests/topotests/bgp-route-map/test_route_map_topo1.py +++ b/tests/topotests/bgp-route-map/test_route_map_topo1.py @@ -82,16 +82,29 @@ from mininet.topo import Topo # Required to instantiate the topology builder class. from lib.topojson import * from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_bgp_community, - verify_rib, delete_route_maps, create_bgp_community_lists, - interface_status, create_route_maps, create_prefix_lists, - verify_route_maps, check_address_types, - shutdown_bringup_interface, verify_prefix_lists, reset_config_on_routers) + start_topology, + write_test_header, + write_test_footer, + verify_bgp_community, + verify_rib, + delete_route_maps, + create_bgp_community_lists, + interface_status, + create_route_maps, + create_prefix_lists, + verify_route_maps, + check_address_types, + shutdown_bringup_interface, + verify_prefix_lists, + reset_config_on_routers, +) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify, verify_bgp_attributes) + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_attributes, +) from lib.topojson import build_topo_from_json, build_config_from_json @@ -109,15 +122,9 @@ except IOError: # Global variables bgp_convergence = False -NETWORK = { - "ipv4": ["11.0.20.1/32", "20.0.20.1/32"], - "ipv6": ["1::1/128", "2::1/128"] -} +NETWORK = {"ipv4": ["11.0.20.1/32", "20.0.20.1/32"], "ipv6": ["1::1/128", "2::1/128"]} MASK = {"ipv4": "32", "ipv6": "128"} -NEXT_HOP = { - "ipv4": "10.0.0.2", - "ipv6": "fd00::2" -} +NEXT_HOP = {"ipv4": "10.0.0.2", "ipv6": "fd00::2"} ADDR_TYPES = check_address_types() @@ -170,8 +177,9 @@ def setup_module(mod): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) logger.info("Running setup_module() done") @@ -190,8 +198,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". 
- format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -230,7 +239,8 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -242,7 +252,7 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -250,18 +260,19 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r4": { @@ -277,7 +288,8 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): result = create_static_routes(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_5 = { @@ -288,7 +300,7 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -296,81 +308,94 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "action": "permit", - "network": NETWORK["ipv4"][0] - }], - "pf_list_2_ipv4": [{ - "seqid": 10, - "action": "permit", - "network": NETWORK["ipv4"][1] - }] + "pf_list_1_ipv4": [ + { + "seqid": 10, + "action": "permit", + "network": NETWORK["ipv4"][0], + } + ], + "pf_list_2_ipv4": [ + { + "seqid": 10, + "action": "permit", + "network": NETWORK["ipv4"][1], + } + ], }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "action": "permit", - "network": NETWORK["ipv6"][0] - }], - "pf_list_2_ipv6": [{ - "seqid": 100, - "action": "permit", - "network": NETWORK["ipv6"][1] - }] - } + "pf_list_1_ipv6": [ + { + "seqid": 100, + "action": "permit", + "network": NETWORK["ipv6"][0], + } + ], + "pf_list_2_ipv6": [ + { + "seqid": 100, + "action": "permit", + "network": NETWORK["ipv6"][1], + } + ], + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_6 = { - "r3": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + { + "action": "deny", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, } - } - }], - "rmap_match_tag_2_{}".format(addr_type): [{ - "action": "permit", 
- "match": { - addr_type: { - "prefix_lists": - "pf_list_2_{}".format(addr_type) + ], + "rmap_match_tag_2_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, } - } - }] + ], + } } } - } result = create_route_maps(tgen, input_dict_6) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_7 = { @@ -384,12 +409,14 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_tag_1_ipv4", - "direction": "in"}, - {"name": - "rmap_match_tag_1_ipv4", - "direction": "out"} + { + "name": "rmap_match_tag_1_ipv4", + "direction": "in", + }, + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + }, ] } } @@ -404,19 +431,21 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_tag_1_ipv6", - "direction": "in"}, - {"name": - "rmap_match_tag_1_ipv6", - "direction": "out"} + { + "name": "rmap_match_tag_1_ipv6", + "direction": "in", + }, + { + "name": "rmap_match_tag_1_ipv6", + "direction": "out", + }, ] } } } } } - } + }, } } } @@ -424,7 +453,8 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): result = create_router_bgp(tgen, topo, input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for adt in ADDR_TYPES: # Verifying RIB routes @@ -436,17 +466,17 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): { "network": [NETWORK[adt][1]], "no_of_ip": 9, - "next_hop": NEXT_HOP[adt] + "next_hop": NEXT_HOP[adt], } ] } } - result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol, - expected=False) + result = verify_rib( + tgen, adt, dut, input_dict_2, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present in rib \n Error: {}".format( - tc_name, result) + "routes are not present in rib \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes @@ -457,26 +487,28 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request): { "network": [NETWORK[adt][0]], "no_of_ip": 9, - "next_hop": NEXT_HOP[adt] + "next_hop": NEXT_HOP[adt], } ] } } - result = verify_rib(tgen, adt, dut, input_dict, protocol=protocol, - expected=False) + result = verify_rib( + tgen, adt, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n " - "routes are not present in rib \n Error: {}".format( - tc_name, result) + "routes are not present in rib \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) -@pytest.mark.parametrize("prefix_action, rmap_action", [("permit", "permit"), - ("permit", "deny"), ("deny", "permit"), - ("deny", "deny")]) +@pytest.mark.parametrize( + "prefix_action, rmap_action", + [("permit", "permit"), ("permit", "deny"), ("deny", "permit"), ("deny", "deny")], +) def test_route_map_with_action_values_combination_of_prefix_action_p0( - request, prefix_action, rmap_action): + request, prefix_action, rmap_action +): """ TC_36: Test permit/deny statements operation in route-maps with a permutation and @@ -501,7 +533,7 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( { "network": NETWORK[adt][0], "no_of_ip": 9, - "next_hop": NEXT_HOP[adt] + 
"next_hop": NEXT_HOP[adt], } ] } @@ -509,7 +541,8 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -521,7 +554,7 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -529,65 +562,64 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Permit in perfix list and route-map input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": prefix_action - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": prefix_action} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": prefix_action - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": prefix_action} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": rmap_action, - "match": { - addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) - } + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": rmap_action, + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_7 = { @@ -601,9 +633,10 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_pf_1_ipv4", - "direction": "in"} + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } ] } } @@ -618,16 +651,17 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( "dest_link": { "r3": { "route_maps": [ - {"name": - "rmap_match_pf_1_ipv6", - "direction": "in"} + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } ] } } } } } - } + }, } } } @@ -635,7 +669,8 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( result = create_router_bgp(tgen, topo, input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) dut = "r3" protocol = "bgp" @@ -651,17 +686,18 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0( } } - #tgen.mininet_cli() - result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol, - expected=False) + # tgen.mininet_cli() + result = verify_rib( + tgen, adt, dut, input_dict_2, protocol=protocol, expected=False + ) if "deny" in [prefix_action, rmap_action]: assert result is not True, "Testcase {} : Failed \n " - "Routes are still present \n Error: {}".\ - 
format(tc_name, result) + "Routes are still present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) else: assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) def test_route_map_multiple_seq_different_match_set_clause_p0(request): @@ -683,16 +719,19 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": NETWORK[adt][0], - "no_of_ip": 1, - "next_hop": NEXT_HOP[adt] - }] + "static_routes": [ + { + "network": NETWORK[adt][0], + "no_of_ip": 1, + "next_hop": NEXT_HOP[adt], + } + ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -703,7 +742,7 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -711,94 +750,82 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [ - { - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_2_{}".format(addr_type) - } + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, + "set": {"path": {"as_num": 500}}, }, - "set": { - "aspath": { - "as_num": 500 - } - } - }, - { - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_2_{}".format(addr_type) - } + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, }, - "set": { - "localpref": 150, - } - }, - { - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, }, - "set": { - "med": 50 - } - } - ] - } - } + ] + } + } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -811,25 +838,27 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "r1": { 
"dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -839,65 +868,64 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for adt in ADDR_TYPES: # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = { - "r3": { - "route_maps": { - "rmap_match_pf_list1": [{ - "set": { - "med": 50, - } - }], - } - } + "r3": {"route_maps": {"rmap_match_pf_list1": [{"set": {"metric": 50,}}],}} } static_routes = [NETWORK[adt][0]] time.sleep(2) - result = verify_bgp_attributes(tgen, adt, dut, static_routes, - "rmap_match_pf_list1", input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, dut, static_routes, "rmap_match_pf_list1", input_dict + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) dut = "r4" - result = verify_bgp_attributes(tgen, adt, dut, static_routes, - "rmap_match_pf_list1", input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, dut, static_routes, "rmap_match_pf_list1", input_dict + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) logger.info("Testcase " + tc_name + " :Passed \n") @@ -924,16 +952,19 @@ def test_route_map_set_only_no_match_p0(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": NETWORK[adt][0], - "no_of_ip": 1, - "next_hop": NEXT_HOP[adt] - }] + "static_routes": [ + { + "network": NETWORK[adt][0], + "no_of_ip": 1, + "next_hop": NEXT_HOP[adt], + } + ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -944,7 +975,7 @@ def test_route_map_set_only_no_match_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -952,17 +983,18 @@ def test_route_map_set_only_no_match_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map input_dict_3 = { @@ -971,11 +1003,7 @@ def test_route_map_set_only_no_match_p0(request): "rmap_match_pf_1": [ { "action": "permit", - "set": { - "med": 50, - "localpref": 150, - "weight": 4000 - } + "set": {"metric": 50, "locPrf": 150, "weight": 4000}, } ] } @@ -983,7 +1011,8 @@ def test_route_map_set_only_no_match_p0(request): } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -996,23 +1025,27 @@ def test_route_map_set_only_no_match_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "out", + } + ] } } - } + }, } } }, @@ -1022,61 +1055,63 @@ def test_route_map_set_only_no_match_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": "rmap_match_pf_1", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) time.sleep(2) for adt in ADDR_TYPES: input_dict_4 = { "r3": { "route_maps": { - "rmap_match_pf_1": [ - { - "action": "permit", - "set": { - "med": 50, - } - } - ] + "rmap_match_pf_1": [{"action": "permit", "set": {"metric": 50,}}] } } } # Verifying RIB routes static_routes = [NETWORK[adt][0]] - result = verify_bgp_attributes(tgen, adt, "r3", static_routes, - "rmap_match_pf_1", input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) - - result = verify_bgp_attributes(tgen, adt, "r4", static_routes, - "rmap_match_pf_1", input_dict_4) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, "r3", static_routes, "rmap_match_pf_1", input_dict_3 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_bgp_attributes( + tgen, adt, "r4", static_routes, "rmap_match_pf_1", input_dict_4 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) logger.info("Testcase " + tc_name + " :Passed \n") @@ -1103,16 +1138,19 @@ def test_route_map_match_only_no_set_p0(request): # Create Static routes input_dict = { "r1": { - "static_routes": [{ - "network": NETWORK[adt][0], - "no_of_ip": 1, - "next_hop": NEXT_HOP[adt] - }] + "static_routes": [ + { + "network": NETWORK[adt][0], + "no_of_ip": 1, + "next_hop": NEXT_HOP[adt], + } + ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -1123,7 +1161,7 @@ def test_route_map_match_only_no_set_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -1131,62 +1169,56 @@ def test_route_map_match_only_no_set_p0(request): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_2 = { "r1": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { "r1": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "set": { - "med": 50, - "localpref": 150, - } - } + "rmap_match_pf_1_{}".format(addr_type): [ + {"action": "permit", "set": {"metric": 50, "locPrf": 150,}} ] } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -1199,11 +1231,12 @@ def test_route_map_match_only_no_set_p0(request): "r3": { "dest_link": { "r1": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "out", + } + ] } } } @@ -1216,63 +1249,62 @@ def test_route_map_match_only_no_set_p0(request): "r3": { "dest_link": { "r1": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "out", + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) assert 
result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_5 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_6 = { "r3": { "route_maps": { - "rmap_match_pf_2_{}".format(addr_type): [{ + "rmap_match_pf_2_{}".format(addr_type): [ + { "action": "permit", "match": { addr_type: { - "prefix_lists": - "pf_list_1_{}".format(addr_type) + "prefix_lists": "pf_list_1_{}".format(addr_type) } - } + }, } ] } @@ -1280,7 +1312,8 @@ def test_route_map_match_only_no_set_p0(request): } result = create_route_maps(tgen, input_dict_6) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_7 = { @@ -1293,25 +1326,27 @@ def test_route_map_match_only_no_set_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -1321,47 +1356,50 @@ def test_route_map_match_only_no_set_p0(request): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for adt in ADDR_TYPES: # Verifying RIB routes static_routes = [NETWORK[adt][0]] - result = verify_bgp_attributes(tgen, adt, "r3", static_routes, - "rmap_match_pf_1", input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_attributes( + tgen, adt, "r3", static_routes, "rmap_match_pf_1", input_dict_3 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) - - diff --git a/tests/topotests/bgp-route-map/test_route_map_topo2.py b/tests/topotests/bgp-route-map/test_route_map_topo2.py index f2398c33ff..3056aa29f3 100755 --- a/tests/topotests/bgp-route-map/test_route_map_topo2.py +++ b/tests/topotests/bgp-route-map/test_route_map_topo2.py @@ -122,17 +122,31 @@ from mininet.topo import Topo # Required to instantiate the topology builder class. 
from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, create_static_routes, - verify_rib, delete_route_maps, create_bgp_community_lists, - interface_status, create_route_maps, create_prefix_lists, - verify_route_maps, check_address_types, verify_bgp_community, - shutdown_bringup_interface, verify_prefix_lists, reset_config_on_routers, - verify_create_community_list) + start_topology, + write_test_header, + write_test_footer, + create_static_routes, + verify_rib, + delete_route_maps, + create_bgp_community_lists, + interface_status, + create_route_maps, + create_prefix_lists, + verify_route_maps, + check_address_types, + verify_bgp_community, + shutdown_bringup_interface, + verify_prefix_lists, + reset_config_on_routers, + verify_create_community_list, +) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify, verify_bgp_attributes) + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_attributes, +) from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -147,10 +161,7 @@ except IOError: # Global variables # Global variables bgp_convergence = False -NETWORK = { - "ipv4": ["11.0.20.1/32", "11.0.20.2/32"], - "ipv6": ["2::1/128", "2::2/128"] -} +NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]} bgp_convergence = False BGP_CONVERGENCE = False @@ -180,7 +191,7 @@ def setup_module(mod): """ testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -205,8 +216,9 @@ def setup_module(mod): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) logger.info("Running setup_module() done") @@ -222,9 +234,10 @@ def teardown_module(mod): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}".format( - time.asctime(time.localtime(time.time())))) - logger.info("="*40) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) ##################################################### @@ -254,134 +267,126 @@ def test_rmap_match_prefix_list_permit_in_and_outbound_prefixes_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit",} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }] - } + "pf_list_1_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit",} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - # Create route map + # Create route map input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_" + addr_type 
- } + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"locPrf": 150, "weight": 100}, }, - "set": { - "localpref": 150, - "weight": 100 - } - }, ], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_" + addr_type - } + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"metric": 50}, }, - "set": { - "med": 50 - } - }, - ] + ], } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -390,48 +395,52 @@ def test_rmap_match_prefix_list_permit_in_and_outbound_prefixes_p0(): # dual stack changes for addr_type in ADDR_TYPES: - result4 = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } # dual stack changes for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result4 = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result4 = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + 
tc_name, result4 + ) # Verifying RIB routes dut = "r4" protocol = "bgp" # dual stack changes for addr_type in ADDR_TYPES: - result4 = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } # dual stack changes for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) # Uncomment next line for debugging @@ -462,267 +471,271 @@ def test_modify_set_match_clauses_in_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }], - "pf_list_2_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit",} + ], + "pf_list_2_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ], }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }], - "pf_list_2_ipv6": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit",} + ], + "pf_list_2_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit"} + ], + }, } } - } + } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, } - }, - "set": { - "localpref": 150, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, } - }, - "set": { - "med": 50 - } - }] + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - 
"name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } # dual stack changes for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result4 = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result4 = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying RIB routes dut = "r4" protocol = "bgp" # dual stack changes for addr_type in ADDR_TYPES: - result4 = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result4 is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result4) + tc_name, result4 + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify set/match clause of in-used route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - 
addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 1000, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 2000 + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 1000,}, + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 2000}, + } + ], } - }] - } - } + } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -750,37 +763,29 @@ def test_delete_route_maps_p1(): # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "tag": "4001" - } - } - }] + "r3": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + {"action": "deny", "match": {addr_type: {"tag": "4001"}}} + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Delete route maps for addr_type in ADDR_TYPES: - input_dict = { - "r3": { - "route_maps": 
["rmap_match_tag_1_{}".format(addr_type)] - } - } + input_dict = {"r3": {"route_maps": ["rmap_match_tag_1_{}".format(addr_type)]}} result = delete_route_maps(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) result = verify_route_maps(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) # Uncomment next line for debugging @@ -810,226 +815,223 @@ def test_modify_prefix_list_referenced_by_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit",} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit", - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit",} + ] + }, } } - } + } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150, "weight": 100}, } - }, - "set": { - "localpref": 150, - "weight": 100 - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, } - }, - "set": { - "med": 50 - } - }] + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + 
"neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Modify ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "deny" - }] - }, - "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "deny" - }] - } + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "deny"} + ] + }, + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "deny"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) sleep(5) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".format( - tc_name, result) + "routes are not 
present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" "Expected behaviour: routes are not present \n " - "Error: {}".format( - tc_name, result) + "Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1059,234 +1061,240 @@ def test_remove_prefix_list_referenced_by_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } - } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, } - }, - "set": { - "localpref": 150, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 50 - } - }] + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map for addr_type in ADDR_TYPES: input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + 
"unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Remove/Delete prefix list input_dict_3 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit", - "delete": True - }] - }, + "pf_list_1_ipv4": [ + { + "seqid": 10, + "network": "any", + "action": "permit", + "delete": True, + } + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit", - "delete": True - }] + "pf_list_1_ipv6": [ + { + "seqid": 100, + "network": "any", + "action": "permit", + "delete": True, + } + ] + }, } } - } } result = create_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase 
{} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".format( - tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".\ - format(tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -1318,157 +1326,170 @@ def test_add_and_remove_community_list_referenced_by_rmap_p0(): # Create route map for addr_type in ADDR_TYPES: input_dict_5 = { - "r1": { - "route_maps": { - "rm_r1_out_{}".format(addr_type): [{ - "action": "permit", - "set": { - "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"} - } - }] + "r1": { + "route_maps": { + "rm_r1_out_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"} + }, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_6 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": "rm_r1_out_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": "rm_r1_out_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rm_r1_out_ipv4", + "direction": "out", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rm_r1_out_ipv6", + "direction": "out", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: # Create standard large commumity-list - input_dict_1 = { - "r3": { - "bgp_community_lists": [ - { - "community_type": "standard", - "action": "permit", - "name": "rmap_lcomm_{}".format(addr_type), - "value": "1:1:1 1:2:3 2:1:1 2:2:2", - "large": True - } - ] - } + input_dict_1 = { + "r3": { + "bgp_community_lists": [ + { + "community_type": "standard", + "action": "permit", + "name": "rmap_lcomm_{}".format(addr_type), + "value": "1:1:1 1:2:3 2:1:1 2:2:2", + "large": True, + } + ] } - result = create_bgp_community_lists(tgen, input_dict_1) - assert result is 
True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + } + result = create_bgp_community_lists(tgen, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) # Verify BGP large community is created result = verify_create_community_list(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - # Create route map + # Create route map input_dict_2 = { - "r3": { - "route_maps": { - "rm_r3_in_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type : { - "large-community-list": {"id": "rmap_lcomm_"+ - addr_type} - } - } - }] + "r3": { + "route_maps": { + "rm_r3_in_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "large-community-list": { + "id": "rmap_lcomm_" + addr_type + } + } + }, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_2) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_3 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": "rm_r3_in_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": "rm_r3_in_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rm_r3_in_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rm_r3_in_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) sleep(5) # Verifying RIB routes @@ -1476,25 +1497,25 @@ def test_add_and_remove_community_list_referenced_by_rmap_p0(): protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verify large-community-list dut = "r3" networks = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] - } - input_dict_4 = { - "largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2" + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } + input_dict_4 = {"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2"} for addr_type in ADDR_TYPES: - result = verify_bgp_community(tgen, addr_type, dut, networks[ - addr_type],input_dict_4) + result = verify_bgp_community( + tgen, addr_type, dut, networks[addr_type], input_dict_4 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) # Uncomment next line for debugging @@ -1520,242 +1541,220 @@ def test_multiple_match_statement_in_route_map_logical_ORed_p0(): # Api call to 
advertise networks input_dict_nw1 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "advertise_networks": [ - {"network": "10.0.30.1/32"} - ] - } - }, - "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": {"advertise_networks": [{"network": "10.0.30.1/32"}]} + }, + "ipv6": { + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } } + } result = create_router_bgp(tgen, topo, input_dict_nw1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Api call to advertise networks input_dict_nw2 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "advertise_networks": [ - {"network": "20.0.30.1/32"} - ] - } - }, - "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "2::1/128"} - ] - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": {"advertise_networks": [{"network": "20.0.30.1/32"}]} + }, + "ipv6": { + "unicast": {"advertise_networks": [{"network": "2::1/128"}]} + }, } } } + } result = create_router_bgp(tgen, topo, input_dict_nw2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_2_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_2_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_2_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_2_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - input_dict_3_addr_type ={} + input_dict_3_addr_type = {} # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150}, } - }, - "set": { - "localpref": 150 - } - }] + ] + } } } - } input_dict_3_addr_type[addr_type] = input_dict_3 result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - 
tc_name, result) + tc_name, result + ) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 200 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 200}, + } + ] + } } } - } input_dict_3_addr_type[addr_type] = input_dict_3 result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_6 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" - routes = { - "ipv4": ["10.0.30.1/32"], - "ipv6": ["1::1/128"] - } + routes = {"ipv4": ["10.0.30.1/32"], "ipv6": ["1::1/128"]} for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3_addr_type[addr_type]) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3_addr_type[addr_type], + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) # Verifying BGP set attributes - routes = { - "ipv4": ["20.0.30.1/32"], - "ipv6": ["2::1/128"] - } + routes = {"ipv4": ["20.0.30.1/32"], "ipv6": ["2::1/128"]} for addr_type in ADDR_TYPES: - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1785,79 +1784,80 @@ def 
test_multiple_match_statement_in_route_map_logical_ANDed_p1(): input_dict_5 = { "r1": { "route_maps": { - "rm_r1_out_{}".format(addr_type): [{ - "action": "permit", - "set": { - "large_community": { - "num": "1:1:1 1:2:3 2:1:1 2:2:2"} + "rm_r1_out_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"} + }, } - }] + ] } } } result = create_route_maps(tgen, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map for addr_type in ADDR_TYPES: input_dict_6 = { - "r1": { - "bgp": { - "address_family": { - addr_type: { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rm_r1_out_{}".format(addr_type), - "direction": "out" - }] - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rm_r1_out_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } } result = create_router_bgp(tgen, topo, input_dict_6) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create ip prefix list input_dict_2 = { "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - # Create standard large commumity-list + # Create standard large commumity-list input_dict_1 = { "r3": { "bgp_community_lists": [ @@ -1866,98 +1866,105 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): "action": "permit", "name": "rmap_lcomm_{}".format(addr_type), "value": "1:1:1 1:2:3 2:1:1 2:2:2", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verify BGP large community is created result = verify_create_community_list(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: - # Create route map + # Create route map input_dict_3 = { "r3": { 
"route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type : { - "large_community_list": {"id": "rmap_lcomm_"+ - addr_type} - } - }, - "set": { - "localpref": 150, + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "large_community_list": { + "id": "rmap_lcomm_" + addr_type + } + } + }, + "set": {"locPrf": 150,}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map for addr_type in ADDR_TYPES: input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - addr_type: { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_{}".format(addr_type), - "direction": "in" - }] - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_{}".format( + addr_type + ), + "direction": "in", + } + ] + } + } + } + } + } + } + } + } + } } result = create_router_bgp(tgen, topo, input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # sleep(10) # Verifying RIB routes dut = "r3" @@ -1966,20 +1973,23 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2008,50 +2018,46 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): input_dict_2 = { "r3": { "prefix_lists": { - "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "deny" - }] - }, - "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "deny" - }] - } + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "deny"} + ] + }, + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "deny"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: 
{ + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2064,11 +2070,12 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } } @@ -2081,36 +2088,36 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error" - "Routes are still present: {}".format( - tc_name, result) + "Routes are still present: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Remove applied rmap from neighbor @@ -2124,12 +2131,13 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", "direction": "in", - "delete": True - }] + "delete": True, + } + ] } } } @@ -2142,26 +2150,26 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", "direction": "in", - "delete": True - }] + "delete": True, + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2170,7 +2178,8 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2200,50 +2209,45 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: 
{}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - "weight": 100 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150, "weight": 100}, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2256,11 +2260,12 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } } @@ -2273,25 +2278,25 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2300,26 +2305,28 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # clear bgp, so config changes would be reflected dut = "r3" result = clear_bgp_and_verify(tgen, topo, dut) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2328,20 +2335,23 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = 
verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Flap interface to see if route-map properties are intact # Shutdown interface @@ -2358,8 +2368,7 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): # Verify BGP convergence once interface is up result = verify_bgp_convergence(tgen, topo) - assert result is True, ( - "setup_module :Failed \n Error:" " {}".format(result)) + assert result is True, "setup_module :Failed \n Error:" " {}".format(result) # Verifying RIB routes dut = "r3" @@ -2368,20 +2377,23 @@ def test_clear_bgp_and_flap_interface_to_verify_rmap_properties_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2409,22 +2421,21 @@ def test_rmap_without_match_and_set_clause_p0(): # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_no_match_set_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "5" - }], - "rmap_no_match_set_2_{}".format(addr_type): [{ - "action": "deny", - "seq_id": "5" - }] + "r3": { + "route_maps": { + "rmap_no_match_set_1_{}".format(addr_type): [ + {"action": "permit", "seq_id": "5"} + ], + "rmap_no_match_set_2_{}".format(addr_type): [ + {"action": "deny", "seq_id": "5"} + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2437,25 +2448,27 @@ def test_rmap_without_match_and_set_clause_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_1_ipv4", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_2_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_2_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -2465,36 +2478,37 @@ def test_rmap_without_match_and_set_clause_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_1_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_no_match_set_2_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_no_match_set_2_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, 
input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2503,17 +2517,18 @@ def test_rmap_without_match_and_set_clause_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".format( - tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -2542,74 +2557,69 @@ def test_set_localpref_weight_to_ebgp_and_med_to_ibgp_peers_p0(): input_dict_2 = { "r3": { "prefix_lists": { - "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, - "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map - input_dict_3_addr_type ={} + input_dict_3_addr_type = {} for addr_type in ADDR_TYPES: input_dict_3 = { "r3": { "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ + "rmap_match_pf_1_{}".format(addr_type): [ + { "action": "permit", "match": { addr_type: { - "prefix_lists": "pf_list_1_{}".format( - addr_type) + "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": { - "med": 50 + "set": {"metric": 50}, + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ + }, + "set": {"locPrf": 150}, + } + ], + "rmap_match_pf_3_{}".format(addr_type): [ + { "action": "permit", "match": { addr_type: { - "prefix_lists": "pf_list_1_{}".format( - addr_type) - }}, - "set": { - "localpref": 150 - } - }], - "rmap_match_pf_3_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format( - addr_type) - }}, - "set": { - "weight": 1000 - } - }] - } + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"weight": 1000}, } - } + ], + } + } + } input_dict_3_addr_type[addr_type] = input_dict_3 result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { @@ -2622,36 +2632,39 @@ def test_set_localpref_weight_to_ebgp_and_med_to_ibgp_peers_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] } } }, "r4": { 
"dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv4", + "direction": "out", + } + ] } } }, "r5": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_3_ipv4", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_3_ipv4", + "direction": "out", + } + ] } } - } + }, } } }, @@ -2661,123 +2674,137 @@ def test_set_localpref_weight_to_ebgp_and_med_to_ibgp_peers_p0(): "r1": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] } } }, "r5": { "dest_link": { "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_3_ipv6", - "direction": "out" - }] + "route_maps": [ + { + "name": "rmap_match_pf_3_ipv6", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_1" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r4" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_2" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3_addr_type[addr_type], - expected=False) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3_addr_type[addr_type], + expected=False, + ) assert result is not True, "Testcase {} : Failed \n" - "Attributes are not set \n Error: {}".format( - tc_name, result) + "Attributes are not set \n Error: {}".format(tc_name, result) logger.info("Expected 
behaviour: {}".format(result)) # Verifying RIB routes dut = "r5" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r5" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_3" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_3_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3_addr_type[addr_type], - expected=False) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3_addr_type[addr_type], + expected=False, + ) assert result is not True, "Testcase {} : Failed \n" - "Attributes are not set \n Error: {}".format( - tc_name, result) + "Attributes are not set \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -2808,97 +2835,92 @@ def test_multiple_set_on_single_sequence_in_rmap_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - "weight": 100, - "med": 50 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150, "weight": 100, "metric": 50}, + } + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + 
} + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -2907,22 +2929,25 @@ def test_multiple_set_on_single_sequence_in_rmap_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_1" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2951,150 +2976,147 @@ def test_route_maps_with_continue_clause_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "10", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150 + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "10", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150}, + "continue": "30", }, - "continue": "30" - }, - { - "action": "permit", - "seq_id": "20", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "20", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - }, - { - "action": "permit", - "seq_id": "30", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "30", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - 
"rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" rmap_name = "rmap_match_pf_1" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] - } - seq_id = { - "ipv4": ["10", "30"], - "ipv6": ["10", "30"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } + seq_id = {"ipv4": ["10", "30"], "ipv6": ["10", "30"]} for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3, seq_id[addr_type]) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3, + seq_id[addr_type], + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -3123,120 +3145,114 @@ def test_route_maps_with_goto_clause_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": "10", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "10", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "goto": "30", }, - "goto": "30" - }, - { - "action": "permit", - "seq_id": "20", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": 
"permit", + "seq_id": "20", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 100}, }, - "set": { - "med": 100 - } - }, - { - "action": "permit", - "seq_id": "30", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } + { + "action": "permit", + "seq_id": "30", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 200}, }, - "set": { - "med": 200 - } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) # tgen.mininet_cli() assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3245,25 +3261,31 @@ def test_route_maps_with_goto_clause_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" rmap_name = "rmap_match_pf_1" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] - } - seq_id = { - "ipv4": ["10", "30"], - "ipv6": ["10", "30"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } + seq_id = {"ipv4": ["10", "30"], "ipv6": ["10", "30"]} for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[ - addr_type],rmap_name, input_dict_3, seq_id[addr_type]) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = verify_bgp_attributes( + tgen, + addr_type, + dut, + routes[addr_type], + rmap_name, + input_dict_3, + seq_id[addr_type], + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -3292,107 +3314,104 @@ def test_route_maps_with_call_clause_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = 
create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150 - }, - "call": "rmap_match_pf_2_{}".format(addr_type) - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 200 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150}, + "call": "rmap_match_pf_2_{}".format(addr_type), + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 200}, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv6", - "direction": "in" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv6", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3401,29 +3420,34 @@ def test_route_maps_with_call_clause_p0(): for addr_type in ADDR_TYPES: result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Verifying BGP set attributes dut = "r3" routes = { - "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], - "ipv6": ["1::1/128", "1::2/128"] + "ipv4": ["10.0.20.1/32", "10.0.20.2/32"], + "ipv6": ["1::1/128", "1::2/128"], } rmap_name = "rmap_match_pf_1" for addr_type in ADDR_TYPES: rmap_name = "rmap_match_pf_1_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) rmap_name = "rmap_match_pf_2" for addr_type in ADDR_TYPES: rmap_name = 
"rmap_match_pf_2_{}".format(addr_type) - result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type], - rmap_name, input_dict_3) + result = verify_bgp_attributes( + tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3 + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -3453,150 +3477,149 @@ def test_create_rmap_match_prefix_list_to_deny_in_and_outbound_prefixes_p0(): "r3": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": "any", - "action": "permit" - }] - }, + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + }, "ipv6": { - "pf_list_1_ipv6": [{ - "seqid": 100, - "network": "any", - "action": "permit" - }] - } + "pf_list_1_ipv6": [ + {"seqid": 100, "network": "any", "action": "permit"} + ] + }, } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map for addr_type in ADDR_TYPES: input_dict_3 = { - "r3": { - "route_maps": { - "rmap_match_pf_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "localpref": 150, - } - }], - "rmap_match_pf_2_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "prefix_lists": "pf_list_1_{}".format(addr_type) - } - }, - "set": { - "med": 50 - } - }] + "r3": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "deny", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"locPrf": 150,}, + } + ], + "rmap_match_pf_2_{}".format(addr_type): [ + { + "action": "deny", + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + "set": {"metric": 50}, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r3": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r1": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_1_ipv4", - "direction": "in" - }] - } - } - }, - "r4": { - "dest_link": { - "r3": { - "route_maps": [{ - "name": - "rmap_match_pf_2_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": "out", + } + ] + } + } + }, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_1_ipv4", + "direction": "in", + } + ] + } + } + }, + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "rmap_match_pf_2_ipv6", + "direction": 
"out", + } + ] + } + } + }, + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" protocol = "bgp" input_dict = topo["routers"] for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".\ - format(tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) # Verifying RIB routes dut = "r4" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are not present \n Error: {}".\ - format(tc_name, result) + "routes are not present \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -3627,18 +3650,15 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -3650,7 +3670,7 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -3658,84 +3678,82 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map input_dict_3 = { - "r1": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "tag": "4001" - } - } - }] + "r1": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + {"action": "permit", "match": {addr_type: {"tag": "4001"}}} + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { 
+ "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv6", + "direction": "out", + } + ] + } + } + } + } + } + }, + } + } + } + } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3745,18 +3763,14 @@ def test_create_rmap_to_match_tag_permit_inbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol) + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -3786,18 +3800,15 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Api call to redistribute static routes input_dict_1 = { @@ -3809,7 +3820,7 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } }, @@ -3817,84 +3828,82 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): "unicast": { "redistribute": [ {"redist_type": "static"}, - {"redist_type": "connected"} + {"redist_type": "connected"}, ] } - } - } + }, + }, } } } result = create_router_bgp(tgen, topo, input_dict_1) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Create route map input_dict_3 = { - "r1": { - "route_maps": { - "rmap_match_tag_1_{}".format(addr_type): [{ - "action": "deny", - "match": { - addr_type: { - "tag": "4001" - } - } - }] + "r1": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + {"action": "deny", "match": {addr_type: {"tag": "4001"}}} + ] + } } } - } result = create_route_maps(tgen, input_dict_3) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) # Configure neighbor for route map input_dict_4 = { - "r1": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv4", - "direction": "out" - }] - } - } - } - } - } - }, - "ipv6": { - "unicast": { - "neighbor": { - "r3": { - "dest_link": { - "r1": { - "route_maps": [{ - "name": - "rmap_match_tag_1_ipv6", - "direction": "out" - }] - } - } - } - } - } - } - } - } - } + "r1": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r1": { + 
"route_maps": [ + { + "name": "rmap_match_tag_1_ipv6", + "direction": "out", + } + ] + } + } + } + } + } + }, + } + } + } } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Verifying RIB routes dut = "r3" @@ -3904,19 +3913,15 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): input_dict = { "r1": { "static_routes": [ - { - "network": NETWORK[addr_type], - "next_hop": "Null0", - "tag": 4001 - } + {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001} ] } } - result = verify_rib(tgen, addr_type, dut, input_dict, - protocol=protocol, expected=False) + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n" - "routes are denied \n Error: {}".format( - tc_name, result) + "routes are denied \n Error: {}".format(tc_name, result) logger.info("Expected behaviour: {}".format(result)) write_test_footer(tc_name) @@ -3924,6 +3929,7 @@ def test_create_rmap_to_match_tag_deny_outbound_prefixes_p0(): # Uncomment next line for debugging # tgen.mininet_cli() + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py index b0d60403db..5aba89e3ca 100755 --- a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py +++ b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp.py @@ -31,7 +31,7 @@ import sys import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -47,7 +47,8 @@ class BGPVRFTopo(Topo): tgen = get_topogen(self) for routern in range(1, 2): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + def setup_module(mod): "Sets up the pytest environment" @@ -58,17 +59,16 @@ def setup_module(mod): for rname, router in tgen.routers().iteritems(): router.run("/bin/bash {}/setup_vrfs".format(CWD)) router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # After loading the configurations, this function loads configured daemons. tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(mod): "Teardown the pytest environment" @@ -77,6 +77,7 @@ def teardown_module(mod): # This function tears down the whole topology. 
tgen.stop_topology() + def test_vrf_route_leak(): logger.info("Ensure that routes are leaked back and forth") tgen = get_topogen() @@ -84,49 +85,50 @@ def test_vrf_route_leak(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] donna = r1.vtysh_cmd("show ip route vrf DONNA json", isjson=True) route0 = donna["10.0.0.0/24"][0] - assert route0['protocol'] == "connected" + assert route0["protocol"] == "connected" route1 = donna["10.0.1.0/24"][0] - assert route1['protocol'] == "bgp" - assert route1['selected'] == True - nhop = route1['nexthops'][0] - assert nhop['fib'] == True + assert route1["protocol"] == "bgp" + assert route1["selected"] == True + nhop = route1["nexthops"][0] + assert nhop["fib"] == True route2 = donna["10.0.2.0/24"][0] - assert route2['protocol'] == "connected" + assert route2["protocol"] == "connected" route3 = donna["10.0.3.0/24"][0] - assert route3['protocol'] == "bgp" - assert route3['selected'] == True - nhop = route3['nexthops'][0] - assert nhop['fib'] == True + assert route3["protocol"] == "bgp" + assert route3["selected"] == True + nhop = route3["nexthops"][0] + assert nhop["fib"] == True eva = r1.vtysh_cmd("show ip route vrf EVA json", isjson=True) route0 = eva["10.0.0.0/24"][0] - assert route0['protocol'] == "bgp" - assert route0['selected'] == True - nhop = route0['nexthops'][0] - assert nhop['fib'] == True + assert route0["protocol"] == "bgp" + assert route0["selected"] == True + nhop = route0["nexthops"][0] + assert nhop["fib"] == True route1 = eva["10.0.1.0/24"][0] - assert route1['protocol'] == "connected" + assert route1["protocol"] == "connected" route2 = eva["10.0.2.0/24"][0] - assert route2['protocol'] == "bgp" - assert route2['selected'] == True - nhop = route2['nexthops'][0] - assert nhop['fib'] == True + assert route2["protocol"] == "bgp" + assert route2["selected"] == True + nhop = route2["nexthops"][0] + assert nhop["fib"] == True route3 = eva["10.0.3.0/24"][0] - assert route3['protocol'] == "connected" - #tgen.mininet_cli() + assert route3["protocol"] == "connected" + # tgen.mininet_cli() + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py b/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py index be29d143dd..fa799f8256 100644 --- a/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py +++ b/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py @@ -39,7 +39,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -47,16 +47,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,51 +68,41 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_aggregate_address_origin(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 3 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}}, } } return topotest.json_cmp(output, expected) def _bgp_aggregate_address_has_metric(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json")) - expected = { - 'paths': [ - { - 'origin': 'IGP' - } - ] - } + expected = {"paths": [{"origin": "IGP"}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -121,8 +113,11 @@ def test_bgp_aggregate_address_origin(): test_func = functools.partial(_bgp_aggregate_address_has_metric, router) success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) - assert result is None, 'Failed to see applied ORIGIN (igp) for aggregated prefix in "{}"'.format(router) + assert ( + result is None + ), 'Failed to see applied ORIGIN (igp) for aggregated prefix in "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git 
a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py index d6753e9b23..9c06c9d382 100644 --- a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py +++ b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py @@ -42,7 +42,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -50,16 +50,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -69,51 +71,41 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 3 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}}, } } return topotest.json_cmp(output, expected) def _bgp_aggregate_address_has_metric(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json")) - expected = { - 'paths': [ - { - 'med': 123 - } - ] - } + expected = {"paths": [{"metric": 123}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -124,8 +116,11 @@ def test_bgp_maximum_prefix_invalid(): test_func = functools.partial(_bgp_aggregate_address_has_metric, router) success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) - assert result is None, 'Failed to see applied metric for aggregated prefix in "{}"'.format(router) + assert ( + result is None + ), 'Failed to see applied metric for aggregated prefix in "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_as_allow_in/bgp_as_allow_in.json b/tests/topotests/bgp_as_allow_in/bgp_as_allow_in.json new file mode 100644 index 0000000000..943876cdac --- /dev/null +++ b/tests/topotests/bgp_as_allow_in/bgp_as_allow_in.json @@ -0,0 +1,266 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": 
"10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r5": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r5": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r5": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + } + } + } + } + } + }, + "r5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "500", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + } + } + } + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py new file mode 100755 index 0000000000..89b15c46d3 --- /dev/null +++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py @@ -0,0 +1,975 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test bgp allowas-in functionality: + +- Verify that routes coming from the same AS are accepted only when + "allowas-in" is configured. +- Verify that the "allowas-in" feature works on a per address-family/VRF + basis and doesn't impact the other AFIs. +- Verify that if the number of occurrences of the AS number in the path is + more than the configured allowas-in value, then we do not accept + the route. +- Verify that when we advertise a network, learned from the same AS + via the allowas-in command, to an iBGP neighbor we see multiple + occurrences. +- Verify that when we advertise a network, learned from the same AS + via the allowas-in command, to an eBGP neighbor we see multiple + occurrences of our own AS based on the configured value+1. +""" + +import os +import sys +import time +import json +import pytest + +# Save the Current Working Directory to find configuration files.
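For readers skimming this new test, the "allowas-in": {"number_occurences": N} entries that the topotest helpers consume further down correspond, roughly, to bgpd configuration of the following shape on the receiving router (a sketch only; AS 200 is the local AS of r3 in the topology JSON above, while the peer addresses are auto-assigned by the framework and shown here as placeholders):

    router bgp 200
     address-family ipv4 unicast
      ! accept prefixes whose AS_PATH already carries our own AS up to once
      neighbor 10.0.0.2 allowas-in 1
     exit-address-family
     address-family ipv6 unicast
      neighbor fd00::2 allowas-in 1
     exit-address-family
    !

Because the knob lives under each address-family, enabling it for IPv4 only leaves IPv6 untouched, which is what the per-AFI test below relies on.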
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + create_route_maps, + check_address_types, + step, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_rib, +) +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/bgp_as_allow_in.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"} +NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"} + + +class BGPALLOWASIN(Topo): + """ + Test BGPALLOWASIN - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(BGPALLOWASIN, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_bgp_allowas_in_p0(request): + """ + Verify that routes coming from same AS are accepted only when + "allowas-in" is configuerd. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Advertise prefix 2.2.2.2/32 from Router-1(AS-200).") + step("Advertise an ipv6 prefix 22:22::2/128 from Router-1(AS-200).") + # configure static routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + 'Check BGP table of router R3 using "sh bgp ipv4" and "sh bgp ' + 'ipv6" command.' + ) + step( + "We should not see prefix advertised from R1 in R3's BGP " + "table without allowas-in." + ) + logger.info("Verifying %s routes on r3, route should not be present", addr_type) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=NEXT_HOP_IP[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Expected behavior: routes should not present in rib \n" + "Error: {}".format(tc_name, result) + + step("Configure allowas-in on R3 for R2.") + step("We should see the prefix advertised from R1 in R3's BGP table.") + # Api call to enable allowas-in in bgp process. + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 1} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_per_addr_family_p0(request): + """ + Verify that "allowas-in" feature works per address-family/VRF + basis and doesn't impact the other AFIs. + + """ + + # This test is applicable only for dual stack. + if "ipv4" not in ADDR_TYPES or "ipv6" not in ADDR_TYPES: + pytest.skip("NOT APPLICABLE") + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Advertise prefix 2.2.2.2/32 from Router-1(AS-200).") + step("Advertise an ipv6 prefix 22:22::2/128 from Router-1(AS-200).") + # configure static routes routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure allowas-in on R3 for R2 under IPv4 addr-family only") + # Api call to enable allowas-in in bgp process. + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {"allowas-in": {"number_occurences": 1}} + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + static_route_ipv4 = { + "r1": { + "static_routes": [ + {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]} + ] + } + } + + static_route_ipv6 = { + "r1": { + "static_routes": [ + {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]} + ] + } + } + step("We should see R1 advertised prefix only in IPv4 AFI " "not in IPv6 AFI.") + result = verify_rib(tgen, "ipv4", dut, static_route_ipv4, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + result = verify_rib( + tgen, "ipv6", dut, static_route_ipv6, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n" + "Expected behavior: routes are should not be present in ipv6 rib\n" + " Error: {}".format(tc_name, result) + + step("Repeat the same test for IPv6 AFI.") + step("Configure allowas-in on R3 for R2 under IPv6 addr-family only") + # Api call to enable allowas-in in bgp process. 
+ input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": { + "number_occurences": 2, + "delete": True, + } + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {"allowas-in": {"number_occurences": 2}} + } + } + } + } + }, + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + step("We should see R1 advertised prefix only in IPv6 AFI " "not in IPv4 AFI.") + result = verify_rib( + tgen, "ipv4", dut, static_route_ipv4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n" + "Expected behavior: routes should not be present in ipv4 rib\n" + " Error: {}".format(tc_name, result) + result = verify_rib(tgen, "ipv6", dut, static_route_ipv6, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_no_of_occurrences_p0(request): + """ + Verify that the if number of occurrences of AS number in path is + more than the configured allowas-in value then we do not accept + the route. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, static_routes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure a route-map on R1 to prepend AS 4 times.") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r1": { + "route_maps": { + "ASP_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "path": { + "as_num": "200 200 200 200", + "as_action": "prepend", + } + }, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure route map in out direction on R1") + # Configure neighbor for route map + input_dict_7 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": { + "route_maps": [ + { + "name": "ASP_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_7) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + step('Configure "allowas-in 4" on R3 for R2.') + # Api call to enable allowas-in in bgp process. 
+ input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 4} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + result = verify_rib( + tgen, addr_type, dut, static_routes, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n " + "Expected behavior: routes are should not be present in rib\n" + "Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + step('Configure "allowas-in 5" on R3 for R2.') + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 5} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + result = verify_rib(tgen, addr_type, dut, static_routes, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_sameastoibgp_p1(request): + """ + Verify that when we advertise a network, learned from the same AS + via allowas-in command, to an iBGP neighbor we see multiple + occurrences. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
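The iBGP scenario described in the docstring above can also be spot-checked by hand: per the assertions at the end of this test, r4 (the iBGP peer of r3) should end up with 2.2.2.2/32 and 22:22::2/128 carrying AS_PATH "100 200 200 200", i.e. the duplicate 200s admitted on r3 via allowas-in are passed on unchanged to the iBGP neighbor. A rough manual equivalent of that check (commands only, run on r4):

    vtysh -c 'show bgp ipv4 unicast 2.2.2.2/32'
    vtysh -c 'show bgp ipv6 unicast 22:22::2/128'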
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, static_routes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure a route-map on R2 to prepend AS 2 times.") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "route_maps": { + "ASP_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "path": {"as_num": "200 200", "as_action": "prepend"} + }, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure route map in out direction on R2") + # Configure neighbor for route map + input_dict_7 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "ASP_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_7) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step('Configure "allowas-in 3" on R3 for R1.') + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 3} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_1 = { + "r4": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": { + "allowas-in": {"number_occurences": 3} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + dut = "r4" + path = "100 200 200 200" + result = verify_bgp_rib(tgen, addr_type, dut, static_routes, aspath=path) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_bgp_allowas_in_sameastoebgp_p1(request): + """ + Verify that when we advertise a network, learned from the same AS + via allowas-in command, to an eBGP neighbor we see multiple + occurrences of our own AS based on configured value+1. + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, static_routes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static in Router BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure a route-map on R2 to prepend AS 2 times.") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "route_maps": { + "ASP_{}".format(addr_type): [ + { + "action": "permit", + "set": { + "path": {"as_num": "200 200", "as_action": "prepend"} + }, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure route map in out direction on R2") + # Configure neighbor for route map + input_dict_7 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "ASP_{}".format( + addr_type + ), + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_7) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + step('Configure "allowas-in 3" on R3 for R1.') + input_dict_1 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "allowas-in": {"number_occurences": 3} + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + static_routes = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + dut = "r5" + path = "200 100 200 200 200" + result = verify_bgp_rib(tgen, addr_type, dut, static_routes, aspath=path) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/__init__.py b/tests/topotests/bgp_as_wide_bgp_identifier/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/__init__.py diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r1/bgpd.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r1/bgpd.conf new file mode 100644 index 0000000000..fc273ba7c6 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r1/bgpd.conf @@ -0,0 +1,5 @@ +! exit1 +router bgp 65001 + bgp router-id 10.10.10.10 + neighbor 192.168.255.1 remote-as 65002 +! 
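Note the deliberate collision introduced by this new bgp_as_wide_bgp_identifier test: r1 above (AS 65001, eBGP towards r2) and, in the configs that follow, r2 and r3 (both AS 65002, iBGP with each other) all set bgp router-id 10.10.10.10. RFC 6286 only requires the identifier to be unique within an AS, so the expectation encoded in the test is that the r2-r3 iBGP session stays down with a Bad BGP Identifier notification while the r1-r2 eBGP session still establishes. A rough manual check, using the same JSON fields the test itself asserts on:

    # on r3 (iBGP peer that shares r2's router-id): expect
    # lastNotificationReason "OPEN Message Error/Bad BGP Identifier"
    vtysh -c 'show ip bgp neighbor 192.168.255.1 json'
    # on r1 (eBGP peer of r2): expect bgpState "Established"
    vtysh -c 'show ip bgp neighbor 192.168.255.1 json'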
diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r1/zebra.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r1/zebra.conf new file mode 100644 index 0000000000..c060e1402e --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r1/zebra.conf @@ -0,0 +1,6 @@ +! exit1 +interface r1-eth0 + ip address 192.168.255.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r2/bgpd.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r2/bgpd.conf new file mode 100644 index 0000000000..1134d98b38 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r2/bgpd.conf @@ -0,0 +1,6 @@ +! spine +router bgp 65002 + bgp router-id 10.10.10.10 + neighbor 192.168.255.2 remote-as 65001 + neighbor 192.168.255.3 remote-as 65002 +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r2/zebra.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r2/zebra.conf new file mode 100644 index 0000000000..a45520f97f --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r2/zebra.conf @@ -0,0 +1,6 @@ +! spine +interface r2-eth0 + ip address 192.168.255.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r3/bgpd.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r3/bgpd.conf new file mode 100644 index 0000000000..fa943d10c3 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r3/bgpd.conf @@ -0,0 +1,5 @@ +! exit2 +router bgp 65002 + bgp router-id 10.10.10.10 + neighbor 192.168.255.1 remote-as 65002 +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/r3/zebra.conf b/tests/topotests/bgp_as_wide_bgp_identifier/r3/zebra.conf new file mode 100644 index 0000000000..2f4dbc5efd --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/r3/zebra.conf @@ -0,0 +1,6 @@ +! exit2 +interface r3-eth0 + ip address 192.168.255.3/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py b/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py new file mode 100644 index 0000000000..ebd6075b52 --- /dev/null +++ b/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python + +# +# test_bgp_as_wide_bgp_identifier.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +rfc6286: Autonomous-System-Wide Unique BGP Identifier for BGP-4 +Test if 'Bad BGP Identifier' notification is sent only to +internal peers (autonomous-system-wide). eBGP peers are not +affected and should work. 
+""" + +import os +import sys +import json +import time +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + + +class TemplateTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + for routern in range(1, 4): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.iteritems(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_as_wide_bgp_identifier(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_converge(router): + output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) + expected = {"192.168.255.1": {"bgpState": "Established"}} + return topotest.json_cmp(output, expected) + + def _bgp_failed(router): + output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) + expected = { + "192.168.255.1": { + "lastNotificationReason": "OPEN Message Error/Bad BGP Identifier" + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge, tgen.gears["r1"]) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Failed to converge: "{}"'.format(tgen.gears["r1"]) + + test_func = functools.partial(_bgp_failed, tgen.gears["r3"]) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Bad BGP Identifier notification not sent: "{}"'.format( + tgen.gears["r3"] + ) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py index ed350ebfeb..314ad12a6d 100644 --- a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py +++ b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py @@ -37,7 +37,7 @@ import time import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -45,16 +45,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -64,20 +66,20 @@ def 
setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() @@ -86,20 +88,30 @@ def test_bgp_maximum_prefix_invalid(): def _bgp_converge(router): while True: - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - if output['192.168.255.1']['bgpState'] == 'Established': - if output['192.168.255.1']['addressFamilyInfo']['ipv4Unicast']['acceptedPrefixCounter'] == 2: + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + if output["192.168.255.1"]["bgpState"] == "Established": + if ( + output["192.168.255.1"]["addressFamilyInfo"]["ipv4Unicast"][ + "acceptedPrefixCounter" + ] + == 2 + ): return True def _bgp_comm_list_delete(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")) - if '333:333' in output['paths'][0]['community']['list']: + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json") + ) + if "333:333" in output["paths"][0]["community"]["list"]: return False return True - if _bgp_converge('r2'): - assert _bgp_comm_list_delete('r2') == True + if _bgp_converge("r2"): + assert _bgp_comm_list_delete("r2") == True + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_communities_topo1/bgp_communities.json b/tests/topotests/bgp_communities_topo1/bgp_communities.json new file mode 100644 index 0000000000..da6aec239f --- /dev/null +++ b/tests/topotests/bgp_communities_topo1/bgp_communities.json @@ -0,0 +1,175 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r0": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + 
"type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + }, + "r0": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py new file mode 100644 index 0000000000..7d960d6916 --- /dev/null +++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py @@ -0,0 +1,635 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test bgp community functionality: +- Verify routes are not advertised when NO-ADVERTISE Community is applied + +""" + +import os +import sys +import time +import json +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_route_maps, + create_prefix_lists, + create_route_maps, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + verify_bgp_rib, +) +from lib.topojson import build_topo_from_json, build_config_from_json +from copy import deepcopy + +# Reading the data from JSON File for topology creation +jsonFile = "{}/bgp_communities.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"} +NEXT_HOP_IP = {} + + +class BGPCOMMUNITIES(Topo): + """ + Test BGPCOMMUNITIES - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(BGPCOMMUNITIES, mod.__name__) + # ... and here it calls Mininet initialization functions. 
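The NO-ADVERTISE scenario described in this file's docstring boils down to the following r2 configuration, generated later in the test via create_prefix_lists()/create_route_maps() (a sketch; AS 200 is r2's local AS in the topology JSON above, and the r1 peer address is auto-assigned by the framework, so it is shown as a placeholder):

    ip prefix-list pf_list_1_ipv4 seq 10 permit any
    !
    route-map rmap_match_pf_1_ipv4 permit 5
     match ip address prefix-list pf_list_1_ipv4
     set community no-advertise
    !
    router bgp 200
     address-family ipv4 unicast
      ! routes tagged NO_ADVERTISE are accepted on r2 but never re-advertised to r3
      neighbor <r1-address> route-map rmap_match_pf_1_ipv4 in

The test then verifies that the prefixes vanish from r3 while the inbound route-map is applied and reappear once it is deleted.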
+ + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_bgp_no_advertise_community_p0(request): + """ + Verify routes are not advertised when NO-ADVERTISE Community is applied + + """ + + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + reset_config_on_routers(tgen) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + NEXT_HOP_IP = { + "ipv4": topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0], + } + + # configure static routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static and connected in Router BGP " "in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static"}, + {"redist_type": "connected"}, + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "BGP neighbors are up, static and connected route advertised from" + " R1 are present on R2 BGP table and RIB using show ip bgp and " + " show ip route" + ) + step( + "Static and connected route advertised from R1 are present on R3" + " BGP table and RIB using show ip bgp and show ip route" + ) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure prefix list P1 on R2 to permit route coming from R1") + # Create ip prefix list + input_dict_2 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_list_1_{}".format(addr_type): [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + 
assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"community": {"num": "no-advertise"}}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + "Apply route-map RM1 on R2, R2 to R3 BGP neighbor with no" + " advertise community" + ) + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After advertising no advertise community to BGP neighbor " + "static and connected router got removed from R3 verify using " + "show ip bgp & show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + step("Remove and Add no advertise community") + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + "delete": True, + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing no advertise community from BGP neighbor " + "static and connected router got advertised to R3 and " + "removing route-map, verify route using show ip bgp" + " and show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. 
Error: {}".format(tc_name, result) + + step("Repeat above steps when IBGP nbr configured between R1, R2 & R2, R3") + topo1 = deepcopy(topo) + + topo1["routers"]["r1"]["bgp"]["local_as"] = "100" + topo1["routers"]["r2"]["bgp"]["local_as"] = "100" + topo1["routers"]["r3"]["bgp"]["local_as"] = "100" + + for rtr in ["r1", "r2", "r3"]: + if "bgp" in topo1["routers"][rtr].keys(): + delete_bgp = {rtr: {"bgp": {"delete": True}}} + result = create_router_bgp(tgen, topo1, delete_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + config_bgp = { + rtr: {"bgp": {"local_as": topo1["routers"][rtr]["bgp"]["local_as"]}} + } + result = create_router_bgp(tgen, topo1, config_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + build_config_from_json(tgen, topo1, save_bkup=False) + + step("verify bgp convergence before starting test case") + + bgp_convergence = verify_bgp_convergence(tgen, topo1) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + # configure static routes + dut = "r3" + protocol = "bgp" + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict = { + "r1": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute static and connected in Router " "BGP in R1") + + input_dict_2 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static"}, + {"redist_type": "connected"}, + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "BGP neighbors are up, static and connected route advertised from" + " R1 are present on R2 BGP table and RIB using show ip bgp and " + " show ip route" + ) + step( + "Static and connected route advertised from R1 are present on R3" + " BGP table and RIB using show ip bgp and show ip route" + ) + + dut = "r2" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure prefix list P1 on R2 to permit route coming from R1") + # Create ip prefix list + input_dict_2 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_list_1_{}".format(addr_type): [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_pf_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": "5", + "match": { + addr_type: {"prefix_lists": "pf_list_1_" + addr_type} + }, + "set": {"community": {"num": "no-advertise"}}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + "Apply route-map RM1 on R2, R2 to R3 BGP neighbor with no" + " 
advertise community" + ) + + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After advertising no advertise community to BGP neighbor " + "static and connected router got removed from R3 verify using " + "show ip bgp & show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + step("Remove and Add no advertise community") + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "rmap_match_pf_1_" + + addr_type, + "direction": "in", + "delete": True, + } + ] + } + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing no advertise community from BGP neighbor " + "static and connected router got advertised to R3 and " + "removing route verify using show ip bgp and " + " show ip route" + ) + + result = verify_bgp_rib(tgen, addr_type, dut, input_dict) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. Error: {}".format(tc_name, result) + + result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n " + " Routes still present in R3 router. 
Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py b/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py index 992ee85ab1..ba9a6dffb5 100644 --- a/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py +++ b/tests/topotests/bgp_default-route_route-map/test_bgp_default-originate_route-map.py @@ -42,7 +42,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -50,16 +50,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -69,51 +71,41 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_default_originate_route_map(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 1 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 1}}, } } return topotest.json_cmp(output, expected) def _bgp_default_route_has_metric(router): output = json.loads(router.vtysh_cmd("show ip bgp 0.0.0.0/0 json")) - expected = { - 'paths': [ - { - 'med': 123 - } - ] - } + expected = {"paths": [{"metric": 123}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -124,8 +116,11 @@ def test_bgp_default_originate_route_map(): test_func = functools.partial(_bgp_default_route_has_metric, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Failed to see applied metric for default route in "{}"'.format(router) + assert ( + result is None + ), 'Failed to see applied metric for default route in "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py index cf95aec098..6d09cd2e8c 100644 --- 
a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py +++ b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py @@ -41,7 +41,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -49,16 +49,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -68,60 +70,51 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r1'] + router = tgen.gears["r1"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) def _bgp_distance_change(router): - router.vtysh_cmd(""" + router.vtysh_cmd( + """ configure terminal router bgp 65000 address-family ipv4 unicast distance bgp 123 123 123 - """) + """ + ) def _bgp_check_distance_change(router): output = json.loads(router.vtysh_cmd("show ip route 172.16.255.254/32 json")) - expected = { - '172.16.255.254/32': [ - { - 'protocol': 'bgp', - 'distance': 123 - } - ] - } + expected = {"172.16.255.254/32": [{"protocol": "bgp", "distance": 123}]} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -134,8 +127,11 @@ def test_bgp_maximum_prefix_invalid(): test_func = functools.partial(_bgp_check_distance_change, router) success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5) - assert result is None, 'Failed to see applied BGP distance in RIB "{}"'.format(router) + assert result is None, 'Failed to see applied BGP distance in RIB "{}"'.format( + router + ) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py index 6660b4e866..bdacff3a9c 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py +++ b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py @@ -37,7 
+37,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -45,24 +45,26 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 7): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r5"]) + switch.add_link(tgen.gears["r6"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r5']) - switch.add_link(tgen.gears['r6']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -72,20 +74,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_ebgp_requires_policy(): tgen = get_topogen() @@ -93,51 +95,46 @@ def test_ebgp_requires_policy(): pytest.skip(tgen.errors) def _bgp_converge(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - expected = { - '192.168.255.1': { - 'bgpState': 'Established' - } - } + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + expected = {"192.168.255.1": {"bgpState": "Established"}} return topotest.json_cmp(output, expected) def _bgp_has_routes(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 routes json")) - expected = { - 'routes': { - '172.16.255.254/32': [ - { - 'valid': True - } - ] - } - } + output = json.loads( + tgen.gears[router].vtysh_cmd( + "show ip bgp neighbor 192.168.255.1 routes json" + ) + ) + expected = {"routes": {"172.16.255.254/32": [{"valid": True}]}} return topotest.json_cmp(output, expected) - test_func = functools.partial(_bgp_converge, 'r2') + test_func = functools.partial(_bgp_converge, "r2") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(router) - test_func = functools.partial(_bgp_has_routes, 'r2') + test_func = functools.partial(_bgp_has_routes, "r2") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(router) - test_func = functools.partial(_bgp_converge, 'r4') + test_func = functools.partial(_bgp_converge, "r4") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert success is True, 'Failed bgp convergence (r4) in 
"{}"'.format(router) - test_func = functools.partial(_bgp_has_routes, 'r4') + test_func = functools.partial(_bgp_has_routes, "r4") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(router) - test_func = functools.partial(_bgp_converge, 'r6') + test_func = functools.partial(_bgp_converge, "r6") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(router) - test_func = functools.partial(_bgp_has_routes, 'r6') + test_func = functools.partial(_bgp_has_routes, "r6") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(router) -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py b/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py index 115c7793ad..47cc0eb39d 100755 --- a/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py +++ b/tests/topotests/bgp_instance_del_test/test_bgp_instance_del_test.py @@ -25,65 +25,72 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")) from lib.ltemplate import * + def test_check_linux_vrf(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/check_linux_vrf.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_vrf.py", False, CliOnFail, CheckFunc) + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def SKIP_test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the 
next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) -#manual data path setup test - remove once have bgp/zebra vrf path working + +# manual data path setup test - remove once have bgp/zebra vrf path working def test_check_linux_mpls(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/check_linux_mpls.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc) + def test_del_bgp_instances(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/del_bgp_instances.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/del_bgp_instances.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py b/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py index 6cf223af42..10b2f3595f 100644 --- a/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py +++ b/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -48,17 +48,19 @@ from mininet.topo import Topo class BGPIPV6RTADVTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 2 routers. 
- tgen.add_router('r1') - tgen.add_router('r2') + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): "Sets up the pytest environment" @@ -69,17 +71,16 @@ def setup_module(mod): for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. tgen.start_router() + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -99,44 +100,51 @@ def test_protocols_convergence(): # Check IPv4 routing tables. logger.info("Checking IPv4 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip route json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg # Check IPv6 routing tables. logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ipv6 route json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py index b4649059bc..05db9ab14b 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/customize.py @@ -88,12 +88,15 @@ from lib.ltemplate import ltemplateRtrCmd from mininet.topo import Topo import shutil + CWD = os.path.dirname(os.path.realpath(__file__)) # test name based on directory TEST = os.path.basename(CWD) + class ThisTestTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -102,68 +105,71 @@ class ThisTestTopo(Topo): # between routers, switches and hosts. # # Create P/PE routers - tgen.add_router('r1') - #check for mpls + tgen.add_router("r1") + # check for mpls if tgen.hasmpls != True: - logger.info('MPLS not available, tests will be skipped') + logger.info("MPLS not available, tests will be skipped") return for routern in range(2, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create CE routers for routern in range(1, 4): - tgen.add_router('ce{}'.format(routern)) + tgen.add_router("ce{}".format(routern)) - #CE/PE links - tgen.add_link(tgen.gears['ce1'], tgen.gears['r1'], 'ce1-eth0', 'r1-eth4') - tgen.add_link(tgen.gears['ce2'], tgen.gears['r3'], 'ce2-eth0', 'r3-eth4') - tgen.add_link(tgen.gears['ce3'], tgen.gears['r4'], 'ce3-eth0', 'r4-eth4') + # CE/PE links + tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4") + tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4") + tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4") # Create a switch with just one router connected to it to simulate a # empty network. 
switch = {} - switch[0] = tgen.add_switch('sw0') - switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0') - switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0') + switch[0] = tgen.add_switch("sw0") + switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0") + switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0") - switch[1] = tgen.add_switch('sw1') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0') - switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0') + switch[1] = tgen.add_switch("sw1") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0") + switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0") + + switch[1] = tgen.add_switch("sw2") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1") - switch[1] = tgen.add_switch('sw2') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth2') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth1') def ltemplatePreRouterStartHook(): cc = ltemplateRtrCmd() tgen = get_topogen() - logger.info('pre router-start hook') - #check for mpls + logger.info("pre router-start hook") + # check for mpls if tgen.hasmpls != True: - logger.info('MPLS not available, skipping setup') + logger.info("MPLS not available, skipping setup") return False - #check for normal init + # check for normal init if len(tgen.net) == 1: - logger.info('Topology not configured, skipping setup') + logger.info("Topology not configured, skipping setup") return False - #configure r2 mpls interfaces - intfs = ['lo', 'r2-eth0', 'r2-eth1', 'r2-eth2'] + # configure r2 mpls interfaces + intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"] for intf in intfs: - cc.doCmd(tgen, 'r2', 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - #configure MPLS - rtrs = ['r1', 'r3', 'r4'] - cmds = ['echo 1 > /proc/sys/net/mpls/conf/lo/input'] + cc.doCmd(tgen, "r2", "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)) + # configure MPLS + rtrs = ["r1", "r3", "r4"] + cmds = ["echo 1 > /proc/sys/net/mpls/conf/lo/input"] for rtr in rtrs: router = tgen.gears[rtr] for cmd in cmds: cc.doCmd(tgen, rtr, cmd) - intfs = ['lo', rtr+'-eth0', rtr+'-eth4'] + intfs = ["lo", rtr + "-eth0", rtr + "-eth4"] for intf in intfs: - cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - logger.info('setup mpls input') + cc.doCmd( + tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf) + ) + logger.info("setup mpls input") return True + def ltemplatePostRouterStartHook(): - logger.info('post router-start hook') + logger.info("post router-start hook") return True - diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py index 3a24367a56..3f1157ad72 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/add_routes.py @@ -1,51 +1,193 @@ from lutil import luCommand -luCommand('r1','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH') -luCommand('r3','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH') -luCommand('r4','vtysh -c "show bgp next"','99.0.0.. 
valid', 'wait', 'See CE static NH') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes') -luCommand('ce1','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('r1','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('ce2','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('r3','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes') -luCommand('ce3','vtysh -c "show bgp ipv4 uni 5.1.2.0/24"','','none','See CE routes') -luCommand('r4','vtysh -c "show bgp ipv4 uni 5.1.2.0/24"','','none','See CE routes') -luCommand('r1','vtysh -c "add vrf cust1 prefix 99.0.0.1/32"','.','none','IP Address') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','wait','Local Registration') -luCommand('r1','vtysh -c "show vnc registrations imported"','2 out of 2 imported','wait','Imported Registrations') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"','i99.0.0.1/32','wait','See R1s static address') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"','i99.0.0.1/32','wait','See R1s static address') -luCommand('r3','vtysh -c "show bgp ipv4 vpn rd 10:1"','i5.*i5','wait','See R1s imports') -luCommand('r4','vtysh -c "show bgp ipv4 vpn rd 10:1"','i5.*i5','wait','See R1s imports') +luCommand( + "r1", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH" +) +luCommand( + "r3", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH" +) +luCommand( + "r4", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH" +) +luCommand("r1", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes") +luCommand("r3", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes") +luCommand("r4", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes") +luCommand("ce1", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("r1", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("ce2", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("r3", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes") +luCommand("ce3", 'vtysh -c "show bgp ipv4 uni 5.1.2.0/24"', "", "none", "See CE routes") +luCommand("r4", 'vtysh -c "show bgp ipv4 uni 5.1.2.0/24"', "", "none", "See CE routes") -luCommand('r3','vtysh -c "add vrf cust1 prefix 99.0.0.2/32"','.','none','IP Address') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','wait','Local Registration') -have2ndImports = luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','none','Imported Registrations',2) +luCommand( + "r1", 'vtysh -c "add vrf cust1 prefix 99.0.0.1/32"', ".", "none", "IP Address" +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "wait", + "Local Registration", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "wait", + "Imported Registrations", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.1/32", + "wait", + "See R1s static address", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.1/32", + "wait", + "See R1s static address", +) +luCommand( + "r3", 'vtysh -c "show bgp ipv4 vpn rd 10:1"', "i5.*i5", "wait", "See R1s imports" +) +luCommand( + "r4", 'vtysh 
-c "show bgp ipv4 vpn rd 10:1"', "i5.*i5", "wait", "See R1s imports" +) + +luCommand( + "r3", 'vtysh -c "add vrf cust1 prefix 99.0.0.2/32"', ".", "none", "IP Address" +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "wait", + "Local Registration", +) +have2ndImports = luCommand( + "r3", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "none", + "Imported Registrations", + 2, +) if have2ndImports: - luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','pass','Imported Registrations') -luCommand('r1','vtysh -c "show bgp ipv4 vpn"','i99.0.0.2/32','wait','See R3s static address') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"','i99.0.0.2/32','wait','See R3s static address') + luCommand( + "r3", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "pass", + "Imported Registrations", + ) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.2/32", + "wait", + "See R3s static address", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.2/32", + "wait", + "See R3s static address", +) if have2ndImports: - luCommand('r1','vtysh -c "show bgp ipv4 vpn rd 10:3"','i5.*i5','none','See R3s imports') - luCommand('r4','vtysh -c "show bgp ipv4 vpn rd 10:3"','i5.*i5','none','See R3s imports') + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn rd 10:3"', + "i5.*i5", + "none", + "See R3s imports", + ) + luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn rd 10:3"', + "i5.*i5", + "none", + "See R3s imports", + ) -luCommand('r4','vtysh -c "add vrf cust1 prefix 99.0.0.3/32"','.','none','IP Address') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','wait','Local Registration') -luCommand('r4','vtysh -c "show vnc registrations imported"','2 out of 2 imported','wait','Imported Registrations') -luCommand('r1','vtysh -c "show bgp ipv4 vpn"','i99.0.0.3/32','wait','See R4s static address') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"','i99.0.0.3/32','wait','See R4s static address') -luCommand('r1','vtysh -c "show bgp ipv4 vpn rd 10:4"','i5.*i5','wait','See R4s imports') -luCommand('r3','vtysh -c "show bgp ipv4 vpn rd 10:4"','i5.*i5','wait','See R4s imports') +luCommand( + "r4", 'vtysh -c "add vrf cust1 prefix 99.0.0.3/32"', ".", "none", "IP Address" +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "wait", + "Local Registration", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "wait", + "Imported Registrations", +) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.3/32", + "wait", + "See R4s static address", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + "i99.0.0.3/32", + "wait", + "See R4s static address", +) +luCommand( + "r1", 'vtysh -c "show bgp ipv4 vpn rd 10:4"', "i5.*i5", "wait", "See R4s imports" +) +luCommand( + "r3", 'vtysh -c "show bgp ipv4 vpn rd 10:4"', "i5.*i5", "wait", "See R4s imports" +) -luCommand('r1','vtysh -c "show vnc registrations remote"','5.1.2.0/24 .*5.1.3.0/24','wait','R4s registrations') -luCommand('r3','vtysh -c "show vnc registrations remote"','5.1.2.0/24 .*5.1.3.0/24','wait','R4s registrations') +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "5.1.2.0/24 .*5.1.3.0/24", + "wait", + "R4s registrations", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "5.1.2.0/24 .*5.1.3.0/24", + "wait", + "R4s registrations", +) if have2ndImports: - luCommand('r1','vtysh -c "show vnc registrations 
remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations') - luCommand('r3','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations') -luCommand('r4','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations') -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') + luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "5.1.0.0/24 .*5.1.1.0/24", + "wait", + "Remote registrations", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "5.1.0.0/24 .*5.1.1.0/24", + "wait", + "Remote registrations", + ) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "5.1.0.0/24 .*5.1.1.0/24", + "wait", + "Remote registrations", +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py index 1317a510d1..ea059c576e 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py @@ -1,20 +1,64 @@ from lutil import luCommand -luCommand('ce1','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping') -luCommand('ce2','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping') -luCommand('ce3','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping') -luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180) -luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180) -luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') -luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping') -luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') + +luCommand("ce1", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping") +luCommand("ce2", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping") +luCommand("ce3", "ping 192.168.1.1 -c 1", " 0. 
packet loss", "pass", "CE->PE ping") +luCommand("ce1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand( + "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r2", + 'vtysh -c "show bgp summary"', + " 00:0.* 00:0.* 00:0", + "wait", + "Core adjacencies up", + 180, +) +luCommand( + "r1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r4", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r1", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r3", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r4", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) +luCommand( + "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping" +) +luCommand( + "r4", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py index 492be9e4da..96b4978261 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/check_routes.py @@ -1,17 +1,55 @@ from lutil import luCommand -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','7 routes and 7','wait','Local and remote routes') -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','7 routes and 9','wait','Local and remote routes') -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','7 routes and 7','wait','Local and remote routes') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI') -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI') -have2ndImports = luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','none','Imported Registrations',2) + +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "7 routes and 7", + "wait", + "Local and remote routes", +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "7 routes and 9", + "wait", + "Local and remote routes", +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "7 routes and 7", + "wait", + "Local and remote routes", +) +luCommand( + "r1", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI" +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Unicast SAFI", +) +luCommand( + "r3", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI" +) +luCommand( + "r4", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", 
"Unicast SAFI" +) +have2ndImports = luCommand( + "r3", + 'vtysh -c "show vnc registrations imported"', + "2 out of 2 imported", + "none", + "Imported Registrations", + 2, +) if have2ndImports: - num = '9 routes and 9' + num = "9 routes and 9" else: - num = '7 routes and 7' -luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI') + num = "7 routes and 7" +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI") diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py index 3a2f037833..9f21d99913 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/cleanup_all.py @@ -1,17 +1,114 @@ from lutil import luCommand -luCommand('r1','vtysh -c "clear vrf cust1 prefix 99.0.0.1/32"','.','none','Cleared VRF route') -luCommand('r3','vtysh -c "clear vrf cust1 prefix 99.0.0.2/32"','.','none','Cleared VRF route') -luCommand('r4','vtysh -c "clear vrf cust1 prefix 99.0.0.3/32"','.','none','Cleared VRF route') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','fail','Local Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','fail','Local Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','fail','Local Registration cleared') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated') -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated') -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes') -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes') -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes') -luCommand('r1','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') + +luCommand( + "r1", + 'vtysh -c "clear vrf cust1 prefix 99.0.0.1/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r3", + 'vtysh -c "clear vrf cust1 prefix 99.0.0.2/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r4", + 'vtysh -c "clear vrf cust1 prefix 99.0.0.3/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "fail", + "Local Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "fail", + "Local Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "fail", + "Local Registration 
cleared", +) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Unicast SAFI", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", +) +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py index f710c84c37..d226904102 100755 --- a/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py @@ -25,46 +25,51 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")) from lib.ltemplate import * + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 
'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) + def test_cleanup_all(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py index e62d139a0c..fb919f02d0 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py @@ -89,12 +89,15 @@ from lib.ltemplate import ltemplateRtrCmd from mininet.topo import Topo import shutil + CWD = os.path.dirname(os.path.realpath(__file__)) # test name based on directory TEST = os.path.basename(CWD) + class ThisTestTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -103,125 +106,148 @@ class ThisTestTopo(Topo): # between routers, switches and hosts. # # Create P/PE routers - #check for mpls - tgen.add_router('r1') + # check for mpls + tgen.add_router("r1") if tgen.hasmpls != True: - logger.info('MPLS not available, tests will be skipped') + logger.info("MPLS not available, tests will be skipped") return mach = platform.machine() krel = platform.release() - if mach[:1] == 'a' and topotest.version_cmp(krel, '4.11') < 0: - logger.info('Need Kernel version 4.11 to run on arm processor') + if mach[:1] == "a" and topotest.version_cmp(krel, "4.11") < 0: + logger.info("Need Kernel version 4.11 to run on arm processor") return for routern in range(2, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create CE routers for routern in range(1, 5): - tgen.add_router('ce{}'.format(routern)) + tgen.add_router("ce{}".format(routern)) - #CE/PE links - tgen.add_link(tgen.gears['ce1'], tgen.gears['r1'], 'ce1-eth0', 'r1-eth4') - tgen.add_link(tgen.gears['ce2'], tgen.gears['r3'], 'ce2-eth0', 'r3-eth4') - tgen.add_link(tgen.gears['ce3'], tgen.gears['r4'], 'ce3-eth0', 'r4-eth4') - tgen.add_link(tgen.gears['ce4'], tgen.gears['r4'], 'ce4-eth0', 'r4-eth5') + # CE/PE links + tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4") + tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4") + tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4") + tgen.add_link(tgen.gears["ce4"], tgen.gears["r4"], "ce4-eth0", "r4-eth5") # Create a switch with just one router connected to it to simulate a # empty network. 
switch = {} - switch[0] = tgen.add_switch('sw0') - switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0') - switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0') + switch[0] = tgen.add_switch("sw0") + switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0") + switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0") - switch[1] = tgen.add_switch('sw1') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0') - switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0') + switch[1] = tgen.add_switch("sw1") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0") + switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0") + + switch[1] = tgen.add_switch("sw2") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1") - switch[1] = tgen.add_switch('sw2') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth2') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth1') l3mdev_accept = 0 + def ltemplatePreRouterStartHook(): global l3mdev_accept cc = ltemplateRtrCmd() krel = platform.release() tgen = get_topogen() - logger.info('pre router-start hook, kernel=' + krel) + logger.info("pre router-start hook, kernel=" + krel) - if topotest.version_cmp(krel, '4.15') >= 0 and \ - topotest.version_cmp(krel, '4.18') <= 0: + if ( + topotest.version_cmp(krel, "4.15") >= 0 + and topotest.version_cmp(krel, "4.18") <= 0 + ): l3mdev_accept = 1 - if topotest.version_cmp(krel, '5.0') >= 0: + if topotest.version_cmp(krel, "5.0") >= 0: l3mdev_accept = 1 - logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)) - #check for mpls + logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept)) + # check for mpls if tgen.hasmpls != True: - logger.info('MPLS not available, skipping setup') + logger.info("MPLS not available, skipping setup") return False - #check for normal init + # check for normal init if len(tgen.net) == 1: - logger.info('Topology not configured, skipping setup') + logger.info("Topology not configured, skipping setup") return False - #trace errors/unexpected output + # trace errors/unexpected output cc.resetCounts() - #configure r2 mpls interfaces - intfs = ['lo', 'r2-eth0', 'r2-eth1', 'r2-eth2'] + # configure r2 mpls interfaces + intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"] for intf in intfs: - cc.doCmd(tgen, 'r2', 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - - #configure cust1 VRFs & MPLS - rtrs = ['r1', 'r3', 'r4'] - cmds = ['ip link add {0}-cust1 type vrf table 10', - 'ip ru add oif {0}-cust1 table 10', - 'ip ru add iif {0}-cust1 table 10', - 'ip link set dev {0}-cust1 up', - 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)] + cc.doCmd(tgen, "r2", "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)) + + # configure cust1 VRFs & MPLS + rtrs = ["r1", "r3", "r4"] + cmds = [ + "ip link add {0}-cust1 type vrf table 10", + "ip ru add oif {0}-cust1 table 10", + "ip ru add iif {0}-cust1 table 10", + "ip link set dev {0}-cust1 up", + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept), + ] for rtr in rtrs: router = tgen.gears[rtr] for cmd in cmds: cc.doCmd(tgen, rtr, cmd.format(rtr)) - cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth4 master {0}-cust1'.format(rtr)) - intfs = [rtr+'-cust1', 'lo', rtr+'-eth0', rtr+'-eth4'] + cc.doCmd(tgen, rtr, "ip link set dev {0}-eth4 master {0}-cust1".format(rtr)) + intfs = [rtr + "-cust1", "lo", rtr + "-eth0", rtr + "-eth4"] for intf in 
intfs: - cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - logger.info('setup {0} vrf {0}-cust1, {0}-eth4. enabled mpls input.'.format(rtr)) - #configure cust2 VRFs & MPLS - rtrs = ['r4'] - cmds = ['ip link add {0}-cust2 type vrf table 20', - 'ip ru add oif {0}-cust2 table 20', - 'ip ru add iif {0}-cust2 table 20', - 'ip link set dev {0}-cust2 up'] + cc.doCmd( + tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf) + ) + logger.info( + "setup {0} vrf {0}-cust1, {0}-eth4. enabled mpls input.".format(rtr) + ) + # configure cust2 VRFs & MPLS + rtrs = ["r4"] + cmds = [ + "ip link add {0}-cust2 type vrf table 20", + "ip ru add oif {0}-cust2 table 20", + "ip ru add iif {0}-cust2 table 20", + "ip link set dev {0}-cust2 up", + ] for rtr in rtrs: for cmd in cmds: cc.doCmd(tgen, rtr, cmd.format(rtr)) - cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth5 master {0}-cust2'.format(rtr)) - intfs = [rtr+'-cust2', rtr+'-eth5'] + cc.doCmd(tgen, rtr, "ip link set dev {0}-eth5 master {0}-cust2".format(rtr)) + intfs = [rtr + "-cust2", rtr + "-eth5"] for intf in intfs: - cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf)) - logger.info('setup {0} vrf {0}-cust2, {0}-eth5. enabled mpls input.'.format(rtr)) - #put ce4-eth0 into a VRF (no default instance!) - rtrs = ['ce4'] - cmds = ['ip link add {0}-cust2 type vrf table 20', - 'ip ru add oif {0}-cust2 table 20', - 'ip ru add iif {0}-cust2 table 20', - 'ip link set dev {0}-cust2 up', - 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)] + cc.doCmd( + tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf) + ) + logger.info( + "setup {0} vrf {0}-cust2, {0}-eth5. enabled mpls input.".format(rtr) + ) + # put ce4-eth0 into a VRF (no default instance!) 
+ rtrs = ["ce4"] + cmds = [ + "ip link add {0}-cust2 type vrf table 20", + "ip ru add oif {0}-cust2 table 20", + "ip ru add iif {0}-cust2 table 20", + "ip link set dev {0}-cust2 up", + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept), + ] for rtr in rtrs: for cmd in cmds: cc.doCmd(tgen, rtr, cmd.format(rtr)) - cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth0 master {0}-cust2'.format(rtr)) + cc.doCmd(tgen, rtr, "ip link set dev {0}-eth0 master {0}-cust2".format(rtr)) if cc.getOutput() != 4: InitSuccess = False - logger.info('Unexpected output seen ({} times, tests will be skipped'.format(cc.getOutput())) + logger.info( + "Unexpected output seen ({} times, tests will be skipped".format( + cc.getOutput() + ) + ) else: InitSuccess = True - logger.info('VRF config successful!') + logger.info("VRF config successful!") return InitSuccess + def ltemplatePostRouterStartHook(): - logger.info('post router-start hook') + logger.info("post router-start hook") return True diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py index 19b73d2057..5c7427763d 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/add_routes.py @@ -1,13 +1,59 @@ from lutil import luCommand -luCommand('r1','vtysh -c "add vrf r1-cust1 prefix 99.0.0.1/32"','.','none','IP Address') -luCommand('r3','vtysh -c "add vrf r3-cust1 prefix 99.0.0.2/32"','.','none','IP Address') -luCommand('r4','vtysh -c "add vrf r4-cust1 prefix 99.0.0.3/32"','.','none','IP Address') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','pass','Local Registration') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','pass','Local Registration') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','pass','Local Registration') -luCommand('r1','vtysh -c "show vnc registrations remote"','4 out of 4','wait','Remote Registration', 10) -luCommand('r3','vtysh -c "show vnc registrations remote"','6 out of 6','wait','Remote Registration', 10) -luCommand('r4','vtysh -c "show vnc registrations remote"','4 out of 4','wait','Remote Registration', 10) -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') + +luCommand( + "r1", 'vtysh -c "add vrf r1-cust1 prefix 99.0.0.1/32"', ".", "none", "IP Address" +) +luCommand( + "r3", 'vtysh -c "add vrf r3-cust1 prefix 99.0.0.2/32"', ".", "none", "IP Address" +) +luCommand( + "r4", 'vtysh -c "add vrf r4-cust1 prefix 99.0.0.3/32"', ".", "none", "IP Address" +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "pass", + "Local Registration", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "pass", + "Local Registration", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "pass", + "Local Registration", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "4 out of 4", + "wait", + "Remote Registration", + 10, +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "6 out of 6", + "wait", + "Remote Registration", + 10, +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "4 out of 4", + "wait", + "Remote Registration", + 10, +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", 
"none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py index c2b0cf9e7a..53cf353fa0 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py @@ -1,18 +1,64 @@ from lutil import luCommand -luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180) -luCommand('ce4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','Adjacencies up',180) -luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180) -luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180) -luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180) -luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up') -luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0.* 00:0','pass','All adjacencies up') -luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') -luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping') -luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') + +luCommand("ce1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand("ce3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180) +luCommand( + "ce4", 'vtysh -c "show bgp vrf all summary"', " 00:0", "wait", "Adjacencies up", 180 +) +luCommand( + "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r2", + 'vtysh -c "show bgp summary"', + " 00:0.* 00:0.* 00:0", + "wait", + "Core adjacencies up", + 180, +) +luCommand( + "r1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r4", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180 +) +luCommand( + "r1", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r3", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r4", + 'vtysh -c "show bgp vrf all summary"', + " 00:0.* 00:0.* 00:0", + "pass", + "All adjacencies up", +) +luCommand( + "r1", "ping 3.3.3.3 -c 1", " 0. 
packet loss", "wait", "PE->PE3 (loopback) ping" +) +luCommand( + "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping" +) +luCommand( + "r4", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py index 9827a9e2c1..20113b1058 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py @@ -1,47 +1,83 @@ from lutil import luCommand, luLast from lib import topotest -ret = luCommand('r2', 'ip -M route show', - '\d*(?= via inet 10.0.2.4 dev r2-eth1)','wait','See mpls route to r4') +ret = luCommand( + "r2", + "ip -M route show", + "\d*(?= via inet 10.0.2.4 dev r2-eth1)", + "wait", + "See mpls route to r4", +) found = luLast() if ret != False and found != None: label4r4 = found.group(0) - luCommand('r2', 'ip -M route show', - '.', 'pass', - 'See %s as label to r4' % label4r4) - ret = luCommand('r2', 'ip -M route show', - '\d*(?= via inet 10.0.1.1 dev r2-eth0)', 'wait', - 'See mpls route to r1') + luCommand("r2", "ip -M route show", ".", "pass", "See %s as label to r4" % label4r4) + ret = luCommand( + "r2", + "ip -M route show", + "\d*(?= via inet 10.0.1.1 dev r2-eth0)", + "wait", + "See mpls route to r1", + ) found = luLast() if ret != False and found != None: label4r1 = found.group(0) - luCommand('r2', 'ip -M route show', - '.', 'pass', 'See %s as label to r1' % label4r1) - - luCommand('r1', 'ip route show vrf r1-cust1', - '99.0.0.4', 'pass', 'VRF->MPLS PHP route installed') - luCommand('r4', 'ip route show vrf r4-cust2', - '99.0.0.1','pass', 'VRF->MPLS PHP route installed') - - luCommand('r1', 'ip -M route show', '101', 'pass', 'MPLS->VRF route installed') - luCommand('r4', 'ip -M route show', '1041', 'pass', 'MPLS->VRF1 route installed') - luCommand('r4', 'ip -M route show', '1042', 'pass', 'MPLS->VRF2 route installed') - - luCommand('ce1', 'ping 99.0.0.4 -I 99.0.0.1 -c 1', - ' 0. packet loss','wait','CE->CE (loopback) ping - l3vpn+zebra case') - #skip due to VRF weirdness - #luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', + luCommand("r2", "ip -M route show", ".", "pass", "See %s as label to r1" % label4r1) + + luCommand( + "r1", + "ip route show vrf r1-cust1", + "99.0.0.4", + "pass", + "VRF->MPLS PHP route installed", + ) + luCommand( + "r4", + "ip route show vrf r4-cust2", + "99.0.0.1", + "pass", + "VRF->MPLS PHP route installed", + ) + + luCommand("r1", "ip -M route show", "101", "pass", "MPLS->VRF route installed") + luCommand("r4", "ip -M route show", "1041", "pass", "MPLS->VRF1 route installed") + luCommand("r4", "ip -M route show", "1042", "pass", "MPLS->VRF2 route installed") + + luCommand( + "ce1", + "ping 99.0.0.4 -I 99.0.0.1 -c 1", + " 0. packet loss", + "wait", + "CE->CE (loopback) ping - l3vpn+zebra case", + ) + # skip due to VRF weirdness + # luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', # ' 0. packet loss','wait','CE->CE (loopback) ping - l3vpn+zebra case') - luCommand('ce1', 'ping 99.0.0.4 -I 99.0.0.1 -c 1', - ' 0. packet loss','wait','CE->CE (loopback) ping') - #luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', + luCommand( + "ce1", + "ping 99.0.0.4 -I 99.0.0.1 -c 1", + " 0. packet loss", + "wait", + "CE->CE (loopback) ping", + ) + # luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1', # ' 0. 
packet loss','wait','CE->CE (loopback) ping') - luCommand('r3', 'ip -M route show', '103', 'pass', 'MPLS->VRF route installed') - luCommand('ce2', 'ping 99.0.0.3 -I 99.0.0.2 -c 1', - ' 0. packet loss','wait','CE2->CE3 (loopback) ping') - luCommand('ce3', 'ping 99.0.0.4 -I 99.0.0.3 -c 1', - ' 0. packet loss','wait','CE3->CE4 (loopback) ping') + luCommand("r3", "ip -M route show", "103", "pass", "MPLS->VRF route installed") + luCommand( + "ce2", + "ping 99.0.0.3 -I 99.0.0.2 -c 1", + " 0. packet loss", + "wait", + "CE2->CE3 (loopback) ping", + ) + luCommand( + "ce3", + "ping 99.0.0.4 -I 99.0.0.3 -c 1", + " 0. packet loss", + "wait", + "CE3->CE4 (loopback) ping", + ) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py index 547a5949a3..b552ea0406 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py @@ -1,27 +1,93 @@ from lutil import luCommand from customize import l3mdev_accept -l3mdev_rtrs = ['r1', 'r3', 'r4', 'ce4'] +l3mdev_rtrs = ["r1", "r3", "r4", "ce4"] for rtr in l3mdev_rtrs: - luCommand(rtr,'sysctl net.ipv4.tcp_l3mdev_accept',' = \d*','none','') + luCommand(rtr, "sysctl net.ipv4.tcp_l3mdev_accept", " = \d*", "none", "") found = luLast() - luCommand(rtr,'ss -naep',':179','pass','IPv4:bgp, l3mdev{}'.format(found.group(0))) - luCommand(rtr,'ss -naep',':.*:179','pass','IPv6:bgp') - luCommand(rtr,'sysctl net.ipv4.tcp_l3mdev_accept',' = {}'.format(l3mdev_accept),'pass','l3mdev matches expected (real/expected{}/{})'.format(found.group(0),l3mdev_accept)) + luCommand( + rtr, "ss -naep", ":179", "pass", "IPv4:bgp, l3mdev{}".format(found.group(0)) + ) + luCommand(rtr, "ss -naep", ":.*:179", "pass", "IPv6:bgp") + luCommand( + rtr, + "sysctl net.ipv4.tcp_l3mdev_accept", + " = {}".format(l3mdev_accept), + "pass", + "l3mdev matches expected (real/expected{}/{})".format( + found.group(0), l3mdev_accept + ), + ) -rtrs = ['r1', 'r3', 'r4'] +rtrs = ["r1", "r3", "r4"] for rtr in rtrs: - luCommand(rtr, 'ip link show type vrf {}-cust1'.format(rtr),'cust1: .*UP','pass','VRF cust1 intf up') - luCommand(rtr, 'ip add show vrf {}-cust1'.format(rtr),'r..eth4.*UP','pass','VRF cust1 IP intf up') - luCommand(rtr, 'ip add show vrf {}-cust1'.format(rtr),'192.168','pass','VRF cust1 IP config') - luCommand(rtr, 'ip route show vrf {}-cust1'.format(rtr),'192.168...0/24 dev r.-eth','pass','VRF cust1 interface route') -luCommand('r4', 'ip link show type vrf r4-cust2','cust2: .*UP','pass','VRF cust2 up') -luCommand('r4', 'ip add show vrf r4-cust2','r..eth5.*UP.* 192.168','pass','VRF cust1 IP config') -luCommand(rtr, 'ip route show vrf r4-cust2'.format(rtr),'192.168...0/24 dev r.-eth','pass','VRF cust2 interface route') -rtrs = ['ce1', 'ce2', 'ce3'] + luCommand( + rtr, + "ip link show type vrf {}-cust1".format(rtr), + "cust1: .*UP", + "pass", + "VRF cust1 intf up", + ) + luCommand( + rtr, + "ip add show vrf {}-cust1".format(rtr), + "r..eth4.*UP", + "pass", + "VRF cust1 IP intf up", + ) + luCommand( + rtr, + "ip add show vrf {}-cust1".format(rtr), + "192.168", + "pass", + "VRF cust1 IP config", + ) + luCommand( + rtr, + "ip route show vrf {}-cust1".format(rtr), + "192.168...0/24 dev r.-eth", + "pass", + "VRF cust1 interface route", + ) +luCommand("r4", "ip link show type vrf r4-cust2", "cust2: .*UP", "pass", "VRF cust2 up") +luCommand( + "r4", + "ip add show vrf r4-cust2", + "r..eth5.*UP.* 192.168", + "pass", + "VRF cust1 IP config", +) 
+luCommand( + rtr, + "ip route show vrf r4-cust2".format(rtr), + "192.168...0/24 dev r.-eth", + "pass", + "VRF cust2 interface route", +) +rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'ip route show','192.168...0/24 dev ce.-eth0','pass','CE interface route') - luCommand(rtr,'ping 192.168.1.1 -c 1',' 0. packet loss','wait','CE->PE ping') -luCommand('ce4', 'ip link show type vrf ce4-cust2','cust2: .*UP','pass','VRF cust2 up') -luCommand('ce4', 'ip route show vrf ce4-cust2','192.168...0/24 dev ce.-eth0','pass','CE interface route') -luCommand('ce4','ping 192.168.2.1 -c 1 -I ce4-cust2',' 0. packet loss','wait','CE4->PE4 ping') + luCommand( + rtr, + "ip route show", + "192.168...0/24 dev ce.-eth0", + "pass", + "CE interface route", + ) + luCommand(rtr, "ping 192.168.1.1 -c 1", " 0. packet loss", "wait", "CE->PE ping") +luCommand( + "ce4", "ip link show type vrf ce4-cust2", "cust2: .*UP", "pass", "VRF cust2 up" +) +luCommand( + "ce4", + "ip route show vrf ce4-cust2", + "192.168...0/24 dev ce.-eth0", + "pass", + "CE interface route", +) +luCommand( + "ce4", + "ping 192.168.2.1 -c 1 -I ce4-cust2", + " 0. packet loss", + "wait", + "CE4->PE4 ping", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py index e47ea5f2cd..f5a29b95c9 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py @@ -1,5 +1,5 @@ from lutil import luCommand -from bgprib import bgpribRequireVpnRoutes,bgpribRequireUnicastRoutes +from bgprib import bgpribRequireVpnRoutes, bgpribRequireUnicastRoutes ######################################################################## # CE routers: contain routes they originate @@ -12,32 +12,32 @@ from bgprib import bgpribRequireVpnRoutes,bgpribRequireUnicastRoutes # ce4 vtysh -c "show bgp ipv4 uni" want = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.1'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.1'}, - {'p':'99.0.0.1/32', 'n':'0.0.0.0'}, + {"p": "5.1.0.0/24", "n": "99.0.0.1"}, + {"p": "5.1.1.0/24", "n": "99.0.0.1"}, + {"p": "99.0.0.1/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce1','ipv4','','Cust 1 routes in ce1',want) +bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes in ce1", want) want = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.2'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.2'}, - {'p':'99.0.0.2/32', 'n':'0.0.0.0'}, + {"p": "5.1.0.0/24", "n": "99.0.0.2"}, + {"p": "5.1.1.0/24", "n": "99.0.0.2"}, + {"p": "99.0.0.2/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce2','ipv4','','Cust 2 routes in ce1',want) +bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 2 routes in ce1", want) want = [ - {'p':'5.1.2.0/24', 'n':'99.0.0.3'}, - {'p':'5.1.3.0/24', 'n':'99.0.0.3'}, - {'p':'99.0.0.3/32', 'n':'0.0.0.0'}, + {"p": "5.1.2.0/24", "n": "99.0.0.3"}, + {"p": "5.1.3.0/24", "n": "99.0.0.3"}, + {"p": "99.0.0.3/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce3','ipv4','','Cust 3 routes in ce1',want) +bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 3 routes in ce1", want) want = [ - {'p':'5.4.2.0/24', 'n':'99.0.0.4'}, - {'p':'5.4.3.0/24', 'n':'99.0.0.4'}, - {'p':'99.0.0.4/32', 'n':'0.0.0.0'}, + {"p": "5.4.2.0/24", "n": "99.0.0.4"}, + {"p": "5.4.3.0/24", "n": "99.0.0.4"}, + {"p": "99.0.0.4/32", "n": "0.0.0.0"}, ] -bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 4 routes in ce1',want) +bgpribRequireUnicastRoutes("ce4", "ipv4", "ce4-cust2", "Cust 4 routes in ce1", want) 
######################################################################## @@ -47,116 +47,169 @@ bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 4 routes in ce1',want) # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" # want_r1_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.1'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.1'}, - {'p':'99.0.0.1/32', 'n':'192.168.1.2'}, + {"p": "5.1.0.0/24", "n": "99.0.0.1"}, + {"p": "5.1.1.0/24", "n": "99.0.0.1"}, + {"p": "99.0.0.1/32", "n": "192.168.1.2"}, ] -bgpribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer 1 routes in r1 vrf',want_r1_cust1_routes) +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_routes +) want_r3_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'99.0.0.2'}, - {'p':'5.1.1.0/24', 'n':'99.0.0.2'}, - {'p':'99.0.0.2/32', 'n':'192.168.1.2'}, + {"p": "5.1.0.0/24", "n": "99.0.0.2"}, + {"p": "5.1.1.0/24", "n": "99.0.0.2"}, + {"p": "99.0.0.2/32", "n": "192.168.1.2"}, ] -bgpribRequireUnicastRoutes('r3','ipv4','r3-cust1','Customer 1 routes in r3 vrf',want_r3_cust1_routes) +bgpribRequireUnicastRoutes( + "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_cust1_routes +) want_r4_cust1_routes = [ - {'p':'5.1.2.0/24', 'n':'99.0.0.3'}, - {'p':'5.1.3.0/24', 'n':'99.0.0.3'}, - {'p':'99.0.0.3/32', 'n':'192.168.1.2'}, + {"p": "5.1.2.0/24", "n": "99.0.0.3"}, + {"p": "5.1.3.0/24", "n": "99.0.0.3"}, + {"p": "99.0.0.3/32", "n": "192.168.1.2"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust1','Customer 1 routes in r4 vrf',want_r4_cust1_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_cust1_routes +) want_r4_cust2_routes = [ - {'p':'5.4.2.0/24', 'n':'99.0.0.4'}, - {'p':'5.4.3.0/24', 'n':'99.0.0.4'}, - {'p':'99.0.0.4/32', 'n':'192.168.2.2'}, + {"p": "5.4.2.0/24", "n": "99.0.0.4"}, + {"p": "5.4.3.0/24", "n": "99.0.0.4"}, + {"p": "99.0.0.4/32", "n": "192.168.2.2"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf',want_r4_cust2_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_cust2_routes +) ######################################################################## # PE routers: core unicast routes are empty ######################################################################## -luCommand('r1','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') -luCommand('r4','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean') +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Core Unicast SAFI clean", +) ######################################################################## # PE routers: local ce-originated routes are leaked to vpn ######################################################################## # nhzero is for the new code that sets nh of 
locally-leaked routes to 0 -#nhzero = 1 +# nhzero = 1 nhzero = 0 if nhzero: - luCommand('r1','vtysh -c "show bgp ipv4 vpn"', - 'Distinguisher: *10:1.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.1/32 *0.0.0.0 ', - 'pass','vrf->vpn routes') - luCommand('r3','vtysh -c "show bgp ipv4 vpn"', - 'Distinguisher: *10:3.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.2/32 *0.0.0.0 ', - 'pass','vrf->vpn routes') + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + "Distinguisher: *10:1.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.1/32 *0.0.0.0 ", + "pass", + "vrf->vpn routes", + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + "Distinguisher: *10:3.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.2/32 *0.0.0.0 ", + "pass", + "vrf->vpn routes", + ) want = [ - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'0.0.0.0'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'0.0.0.0'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'0.0.0.0'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'0.0.0.0'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'0.0.0.0'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'0.0.0.0'}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "0.0.0.0"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "0.0.0.0"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "0.0.0.0"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "0.0.0.0"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "0.0.0.0"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "0.0.0.0"}, ] - bgpribRequireVpnRoutes('r4','vrf->vpn routes',want) + bgpribRequireVpnRoutes("r4", "vrf->vpn routes", want) else: - luCommand('r1','vtysh -c "show bgp ipv4 vpn"', - r'Distinguisher: *10:1.*5.1.0.0/24 *99.0.0.1\b.*5.1.1.0/24 *99.0.0.1\b.*99.0.0.1/32 *192.168.1.2\b', - 'pass','vrf->vpn routes') - luCommand('r3','vtysh -c "show bgp ipv4 vpn"', - r'Distinguisher: *10:3.*5.1.0.0/24 *99.0.0.2\b.*5.1.1.0/24 *99.0.0.2\b.*99.0.0.2/32 *192.168.1.2\b', - 'pass','vrf->vpn routes') + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn"', + r"Distinguisher: *10:1.*5.1.0.0/24 *99.0.0.1\b.*5.1.1.0/24 *99.0.0.1\b.*99.0.0.1/32 *192.168.1.2\b", + "pass", + "vrf->vpn routes", + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn"', + r"Distinguisher: *10:3.*5.1.0.0/24 *99.0.0.2\b.*5.1.1.0/24 *99.0.0.2\b.*99.0.0.2/32 *192.168.1.2\b", + "pass", + "vrf->vpn routes", + ) want = [ - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'99.0.0.3'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'99.0.0.3'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'192.168.1.2'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'99.0.0.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'99.0.0.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'192.168.2.2'}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "99.0.0.3"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "99.0.0.3"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "192.168.1.2"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "99.0.0.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "99.0.0.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "192.168.2.2"}, ] - bgpribRequireVpnRoutes('r4','vrf->vpn routes',want) + bgpribRequireVpnRoutes("r4", "vrf->vpn routes", want) ######################################################################## # PE routers: exporting vrfs set MPLS vrf labels in kernel ######################################################################## -luCommand('r1','vtysh -c "show mpls table"',' 101 *BGP *r1-cust1','pass','vrf labels') -luCommand('r3','vtysh -c "show mpls table"',' 103 *BGP *r3-cust1','pass','vrf labels') -luCommand('r4','vtysh -c "show mpls table"',' 1041 *BGP *r4-cust1 .*1042 *BGP *r4-cust2','pass','vrf 
labels') +luCommand( + "r1", 'vtysh -c "show mpls table"', " 101 *BGP *r1-cust1", "pass", "vrf labels" +) +luCommand( + "r3", 'vtysh -c "show mpls table"', " 103 *BGP *r3-cust1", "pass", "vrf labels" +) +luCommand( + "r4", + 'vtysh -c "show mpls table"', + " 1041 *BGP *r4-cust1 .*1042 *BGP *r4-cust2", + "pass", + "vrf labels", +) ######################################################################## # Core VPN router: all customer routes ######################################################################## want_rd_routes = [ - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'}, - - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"}, ] -bgpribRequireVpnRoutes('r2','Customer routes in provider vpn core',want_rd_routes) +bgpribRequireVpnRoutes("r2", "Customer routes in provider vpn core", want_rd_routes) ######################################################################## # PE routers: VPN routes from remote customers @@ -165,46 +218,46 @@ bgpribRequireVpnRoutes('r2','Customer routes in provider vpn core',want_rd_route # r1 vtysh -c "show bgp ipv4 vpn" # want_r1_remote_vpn_routes = [ - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'}, - - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"}, ] -bgpribRequireVpnRoutes('r1','Remote Customer routes in R1 vpn',want_r1_remote_vpn_routes) +bgpribRequireVpnRoutes( + "r1", "Remote Customer routes in R1 vpn", want_r1_remote_vpn_routes +) want_r3_remote_vpn_routes = [ - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 
'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"}, ] -bgpribRequireVpnRoutes('r3','Remote Customer routes in R3 vpn',want_r3_remote_vpn_routes) +bgpribRequireVpnRoutes( + "r3", "Remote Customer routes in R3 vpn", want_r3_remote_vpn_routes +) want_r4_remote_vpn_routes = [ - {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'}, + {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"}, ] -bgpribRequireVpnRoutes('r4','Remote Customer routes in R4 vpn',want_r4_remote_vpn_routes) - +bgpribRequireVpnRoutes( + "r4", "Remote Customer routes in R4 vpn", want_r4_remote_vpn_routes +) # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" @@ -213,54 +266,58 @@ bgpribRequireVpnRoutes('r4','Remote Customer routes in R4 vpn',want_r4_remote_vp # PE routers: VRFs contain routes from remote customer nets ######################################################################## want_r1_remote_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'p':'99.0.0.2/32', 'n':'3.3.3.3'}, - - {'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, + {"p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"p": "99.0.0.2/32", "n": "3.3.3.3"}, + {"p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, ] -bgpribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer 1 routes in r1 vrf',want_r1_remote_cust1_routes) +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_remote_cust1_routes +) want_r3_remote_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - - {'p':'5.1.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.1.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, - - {'p':'5.4.2.0/24', 'n':'4.4.4.4'}, - {'p':'5.4.3.0/24', 'n':'4.4.4.4'}, - {'p':'99.0.0.3/32', 'n':'4.4.4.4'}, + {"p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"p": "5.1.1.0/24", "n": 
"1.1.1.1"}, + {"p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"p": "5.1.2.0/24", "n": "4.4.4.4"}, + {"p": "5.1.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, + {"p": "5.4.2.0/24", "n": "4.4.4.4"}, + {"p": "5.4.3.0/24", "n": "4.4.4.4"}, + {"p": "99.0.0.3/32", "n": "4.4.4.4"}, ] -bgpribRequireUnicastRoutes('r3','ipv4','r3-cust1','Customer 1 routes in r3 vrf',want_r3_remote_cust1_routes) +bgpribRequireUnicastRoutes( + "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_remote_cust1_routes +) want_r4_remote_cust1_routes = [ - {'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - {'p':'99.0.0.2/32', 'n':'3.3.3.3'}, + {"p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"p": "99.0.0.2/32", "n": "3.3.3.3"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust1','Customer 1 routes in r4 vrf',want_r4_remote_cust1_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_remote_cust1_routes +) want_r4_remote_cust2_routes = [ - {'p':'5.1.0.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.1.0/24', 'n':'1.1.1.1'}, - {'p':'5.1.0.0/24', 'n':'3.3.3.3'}, - {'p':'5.1.1.0/24', 'n':'3.3.3.3'}, - {'p':'99.0.0.1/32', 'n':'1.1.1.1'}, - {'p':'99.0.0.2/32', 'n':'3.3.3.3'}, + {"p": "5.1.0.0/24", "n": "1.1.1.1"}, + {"p": "5.1.1.0/24", "n": "1.1.1.1"}, + {"p": "5.1.0.0/24", "n": "3.3.3.3"}, + {"p": "5.1.1.0/24", "n": "3.3.3.3"}, + {"p": "99.0.0.1/32", "n": "1.1.1.1"}, + {"p": "99.0.0.2/32", "n": "3.3.3.3"}, ] -bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf',want_r4_remote_cust2_routes) +bgpribRequireUnicastRoutes( + "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_remote_cust2_routes +) ######################################################################### @@ -270,49 +327,78 @@ bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf', # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" # r1 vtysh -c "show bgp vrf r1-cust1 ipv4 5.1.2.0/24" -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10) +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "10 routes and 10", + "wait", + "Local and remote routes", + 10, +) want = [ - {'p':'5.1.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.3.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.3.0/24', 'n':'192.168.1.1'}, + {"p": "5.1.2.0/24", "n": "192.168.1.1"}, + {"p": "5.1.3.0/24", "n": "192.168.1.1"}, + {"p": "5.4.2.0/24", "n": "192.168.1.1"}, + {"p": "5.4.3.0/24", "n": "192.168.1.1"}, ] -bgpribRequireUnicastRoutes('ce1','ipv4','','Cust 1 routes from remote',want) - -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','10 routes and 12','wait','Local and remote routes', 10) +bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes from remote", want) + +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "10 routes and 12", + "wait", + "Local and remote routes", + 10, +) want = [ - {'p':'5.1.0.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.1.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.3.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.3.0/24', 'n':'192.168.1.1'}, + {"p": "5.1.0.0/24", "n": "192.168.1.1"}, + {"p": "5.1.1.0/24", "n": "192.168.1.1"}, + 
{"p": "5.1.2.0/24", "n": "192.168.1.1"}, + {"p": "5.1.3.0/24", "n": "192.168.1.1"}, + {"p": "5.4.2.0/24", "n": "192.168.1.1"}, + {"p": "5.4.3.0/24", "n": "192.168.1.1"}, ] -bgpribRequireUnicastRoutes('ce2','ipv4','','Cust 1 routes from remote',want) +bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 1 routes from remote", want) # human readable output for debugging -luCommand('r4','vtysh -c "show bgp vrf r4-cust1 ipv4 uni"') -luCommand('r4','vtysh -c "show bgp vrf r4-cust2 ipv4 uni"') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"') -luCommand('r4','vtysh -c "show ip route vrf r4-cust1"') -luCommand('r4','vtysh -c "show ip route vrf r4-cust2"') - -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10) +luCommand("r4", 'vtysh -c "show bgp vrf r4-cust1 ipv4 uni"') +luCommand("r4", 'vtysh -c "show bgp vrf r4-cust2 ipv4 uni"') +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"') +luCommand("r4", 'vtysh -c "show ip route vrf r4-cust1"') +luCommand("r4", 'vtysh -c "show ip route vrf r4-cust2"') + +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "10 routes and 10", + "wait", + "Local and remote routes", + 10, +) # Requires bvl-bug-degenerate-no-label fix (FRR PR #2053) want = [ - {'p':'5.1.0.0/24', 'n':'192.168.1.1'}, - {'p':'5.1.1.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.2.0/24', 'n':'192.168.1.1'}, - {'p':'5.4.3.0/24', 'n':'192.168.1.1'}, + {"p": "5.1.0.0/24", "n": "192.168.1.1"}, + {"p": "5.1.1.0/24", "n": "192.168.1.1"}, + {"p": "5.4.2.0/24", "n": "192.168.1.1"}, + {"p": "5.4.3.0/24", "n": "192.168.1.1"}, ] -bgpribRequireUnicastRoutes('ce3','ipv4','','Cust 1 routes from remote',want) - -luCommand('ce4','vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10) +bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 1 routes from remote", want) + +luCommand( + "ce4", + 'vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"', + "10 routes and 10", + "wait", + "Local and remote routes", + 10, +) want = [ - {'p':'5.1.0.0/24', 'n':'192.168.2.1'}, - {'p':'5.1.1.0/24', 'n':'192.168.2.1'}, - {'p':'5.1.2.0/24', 'n':'192.168.2.1'}, - {'p':'5.1.3.0/24', 'n':'192.168.2.1'}, + {"p": "5.1.0.0/24", "n": "192.168.2.1"}, + {"p": "5.1.1.0/24", "n": "192.168.2.1"}, + {"p": "5.1.2.0/24", "n": "192.168.2.1"}, + {"p": "5.1.3.0/24", "n": "192.168.2.1"}, ] -bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 2 routes from remote',want) - +bgpribRequireUnicastRoutes( + "ce4", "ipv4", "ce4-cust2", "Cust 2 routes from remote", want +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py index a721cf21bd..af77ab01c1 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/cleanup_all.py @@ -1,17 +1,120 @@ from lutil import luCommand -luCommand('r1','vtysh -c "clear vrf r1-cust1 prefix 99.0.0.1/32"','.','none','Cleared VRF route') -luCommand('r3','vtysh -c "clear vrf r3-cust1 prefix 99.0.0.2/32"','.','none','Cleared VRF route') -luCommand('r4','vtysh -c "clear vrf r3-cust1 prefix 99.0.0.3/32"','.','none','Cleared VRF route') -luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','fail','Local Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','fail','Local Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','fail','Local Registration cleared') -luCommand('r1','vtysh -c "show bgp ipv4 uni"','2 
routes and 2','wait','Unicast SAFI updated', 10) -luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10) -luCommand('r4','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10) -luCommand('ce1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10) -luCommand('ce2','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10) -luCommand('ce3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10) -luCommand('r1','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r3','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') -luCommand('r4','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared') + +luCommand( + "r1", + 'vtysh -c "clear vrf r1-cust1 prefix 99.0.0.1/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r3", + 'vtysh -c "clear vrf r3-cust1 prefix 99.0.0.2/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r4", + 'vtysh -c "clear vrf r3-cust1 prefix 99.0.0.3/32"', + ".", + "none", + "Cleared VRF route", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "99.0.0.1", + "fail", + "Local Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "99.0.0.2", + "fail", + "Local Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "99.0.0.3", + "fail", + "Local Registration cleared", +) +luCommand( + "r1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", + 10, +) +luCommand( + "r2", + 'vtysh -c "show bgp ipv4 uni"', + "No BGP prefixes displayed", + "pass", + "Unicast SAFI", +) +luCommand( + "r3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", + 10, +) +luCommand( + "r4", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Unicast SAFI updated", + 10, +) +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", + 10, +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", + 10, +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni"', + "2 routes and 2", + "wait", + "Local and remote routes", + 10, +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations remote"', + "Prefix ", + "fail", + "Remote Registration cleared", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py index c25c2d9ec5..477578bdbd 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/del_bgp_instances.py @@ -1,7 +1,30 @@ from lutil import luCommand -luCommand('r1','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r1-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances') -luCommand('r2','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5226"','.','none','Cleared bgp instances') 
-luCommand('r3','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r3-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances') -luCommand('r4','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5228 vrf r4-cust2" -c "no router bgp 5227 vrf r4-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances') - +luCommand( + "r1", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r1-cust1" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) +luCommand( + "r2", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) +luCommand( + "r3", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r3-cust1" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) +luCommand( + "r4", + '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5228 vrf r4-cust2" -c "no router bgp 5227 vrf r4-cust1" -c "no router bgp 5226"', + ".", + "none", + "Cleared bgp instances", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py index d447548783..2b0a85a91a 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/notification_check.py @@ -1,9 +1,22 @@ from lutil import luCommand -rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4'] + +rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"] for rtr in rtrs: - ret = luCommand(rtr, 'vtysh -c "show bgp neigh"', 'Notification received .([A-Za-z0-9/ ]*)', 'none', 'collect neighbor stats') + ret = luCommand( + rtr, + 'vtysh -c "show bgp neigh"', + "Notification received .([A-Za-z0-9/ ]*)", + "none", + "collect neighbor stats", + ) found = luLast() if ret != False and found != None: val = found.group(1) - ret = luCommand(rtr, 'vtysh -c "show bgp neigh"', 'Notification received', 'fail', 'Notify RXed! {}'.format(val)) -#done + ret = luCommand( + rtr, + 'vtysh -c "show bgp neigh"', + "Notification received", + "fail", + "Notify RXed! 
{}".format(val), + ) +# done diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py index 0279e482ff..b4fa240495 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_down.py @@ -1,25 +1,87 @@ from lutil import luCommand -ret = luCommand('ce1', 'vtysh -c "show ip route" | grep -c \\ 10\\.\\*/32','(.*)','pass', 'Looking for sharp routes') + +ret = luCommand( + "ce1", + 'vtysh -c "show ip route" | grep -c \\ 10\\.\\*/32', + "(.*)", + "pass", + "Looking for sharp routes", +) found = luLast() if ret != False and found != None: num = int(found.group()) - luCommand('ce3', 'vtysh -c "show bgp sum"', - '.', 'pass', 'See %s sharp routes' % num) + luCommand( + "ce3", 'vtysh -c "show bgp sum"', ".", "pass", "See %s sharp routes" % num + ) if num > 0: - rtrs = ['ce1', 'ce2', 'ce3'] + rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep Display','.', 'none', 'BGP routes pre remove') - luCommand(rtr, 'ip route show | cat -n | tail','.', 'none', 'Linux routes pre remove') - wait = 2*num/500 - luCommand('ce1', 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),'.','none','Removing {} routes'.format(num)) - luCommand('ce2', 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),'.','none','Removing {} routes'.format(num)) + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni" | grep Display', + ".", + "none", + "BGP routes pre remove", + ) + luCommand( + rtr, + "ip route show | cat -n | tail", + ".", + "none", + "Linux routes pre remove", + ) + wait = 2 * num / 500 + luCommand( + "ce1", + 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num), + ".", + "none", + "Removing {} routes".format(num), + ) + luCommand( + "ce2", + 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num), + ".", + "none", + "Removing {} routes".format(num), + ) for rtr in rtrs: - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep Display',' 10 route', 'wait', 'BGP routes removed', wait, wait_time=10) - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni"','.', 'none', 'BGP routes post remove') + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni" | grep Display', + " 10 route", + "wait", + "BGP routes removed", + wait, + wait_time=10, + ) + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni"', + ".", + "none", + "BGP routes post remove", + ) for rtr in rtrs: - luCommand(rtr, 'ip route show | grep -c \\^10\\.','^0$', 'wait', 'Linux routes removed', wait, wait_time=10) - luCommand(rtr, 'ip route show','.', 'none', 'Linux routes post remove') - rtrs = ['r1', 'r3', 'r4'] + luCommand( + rtr, + "ip route show | grep -c \\^10\\.", + "^0$", + "wait", + "Linux routes removed", + wait, + wait_time=10, + ) + luCommand(rtr, "ip route show", ".", "none", "Linux routes post remove") + rtrs = ["r1", "r3", "r4"] for rtr in rtrs: - luCommand(rtr, 'ip route show vrf {}-cust1 | grep -c \\^10\\.'.format(rtr),'^0$','wait','VRF route removed',wait, wait_time=10) -#done + luCommand( + rtr, + "ip route show vrf {}-cust1 | grep -c \\^10\\.".format(rtr), + "^0$", + "wait", + "VRF route removed", + wait, + wait_time=10, + ) +# done diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py index 4ecaa4c026..3c768640a1 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/scale_up.py @@ -1,29 +1,38 @@ from lutil import luCommand + 
num = 50000 -b = int(num/(256*256)) +b = int(num / (256 * 256)) if b > 0: - r = num - b * (256*256) + r = num - b * (256 * 256) else: r = num -c = int(r/256) +c = int(r / 256) if c > 0: - d = r - c * 256 - 1 + d = r - c * 256 - 1 else: d = r -wait = 2*num/1000 +wait = 2 * num / 1000 mem_z = {} mem_b = {} -rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4'] +rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"] for rtr in rtrs: - mem_z[rtr] = {'value': 0, 'units': 'unknown'} - mem_b[rtr] = {'value': 0, 'units': 'unknown'} - ret = luCommand(rtr, 'vtysh -c "show memory"', 'zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)', 'none', 'collect bgpd memory stats') + mem_z[rtr] = {"value": 0, "units": "unknown"} + mem_b[rtr] = {"value": 0, "units": "unknown"} + ret = luCommand( + rtr, + 'vtysh -c "show memory"', + "zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)", + "none", + "collect bgpd memory stats", + ) found = luLast() if ret != False and found != None: - mem_z[rtr] = {'value': int(found.group(1)), 'units': found.group(2)} - mem_b[rtr] = {'value': int(found.group(3)), 'units': found.group(4)} + mem_z[rtr] = {"value": int(found.group(1)), "units": found.group(2)} + mem_b[rtr] = {"value": int(found.group(3)), "units": found.group(4)} -luCommand('ce1', 'vtysh -c "show mem"', 'qmem sharpd', 'none','check if sharpd running') +luCommand( + "ce1", 'vtysh -c "show mem"', "qmem sharpd", "none", "check if sharpd running" +) doSharp = False found = luLast() if ret != False and found != None: @@ -31,47 +40,195 @@ if ret != False and found != None: doSharp = True if doSharp != True: - luCommand('ce1', 'vtysh -c "sharp data nexthop"', '.', 'pass','sharpd NOT running, skipping test') + luCommand( + "ce1", + 'vtysh -c "sharp data nexthop"', + ".", + "pass", + "sharpd NOT running, skipping test", + ) else: - luCommand('ce1', 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.1 {}"'.format(num),'','pass','Adding {} routes'.format(num)) - luCommand('ce2', 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.2 {}"'.format(num),'','pass','Adding {} routes'.format(num)) - rtrs = ['ce1', 'ce2', 'ce3'] + luCommand( + "ce1", + 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.1 {}"'.format(num), + "", + "pass", + "Adding {} routes".format(num), + ) + luCommand( + "ce2", + 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.2 {}"'.format(num), + "", + "pass", + "Adding {} routes".format(num), + ) + rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni 10.{}.{}.{}"'.format(b,c,d), 'Last update:', 'wait', 'RXed last route, 10.{}.{}.{}'.format(b,c,d), wait, wait_time=10) - luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep -c 10\\.\\*/32', str(num), 'wait', 'See all sharp routes in BGP', wait, wait_time=10) - luCommand('r1', 'vtysh -c "show bgp vrf r1-cust1 ipv4 uni 10.{}.{}.{}"'.format(b,c,d),'99.0.0.1','wait','RXed -> 10.{}.{}.{} from CE1'.format(b,c,d), wait, wait_time=10) - luCommand('r3', 'vtysh -c "show bgp vrf r3-cust1 ipv4 uni 10.{}.{}.{}"'.format(b,c,d),'99.0.0.2','wait','RXed -> 10.{}.{}.{} from CE2'.format(b,c,d), wait, wait_time=10) - luCommand('r1', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'99.0.0.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d)) - luCommand('r3', 'vtysh -c "show bgp ipv4 vpn 
10.{}.{}.{}"'.format(b,c,d),'99.0.0.2','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d)) - luCommand('r3', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'1.1.1.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d)) - luCommand('r1', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'3.3.3.3','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d)) - luCommand('r4', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'1.1.1.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d)) - luCommand('r4', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'3.3.3.3','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d)) - rtrs = ['ce1', 'ce2', 'ce3'] + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni 10.{}.{}.{}"'.format(b, c, d), + "Last update:", + "wait", + "RXed last route, 10.{}.{}.{}".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + rtr, + 'vtysh -c "show bgp ipv4 uni" | grep -c 10\\.\\*/32', + str(num), + "wait", + "See all sharp routes in BGP", + wait, + wait_time=10, + ) + luCommand( + "r1", + 'vtysh -c "show bgp vrf r1-cust1 ipv4 uni 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.1", + "wait", + "RXed -> 10.{}.{}.{} from CE1".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + "r3", + 'vtysh -c "show bgp vrf r3-cust1 ipv4 uni 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.2", + "wait", + "RXed -> 10.{}.{}.{} from CE2".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.1", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d), + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "99.0.0.2", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d), + ) + luCommand( + "r3", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "1.1.1.1", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d), + ) + luCommand( + "r1", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "3.3.3.3", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d), + ) + luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "1.1.1.1", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d), + ) + luCommand( + "r4", + 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d), + "3.3.3.3", + "wait", + "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d), + ) + rtrs = ["ce1", "ce2", "ce3"] for rtr in rtrs: - luCommand(rtr, 'ip route get 10.{}.{}.{}'.format(b,c,d),'dev','wait','Route to 10.{}.{}.{} available'.format(b,c,d), wait, wait_time=10) - luCommand(rtr, 'ip route show | grep -c \\^10\\.', str(num), 'wait', 'See {} linux routes'.format(num), wait, wait_time=10) + luCommand( + rtr, + "ip route get 10.{}.{}.{}".format(b, c, d), + "dev", + "wait", + "Route to 10.{}.{}.{} available".format(b, c, d), + wait, + wait_time=10, + ) + luCommand( + rtr, + "ip route show | grep -c \\^10\\.", + str(num), + "wait", + "See {} linux routes".format(num), + wait, + wait_time=10, + ) - rtrs = ['r1', 'r3', 'r4'] + rtrs = ["r1", "r3", "r4"] for rtr in rtrs: - luCommand(rtr, 'ip route get vrf {}-cust1 10.{}.{}.{}'.format(rtr,b,c,d),'dev','wait','VRF route available',wait, wait_time=10) - luCommand(rtr, 'ip route show vrf {}-cust1 | grep -c \\^10\\.'.format(rtr), str(num), 'wait','See {} linux routes'.format(num), wait, wait_time=10) - rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4'] + luCommand( + rtr, + "ip route get vrf {}-cust1 
10.{}.{}.{}".format(rtr, b, c, d), + "dev", + "wait", + "VRF route available", + wait, + wait_time=10, + ) + luCommand( + rtr, + "ip route show vrf {}-cust1 | grep -c \\^10\\.".format(rtr), + str(num), + "wait", + "See {} linux routes".format(num), + wait, + wait_time=10, + ) + rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"] for rtr in rtrs: - ret = luCommand(rtr, 'vtysh -c "show memory"', 'zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)', 'none', 'collect bgpd memory stats') + ret = luCommand( + rtr, + 'vtysh -c "show memory"', + "zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)", + "none", + "collect bgpd memory stats", + ) found = luLast() if ret != False and found != None: val_z = int(found.group(1)) - if mem_z[rtr]['units'] != found.group(2): + if mem_z[rtr]["units"] != found.group(2): val_z *= 1000 - delta_z = val_z - int(mem_z[rtr]['value']) - ave_z = float(delta_z)/float(num) + delta_z = val_z - int(mem_z[rtr]["value"]) + ave_z = float(delta_z) / float(num) val_b = int(found.group(3)) - if mem_b[rtr]['units'] != found.group(4): + if mem_b[rtr]["units"] != found.group(4): val_b *= 1000 - delta_b = val_b - int(mem_b[rtr]['value']) - ave_b = float(delta_b)/float(num) - luCommand(rtr, 'vtysh -c "show thread cpu"', '.', 'pass', 'BGPd heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)'.format(mem_b[rtr]['value'], mem_b[rtr]['units'], found.group(3), found.group(4), round(ave_b,4))) - luCommand(rtr, 'vtysh -c "show thread cpu"', '.', 'pass', 'Zebra heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)'.format(mem_z[rtr]['value'], mem_z[rtr]['units'], found.group(1), found.group(2), round(ave_z,4))) -#done + delta_b = val_b - int(mem_b[rtr]["value"]) + ave_b = float(delta_b) / float(num) + luCommand( + rtr, + 'vtysh -c "show thread cpu"', + ".", + "pass", + "BGPd heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)".format( + mem_b[rtr]["value"], + mem_b[rtr]["units"], + found.group(3), + found.group(4), + round(ave_b, 4), + ), + ) + luCommand( + rtr, + 'vtysh -c "show thread cpu"', + ".", + "pass", + "Zebra heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)".format( + mem_z[rtr]["value"], + mem_z[rtr]["units"], + found.group(1), + found.group(2), + round(ave_z, 4), + ), + ) +# done diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py index 7e36398298..b537735c65 100755 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py @@ -25,119 +25,132 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")) from lib.ltemplate import * + def test_check_linux_vrf(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/check_linux_vrf.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # 
CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/check_linux_vrf.py", False, CliOnFail, CheckFunc) + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def SKIP_test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) + -#manual data path setup test - remove once have bgp/zebra vrf path working +# manual data path setup test - remove once have bgp/zebra vrf path working def test_check_linux_mpls(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/check_linux_mpls.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + 
ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def test_check_scale_up(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/scale_up.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/scale_up.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def test_check_scale_down(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/scale_down.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + ltemplateTest("scripts/scale_down.py", False, CliOnFail, CheckFunc) + def test_notification_check(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' - ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' + 
ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc) + def SKIP_test_cleanup_all(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'4.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' - ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('4.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)' + ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py index 83ec1e784d..334aaebb4b 100755 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py @@ -57,17 +57,19 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - create_route_maps, create_bgp_community_lists, - create_prefix_lists, verify_bgp_community, step, - check_address_types + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_route_maps, + create_bgp_community_lists, + create_prefix_lists, + verify_bgp_community, + step, + check_address_types, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify -) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Save the Current Working Directory to find configuration files. 
@@ -87,7 +89,7 @@ except IOError: bgp_convergence = False NETWORK = { "ipv4": ["200.50.2.0", "200.50.2.1", "200.50.2.0"], - "ipv6": ["1::1", "1::2", "1::0"] + "ipv6": ["1::1", "1::2", "1::0"], } MASK = {"ipv4": "32", "ipv6": "128"} NET_MASK = {"ipv4": "24", "ipv6": "120"} @@ -104,9 +106,8 @@ LARGE_COMM = { "pf_list_1": "0:0:1 0:0:10 0:0:100", "pf_list_2": "0:0:2 0:0:20 0:0:200", "agg_1": "0:0:1 0:0:2 0:0:10 0:0:20 0:0:100 0:0:200 2:1:1 " - "2:2:1 2:3:1 2:4:1 2:5:1", - "agg_2": "0:0:2 0:0:20 0:0:200 2:1:1 " - "2:2:1 2:3:1 2:4:1 2:5:1" + "2:2:1 2:3:1 2:4:1 2:5:1", + "agg_2": "0:0:2 0:0:20 0:0:200 2:1:1 " "2:2:1 2:3:1 2:4:1 2:5:1", } STANDARD_COMM = { "r1": "1:1 1:2 1:3 1:4 1:5", @@ -115,7 +116,7 @@ STANDARD_COMM = { "pf_list_1": "0:1 0:10 0:100", "pf_list_2": "0:2 0:20 0:200", "agg_1": "0:1 0:2 0:10 0:20 0:100 0:200 2:1 2:2 2:3 2:4 2:5", - "agg_2": "0:2 0:20 0:200 2:1 2:2 2:3 2:4 2:5" + "agg_2": "0:2 0:20 0:200 2:1 2:2 2:3 2:4 2:5", } @@ -169,8 +170,9 @@ def setup_module(mod): ##tgen.mininet_cli() # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) ADDR_TYPES = check_address_types() logger.info("Running setup_module() done") @@ -190,8 +192,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". - format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -206,13 +209,9 @@ def config_router_r1(tgen, topo, tc_name): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": LARGE_COMM["r1"] - }, - "community": { - "num": STANDARD_COMM["r1"] - } - } + "large_community": {"num": LARGE_COMM["r1"]}, + "community": {"num": STANDARD_COMM["r1"]}, + }, } ] } @@ -221,8 +220,7 @@ def config_router_r1(tgen, topo, tc_name): step("Configuring LC1 on r1") result = create_route_maps(tgen, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) # Configure neighbor for route map input_dict_2 = { @@ -233,68 +231,64 @@ def config_router_r1(tgen, topo, tc_name): "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv4"][0], MASK["ipv4"]), - "no_of_network": 4 + "network": "%s/%s" + % (NETWORK["ipv4"][0], MASK["ipv4"]), + "no_of_network": 4, } ], "neighbor": { "r2": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } }, "r3": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } - } - } + }, + }, } }, "ipv6": { "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv6"][0], MASK["ipv6"]), - "no_of_network": 4 + "network": "%s/%s" + % (NETWORK["ipv6"][0], MASK["ipv6"]), + "no_of_network": 4, } ], "neighbor": { "r2": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "LC1", "direction": "out"} + ] } } }, "r3": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "LC1", - "direction": "out" - }] + "route_maps": [ + 
{"name": "LC1", "direction": "out"} + ] } } - } - } + }, + }, } - } + }, } } } @@ -302,8 +296,7 @@ def config_router_r1(tgen, topo, tc_name): step("Applying LC1 on r1 neighbors and advertising networks") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) CONFIG_ROUTER_R1 = True @@ -319,13 +312,9 @@ def config_router_r2(tgen, topo, tc_name): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": LARGE_COMM["r2"] - }, - "community": { - "num": STANDARD_COMM["r2"] - } - } + "large_community": {"num": LARGE_COMM["r2"]}, + "community": {"num": STANDARD_COMM["r2"]}, + }, } ] } @@ -334,8 +323,7 @@ def config_router_r2(tgen, topo, tc_name): step("Configuring route-maps LC2 on r2") result = create_route_maps(tgen, input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_1 = { "r2": { @@ -347,10 +335,9 @@ def config_router_r2(tgen, topo, tc_name): "r4": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "LC2", - "direction": "out" - }] + "route_maps": [ + {"name": "LC2", "direction": "out"} + ] } } } @@ -363,16 +350,15 @@ def config_router_r2(tgen, topo, tc_name): "r4": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "LC2", - "direction": "out" - }] + "route_maps": [ + {"name": "LC2", "direction": "out"} + ] } } } } } - } + }, } } } @@ -380,8 +366,7 @@ def config_router_r2(tgen, topo, tc_name): step("Applying LC2 on r2 neighbors in out direction") result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) CONFIG_ROUTER_R2 = True @@ -399,13 +384,13 @@ def config_router_additive(tgen, topo, tc_name): "set": { "large_community": { "num": LARGE_COMM["r2"], - "action": "additive" + "action": "additive", }, "community": { "num": STANDARD_COMM["r2"], - "action": "additive" - } - } + "action": "additive", + }, + }, } ] } @@ -414,8 +399,7 @@ def config_router_additive(tgen, topo, tc_name): step("Configuring LC2 with community attributes as additive") result = create_route_maps(tgen, input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) # tgen.mininet_cli() CONFIG_ROUTER_ADDITIVE = True @@ -434,47 +418,41 @@ def config_for_as_path(tgen, topo, tc_name): "pf_list_1": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv4"][0], - MASK["ipv4"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv4"][0], MASK["ipv4"]), + "action": "permit", } ], "pf_list_2": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv4"][1], - MASK["ipv4"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv4"][1], MASK["ipv4"]), + "action": "permit", } - ] + ], }, "ipv6": { "pf_list_3": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv6"][0], - MASK["ipv6"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv6"][0], MASK["ipv6"]), + "action": "permit", } ], "pf_list_4": [ { "seqid": "10", - "network": "%s/%s" % (NETWORK["ipv6"][1], - MASK["ipv6"]), - "action": "permit" + "network": "%s/%s" % (NETWORK["ipv6"][1], MASK["ipv6"]), + "action": "permit", } - ] - } - + ], + }, } } } step("Configuring prefix-lists on r1 to filter networks") result = create_prefix_lists(tgen, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_2 = { "r1": { @@ -483,81 +461,50 @@ def config_for_as_path(tgen, topo, tc_name): { "action": "permit", "seq_id": 10, - "match": { - "ipv4": { - "prefix_lists": "pf_list_1" - } - }, + "match": {"ipv4": {"prefix_lists": "pf_list_1"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_1"] - }, - "community": { - "num": STANDARD_COMM["pf_list_1"] - } - } + "large_community": {"num": LARGE_COMM["pf_list_1"]}, + "community": {"num": STANDARD_COMM["pf_list_1"]}, + }, }, { "action": "permit", "seq_id": 20, - "match": { - "ipv6": { - "prefix_lists": "pf_list_3" - } - }, + "match": {"ipv6": {"prefix_lists": "pf_list_3"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_1"] - }, - "community": { - "num": STANDARD_COMM["pf_list_1"] - } - } + "large_community": {"num": LARGE_COMM["pf_list_1"]}, + "community": {"num": STANDARD_COMM["pf_list_1"]}, + }, }, { "action": "permit", "seq_id": 30, - "match": { - "ipv4": { - "prefix_lists": "pf_list_2" - } - }, + "match": {"ipv4": {"prefix_lists": "pf_list_2"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_2"] - }, - "community": { - "num": STANDARD_COMM["pf_list_2"] - } - } + "large_community": {"num": LARGE_COMM["pf_list_2"]}, + "community": {"num": STANDARD_COMM["pf_list_2"]}, + }, }, { "action": "permit", "seq_id": 40, - "match": { - "ipv6": { - "prefix_lists": "pf_list_4" - } - }, + "match": {"ipv6": {"prefix_lists": "pf_list_4"}}, "set": { - "large_community": { - "num": LARGE_COMM["pf_list_2"] - }, - "community": { - "num": STANDARD_COMM["pf_list_2"] - } - } - } + "large_community": {"num": LARGE_COMM["pf_list_2"]}, + "community": {"num": STANDARD_COMM["pf_list_2"]}, + }, + }, ] } } } - step("Applying prefix-lists match in route-map LC1 on r1. Setting" - " community attritbute for filtered networks") + step( + "Applying prefix-lists match in route-map LC1 on r1. Setting" + " community attritbute for filtered networks" + ) result = create_route_maps(tgen, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) config_router_additive(tgen, topo, tc_name) @@ -569,22 +516,21 @@ def config_for_as_path(tgen, topo, tc_name): "action": "permit", "name": "ANY", "value": LARGE_COMM["pf_list_1"], - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": STANDARD_COMM["pf_list_1"], - } + }, ] } } step("Configuring bgp community lists on r4") result = create_bgp_community_lists(tgen, input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_4 = { "r4": { @@ -595,14 +541,9 @@ def config_for_as_path(tgen, topo, tc_name): "seq_id": "10", "match": { "large_community_list": {"id": "ANY"}, - "community_list": {"id": "ANY"} + "community_list": {"id": "ANY"}, }, - "set": { - "aspath": { - "as_num": "4000000", - "as_action": "prepend" - } - } + "set": {"path": {"as_num": "4000000", "as_action": "prepend"}}, } ] } @@ -611,8 +552,7 @@ def config_for_as_path(tgen, topo, tc_name): step("Applying community list on route-map on r4") result = create_route_maps(tgen, input_dict_4) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_5 = { "r4": { @@ -624,10 +564,9 @@ def config_for_as_path(tgen, topo, tc_name): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "out" - }] + "route_maps": [ + {"name": "LC4", "direction": "out"} + ] } } } @@ -640,16 +579,15 @@ def config_for_as_path(tgen, topo, tc_name): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "out" - }] + "route_maps": [ + {"name": "LC4", "direction": "out"} + ] } } } } } - } + }, } } } @@ -657,8 +595,7 @@ def config_for_as_path(tgen, topo, tc_name): step("Applying route-map LC4 out from r4 to r5 ") result = create_router_bgp(tgen, topo, input_dict_5) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) ##################################################### @@ -690,8 +627,8 @@ def test_large_community_set(request): "seq_id": "10", "set": { "large_community": {"num": LARGE_COMM["r1"]}, - "community": {"num": STANDARD_COMM["r1"]} - } + "community": {"num": STANDARD_COMM["r1"]}, + }, } ] } @@ -700,8 +637,7 @@ def test_large_community_set(request): step("Trying to set bgp communities") result = create_route_maps(tgen, input_dict) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -728,15 +664,15 @@ def test_large_community_advertise(request): } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], - input_dict) + result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict) assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - result = verify_bgp_community(tgen, adt, "r3", [NETWORK[adt][0]], - input_dict) + result = verify_bgp_community(tgen, adt, "r3", [NETWORK[adt][0]], input_dict) assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -759,14 +695,14 @@ def test_large_community_transitive(request): input_dict_1 = { "largeCommunity": LARGE_COMM["r1"], - "community": STANDARD_COMM["r1"] + "community": STANDARD_COMM["r1"], } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], - input_dict_1) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_1) assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -790,14 +726,14 @@ def test_large_community_override(request): input_dict_3 = { "largeCommunity": LARGE_COMM["r2"], - "community": STANDARD_COMM["r2"] + "community": STANDARD_COMM["r2"], } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][1]], - input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][1]], input_dict_3) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -823,14 +759,14 @@ def test_large_community_additive(request): input_dict_1 = { "largeCommunity": "%s %s" % (LARGE_COMM["r1"], LARGE_COMM["r2"]), - "community": "%s %s" % (STANDARD_COMM["r1"], STANDARD_COMM["r2"]) + "community": "%s %s" % (STANDARD_COMM["r1"], STANDARD_COMM["r2"]), } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], - input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_1) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -853,30 +789,28 @@ def test_large_community_match_as_path(request): config_for_as_path(tgen, topo, tc_name) input_dict = { - "largeCommunity": "%s %s" % ( - LARGE_COMM["pf_list_1"], LARGE_COMM["r2"]), - "community": "%s %s" % ( - STANDARD_COMM["pf_list_1"], STANDARD_COMM["r2"]), + "largeCommunity": "%s %s" % (LARGE_COMM["pf_list_1"], LARGE_COMM["r2"]), + "community": "%s %s" % (STANDARD_COMM["pf_list_1"], STANDARD_COMM["r2"]), } input_dict_1 = { - "largeCommunity": "%s %s" % ( - LARGE_COMM["pf_list_2"], LARGE_COMM["r2"]), - "community": "%s %s" % ( - STANDARD_COMM["pf_list_2"], STANDARD_COMM["r2"]), + "largeCommunity": "%s %s" % (LARGE_COMM["pf_list_2"], LARGE_COMM["r2"]), + "community": "%s %s" % (STANDARD_COMM["pf_list_2"], STANDARD_COMM["r2"]), } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][0]], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][0]], input_dict) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) - result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][1]], - input_dict_1, expected=False) + result = verify_bgp_community( + tgen, adt, "r5", [NETWORK[adt][1]], input_dict_1, expected=False + ) - assert result is not True, "Test case {} : Should fail \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Should fail \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -909,22 +843,22 @@ def test_large_community_match_all(request): "action": "permit", "name": "ANY", "value": "1:1:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ALL", "value": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:1:1 2:2:1", - "large": True + "large": True, }, { "community_type": "expanded", "action": "permit", "name": "EXP_ALL", "value": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:[1-5]:1", - "large": True - } + "large": True, + }, ] } } @@ -932,8 +866,7 @@ def test_large_community_match_all(request): step("Create bgp community lists for ANY, EXACT and EXP_ALL match") result = create_bgp_community_lists(tgen, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_2 = { "r4": { @@ -942,18 +875,18 @@ def test_large_community_match_all(request): { "action": "permit", "seq_id": "10", - "match": {"large-community-list": {"id": "ANY"}} + "match": {"large-community-list": {"id": "ANY"}}, }, { "action": "permit", "seq_id": "20", - "match": {"large-community-list": {"id": "EXACT"}} + "match": {"large-community-list": {"id": "EXACT"}}, }, { "action": "permit", "seq_id": "30", - "match": {"large-community-list": {"id": "EXP_ALL"}} - } + "match": {"large-community-list": {"id": "EXP_ALL"}}, + }, ] } } @@ -961,8 +894,7 @@ def test_large_community_match_all(request): step("Applying bgp community lits on LC4 route-map") result = create_route_maps(tgen, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_3 = { "r4": { @@ -974,10 +906,9 @@ def test_large_community_match_all(request): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "in" - }] + "route_maps": [ + {"name": "LC4", "direction": "in"} + ] } } } @@ -990,16 +921,15 @@ def test_large_community_match_all(request): "r5": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "LC4", - "direction": "in" - }] + "route_maps": [ + {"name": "LC4", "direction": "in"} + ] } } } } } - } + }, } } } @@ -1008,24 +938,23 @@ def test_large_community_match_all(request): step("Apply route-mpa LC4 on r4 for r2 neighbor, direction 'in'") result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_4 = { "largeCommunity": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:1:1 2:2:1 2:3:1 " - "2:4:1 2:5:1" + "2:4:1 2:5:1" } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], - input_dict_4) - assert result is True, "Test case {} : Should fail \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_4) + assert result is True, "Test case {} : Should fail \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) -#@pytest.mark.skip(reason="as-set not working for ipv6") +# @pytest.mark.skip(reason="as-set not working for ipv6") def test_large_community_aggregate_network(request): """ Restart router and check if large community and community @@ -1047,7 +976,7 @@ def test_large_community_aggregate_network(request): input_dict = { "community": STANDARD_COMM["agg_1"], - "largeCommunity": LARGE_COMM["agg_1"] + "largeCommunity": LARGE_COMM["agg_1"], } input_dict_1 = { @@ -1058,9 +987,9 @@ def test_large_community_aggregate_network(request): "unicast": { "aggregate_address": [ { - "network": "%s/%s" % ( - NETWORK["ipv4"][2], NET_MASK["ipv4"]), - "as_set": True + "network": "%s/%s" + % (NETWORK["ipv4"][2], NET_MASK["ipv4"]), + "as_set": True, } ] } @@ -1069,13 +998,13 @@ def test_large_community_aggregate_network(request): "unicast": { "aggregate_address": [ { - "network": "%s/%s" % ( - NETWORK["ipv6"][2], NET_MASK["ipv6"]), - "as_set": True + "network": "%s/%s" + % (NETWORK["ipv6"][2], NET_MASK["ipv6"]), + "as_set": True, } ] } - } + }, } } } @@ -1083,16 +1012,15 @@ def test_large_community_aggregate_network(request): step("Configuring aggregate address as-set on r2") result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r4", - ["%s/%s" % (NETWORK[adt][2], - NET_MASK[adt])], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_bgp_community( + tgen, adt, "r4", ["%s/%s" % (NETWORK[adt][2], NET_MASK[adt])], input_dict + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) input_dict_2 = { "r1": { @@ -1102,10 +1030,10 @@ def test_large_community_aggregate_network(request): "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv4"][0], MASK["ipv4"]), + "network": "%s/%s" + % (NETWORK["ipv4"][0], MASK["ipv4"]), "no_of_network": 1, - "delete": True + "delete": True, } ] } @@ -1114,14 +1042,14 @@ def test_large_community_aggregate_network(request): "unicast": { "advertise_networks": [ { - "network": "%s/%s" % ( - NETWORK["ipv6"][0], MASK["ipv6"]), + "network": "%s/%s" + % (NETWORK["ipv6"][0], MASK["ipv6"]), "no_of_network": 1, - "delete": True + "delete": True, } ] } - } + }, } } } @@ -1129,22 +1057,21 @@ def test_large_community_aggregate_network(request): step("Stop advertising one of the networks") result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Test case {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result) input_dict_3 = { "community": STANDARD_COMM["agg_2"], - "largeCommunity": LARGE_COMM["agg_2"] + "largeCommunity": LARGE_COMM["agg_2"], } for adt in ADDR_TYPES: step("Verifying bgp community values on r5 is also modified") - result = verify_bgp_community(tgen, adt, "r4", - ["%s/%s" % (NETWORK[adt][2], - NET_MASK[adt])], - input_dict_3) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community( + tgen, adt, "r4", ["%s/%s" % (NETWORK[adt][2], NET_MASK[adt])], input_dict_3 + ) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -1168,7 +1095,7 @@ def test_large_community_boundary_values(request): "community_type": "standard", "action": "permit", "name": "ANY", - "value": "0:-1" + "value": "0:-1", } ] } @@ -1176,8 +1103,9 @@ def test_large_community_boundary_values(request): step("Checking boundary value for community 0:-1") result = create_bgp_community_lists(tgen, input_dict) - assert result is not True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Checking community attribute 0:65536") input_dict_2 = { @@ -1187,7 +1115,7 @@ def test_large_community_boundary_values(request): "community_type": "standard", "action": "permit", "name": "ANY", - "value": "0:65536" + "value": "0:65536", } ] } @@ -1195,8 +1123,9 @@ def test_large_community_boundary_values(request): step("Checking boundary value for community 0:65536") result = create_bgp_community_lists(tgen, input_dict_2) - assert result is not True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Checking boundary value for community 0:4294967296") input_dict_3 = { @@ -1207,15 +1136,16 @@ def test_large_community_boundary_values(request): "action": "permit", "name": "ANY", "value": "0:4294967296", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_3) - assert result is not True, "Test case {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Checking boundary value for community 0:-1:1") input_dict_4 = { @@ -1226,15 +1156,16 @@ def test_large_community_boundary_values(request): "action": "permit", "name": "ANY", "value": "0:-1:1", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is not True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) def test_large_community_after_clear_bgp(request): @@ -1253,25 +1184,22 @@ def test_large_community_after_clear_bgp(request): reset_config_on_routers(tgen) config_router_r1(tgen, topo, tc_name) - input_dict = { - "largeCommunity": LARGE_COMM["r1"], - "community": STANDARD_COMM["r1"] - } + input_dict = {"largeCommunity": LARGE_COMM["r1"], "community": STANDARD_COMM["r1"]} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) step("Clearing BGP on r1") clear_bgp_and_verify(tgen, topo, "r1") for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], - input_dict) - assert result is True, "Test case {} : Failed \n Error: {}". \ - format(tc_name, result) + result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict) + assert result is True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py index cba20551cd..502a9a9ec4 100755 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py @@ -3,7 +3,7 @@ # # Copyright (c) 2019 by VMware, Inc. ("VMware") # Used Copyright (c) 2018 by Network Device Education Foundation, -#Inc. ("NetDEF") in this file. +# Inc. ("NetDEF") in this file. 
# # Permission to use, copy, modify, and/or distribute this software # for any purpose with or without fee is hereby granted, provided @@ -77,19 +77,23 @@ from lib.topogen import Topogen, get_topogen from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, reset_config_on_routers, - create_route_maps, create_bgp_community_lists, - create_prefix_lists, verify_bgp_community, step, - verify_create_community_list, delete_route_maps, - verify_route_maps, create_static_routes, - check_address_types + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_route_maps, + create_bgp_community_lists, + create_prefix_lists, + verify_bgp_community, + step, + verify_create_community_list, + delete_route_maps, + verify_route_maps, + create_static_routes, + check_address_types, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp_and_verify -) +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -121,6 +125,7 @@ class GenerateTopo(Topo): # Building topology from json file build_topo_from_json(tgen, topo) + def setup_module(mod): """ Sets up the pytest environment @@ -130,7 +135,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -155,8 +160,9 @@ def setup_module(mod): # Api call verify whether BGP is converged # Ipv4 bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, ("setup_module :Failed \n Error:" - " {}".format(bgp_convergence)) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) ADDR_TYPES = check_address_types() logger.info("Running setup_module() done") @@ -176,9 +182,11 @@ def teardown_module(mod): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}".\ - format(time.asctime(time.localtime(time.time())))) - logger.info("="*40) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + ##################################################### # @@ -213,26 +221,24 @@ def test_create_bgp_standard_large_community_list(request): "action": "permit", "name": "LC_1_STD", "value": "2:1:1 2:1:2 1:2:3", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "LC_2_STD", "value": "3:1:1 3:1:2", - "large": True - } + "large": True, + }, ] } } result = create_bgp_community_lists(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create srtandard large community list with in-correct values") input_dict = { @@ -243,20 +249,18 @@ def test_create_bgp_standard_large_community_list(request): "action": "permit", "name": "LC_1_STD_ERR", 
"value": "0:0:0", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) ## TODO should fail step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -288,19 +292,17 @@ def test_create_bgp_expanded_large_community_list(request): "action": "permit", "name": "LC_1_EXP", "value": "1:1:200 1:2:* 3:2:1", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -331,14 +333,13 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "action": "permit", "name": "LC_DEL", "value": "1:2:1 1:3:1 2:1:1 2:2:2 3:3:3", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_2 = { @@ -351,9 +352,9 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "set": { "large_community": { "num": "1:2:1 1:3:1 2:10:1 3:3:3 4:4:4 5:5:5", - "action": "additive" + "action": "additive", } - } + }, } ] } @@ -364,20 +365,14 @@ def test_modify_large_community_lists_referenced_by_rmap(request): { "action": "permit", "seq_id": "10", - "set": { - "large_comm_list": { - "id": "LC_DEL", - "delete": True - } - } + "set": {"large_comm_list": {"id": "LC_DEL", "delete": True}}, } ] } - } + }, } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_3 = { @@ -386,42 +381,42 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ], + "advertise_networks": [{"network": "200.50.2.0/32"}], "neighbor": { "r2": { "dest_link": { "r1": { - "route_maps": [{ - "name": "RM_R2_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R2_OUT", + "direction": "out", + } + ] } } } - } + }, } }, "ipv6": { "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ], + "advertise_networks": [{"network": "1::1/128"}], "neighbor": { "r2": { "dest_link": { "r1": { - "route_maps": [{ - "name": "RM_R2_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R2_OUT", + "direction": "out", + } + ] } } } - } + }, } - } + }, } } }, @@ -434,10 +429,9 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "r2": { "dest_link": { "r4": 
{ - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -450,35 +444,31 @@ def test_modify_large_community_lists_referenced_by_rmap(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify Community-list") dut = "r4" - input_dict_4 = { - "largeCommunity": "2:10:1 4:4:4 5:5:5" - } + input_dict_4 = {"largeCommunity": "2:10:1 4:4:4 5:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -511,17 +501,16 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "set": { "large_community": { "num": "200:200:1 200:200:10 200:200:20000", - "action": "additive" + "action": "additive", } - } + }, } ] } } } result = create_route_maps(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_2 = { @@ -530,18 +519,12 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -554,10 +537,9 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "RM_LC1", "direction": "out"} + ] } } } @@ -570,57 +552,49 @@ def test_large_community_lists_with_rmap_apply_and_remove(request): "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_LC1", - "direction": "out" - }] + "route_maps": [ + {"name": "RM_LC1", "direction": "out"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" - input_dict_4 = { - "largeCommunity": "200:200:1 200:200:10 200:200:20000" - } + input_dict_4 = {"largeCommunity": "200:200:1 200:200:10 200:200:20000"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Delete route map reference by community-list") - input_dict_3 = { - "r4": { - "route_maps": ["RM_LC1"] - } - } + input_dict_3 = {"r4": {"route_maps": ["RM_LC1"]}} result = delete_route_maps(tgen, input_dict_3) - assert 
result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify route map is deleted") result = verify_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4, expected=False) + result = verify_bgp_community( + tgen, adt, dut, NETWORKS[adt], input_dict_4, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -653,10 +627,10 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "set": { "large_community": { "num": "0:0:1 0:0:10 0:0:100 2:0:1 2:0:2 2:0:3" - " 2:0:4 2:0:5", - "action": "additive" + " 2:0:4 2:0:5", + "action": "additive", } - } + }, } ], "RM_R4_OUT": [ @@ -666,17 +640,16 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "set": { "large_community": { "num": "0:0:1 0:0:10 0:0:10000 2:0:1 2:0:2", - "action": "additive" + "action": "additive", } - } + }, } - ] + ], } } } result = create_route_maps(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_2 = { @@ -685,18 +658,12 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -709,23 +676,24 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } }, "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } - } + }, } } }, @@ -735,45 +703,44 @@ def test_duplicate_large_community_list_attributes_not_transitive(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } }, "r6": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } - } + }, } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" input_dict_4 = { - "largeCommunity": - "0:0:1 0:0:10 0:0:100 0:0:10000 2:0:1 2:0:2 2:0:3 2:0:4 2:0:5" + "largeCommunity": "0:0:1 0:0:10 0:0:100 0:0:10000 2:0:1 2:0:2 2:0:3 2:0:4 2:0:5" } for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, 
NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -806,10 +773,10 @@ def test_large_community_lists_with_rmap_set_none(request): "set": { "large_community": { "num": "0:0:1 0:0:10 0:0:100 2:0:1 2:0:2 2:0:3" - " 2:0:4", - "action": "additive" + " 2:0:4", + "action": "additive", } - } + }, } ] } @@ -820,19 +787,14 @@ def test_large_community_lists_with_rmap_set_none(request): { "action": "permit", "seq_id": "10", - "set": { - "large_community": { - "num": "none" - } - } + "set": {"large_community": {"num": "none"}}, } ] } - } + }, } result = create_route_maps(tgen, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_2 = { @@ -841,18 +803,12 @@ def test_large_community_lists_with_rmap_set_none(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -865,10 +821,9 @@ def test_large_community_lists_with_rmap_set_none(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -881,16 +836,15 @@ def test_large_community_lists_with_rmap_set_none(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } }, @@ -903,10 +857,9 @@ def test_large_community_lists_with_rmap_set_none(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } @@ -919,31 +872,29 @@ def test_large_community_lists_with_rmap_set_none(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify Community-list") dut = "r6" for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - expected=False) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], expected=False) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -969,24 +920,17 @@ def test_lcomm_lists_with_redistribute_static_connected_rmap(request): input_dict = { "r1": { "static_routes": [ - { - "network": "200.50.2.0/32", - "next_hop": "10.0.0.6" - }, - { - "network": "1::1/128", - "next_hop": "fd00:0:0:1::2" - } + {"network": "200.50.2.0/32", "next_hop": "10.0.0.6"}, + {"network": "1::1/128", "next_hop": "fd00:0:0:1::2"}, ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - 
tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("redistribute static routes") input_dict_1 = { - "r1":{ + "r1": { "bgp": { "address_family": { "ipv4": { @@ -994,12 +938,12 @@ def test_lcomm_lists_with_redistribute_static_connected_rmap(request): "redistribute": [ { "redist_type": "static", - "attribute": "route-map RM_R2_OUT" + "attribute": "route-map RM_R2_OUT", }, { "redist_type": "connected", - "attribute": "route-map RM_R2_OUT" - } + "attribute": "route-map RM_R2_OUT", + }, ] } }, @@ -1008,82 +952,74 @@ def test_lcomm_lists_with_redistribute_static_connected_rmap(request): "redistribute": [ { "redist_type": "static", - "attribute": "route-map RM_R2_OUT" + "attribute": "route-map RM_R2_OUT", }, { "redist_type": "connected", - "attribute": "route-map RM_R2_OUT" - } + "attribute": "route-map RM_R2_OUT", + }, ] } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_3 = { "r1": { "route_maps": { - "RM_R2_OUT": [{ - "action": "permit", - "set": { - "large_community": {"num":"55:55:55 555:555:555"} - } - }] + "RM_R2_OUT": [ + { + "action": "permit", + "set": {"large_community": {"num": "55:55:55 555:555:555"}}, + } + ] } - } + } } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - step("Verify large-community-list for static and connected ipv4 route on" - " r2") + step("Verify large-community-list for static and connected ipv4 route on" " r2") - input_dict_5 = { - "largeCommunity": "55:55:55 555:555:555" - } + input_dict_5 = {"largeCommunity": "55:55:55 555:555:555"} if "ipv4" in ADDR_TYPES: dut = "r2" networks = ["200.50.2.0/32", "1.0.1.17/32"] - result = verify_bgp_community(tgen, "ipv4", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - step("Verify large-community-list for static and connected ipv4 route" - " on r4") + step("Verify large-community-list for static and connected ipv4 route" " on r4") dut = "r4" networks = ["200.50.2.0/32", "1.0.1.17/32"] - result = verify_bgp_community(tgen, "ipv4", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) if "ipv6" in ADDR_TYPES: - step("Verify large-community-list for static and connected ipv6 route" - " on r2") + step("Verify large-community-list for static and connected ipv6 route" " on r2") dut = "r2" networks = ["1::1/128", "2001:db8:f::1:17/128"] - result = verify_bgp_community(tgen, "ipv6", dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv6", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - step("Verify large-community-list for static and connected ipv6 route" - " on r4") + step("Verify large-community-list for static and connected ipv6 route" " on r4") dut = "r4" networks = ["1::1/128", "2001:db8:f::1:17/128"] - result = verify_bgp_community(tgen, "ipv6", 
dut, networks, - input_dict_5) + result = verify_bgp_community(tgen, "ipv6", dut, networks, input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1115,14 +1051,13 @@ def test_large_community_lists_with_rmap_set_delete(request): "action": "permit", "name": "Test", "value": "1:2:1 1:1:10 1:3:100", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_3 = { @@ -1132,12 +1067,7 @@ def test_large_community_lists_with_rmap_set_delete(request): { "action": "permit", "seq_id": "10", - "set": { - "large_comm_list": { - "id": "Test", - "delete": True - } - } + "set": {"large_comm_list": {"id": "Test", "delete": True}}, } ] } @@ -1151,18 +1081,17 @@ def test_large_community_lists_with_rmap_set_delete(request): "set": { "large_community": { "num": "1:2:1 1:1:10 1:3:100 2:1:1 2:2:2 2:3:3" - " 2:4:4 2:5:5", - "action": "additive" + " 2:4:4 2:5:5", + "action": "additive", } - } + }, } ] } - } + }, } result = create_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_4 = { @@ -1171,18 +1100,12 @@ def test_large_community_lists_with_rmap_set_delete(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1195,10 +1118,9 @@ def test_large_community_lists_with_rmap_set_delete(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -1211,16 +1133,15 @@ def test_large_community_lists_with_rmap_set_delete(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } }, @@ -1233,10 +1154,9 @@ def test_large_community_lists_with_rmap_set_delete(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } @@ -1249,34 +1169,30 @@ def test_large_community_lists_with_rmap_set_delete(request): "r4": { "dest_link": { "r6": { - "route_maps": [{ - "name": "RM_R6_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R6_IN", "direction": "in"} + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" - input_dict_5 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_5 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_5) + result = 
verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_5) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1308,18 +1224,15 @@ def test_large_community_lists_with_no_send_community(request): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } - } + "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_3 = { @@ -1328,18 +1241,12 @@ def test_large_community_lists_with_no_send_community(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1352,10 +1259,12 @@ def test_large_community_lists_with_no_send_community(request): "r6": { "dest_link": { "r5": { - "route_maps": [{ - "name": "RM_R6_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R6_OUT", + "direction": "out", + } + ] } } } @@ -1368,34 +1277,33 @@ def test_large_community_lists_with_no_send_community(request): "r6": { "dest_link": { "r5": { - "route_maps": [{ - "name": "RM_R6_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R6_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r6" - input_dict_4 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_4 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Configure neighbor for no-send-community") input_dict_5 = { @@ -1406,11 +1314,7 @@ def test_large_community_lists_with_no_send_community(request): "unicast": { "neighbor": { "r6": { - "dest_link": { - "r5": { - "no_send_community": "large" - } - } + "dest_link": {"r5": {"no_send_community": "large"}} } } } @@ -1419,29 +1323,26 @@ def test_large_community_lists_with_no_send_community(request): "unicast": { "neighbor": { "r6": { - "dest_link": { - "r5": { - "no_send_community": "large" - } - } + "dest_link": {"r5": {"no_send_community": "large"}} } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify Community-list") for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4, expected=False) + result = verify_bgp_community( + tgen, adt, dut, NETWORKS[adt], input_dict_4, expected=False + ) assert result is not True, 
"Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1472,14 +1373,15 @@ def test_create_large_community_lists_with_no_attribute_values(request): "community_type": "standard", "action": "permit", "name": "Test1", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_1) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1512,18 +1414,15 @@ def test_large_community_lists_with_rmap_match_exact(request): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } - } + "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map and advertise networks") input_dict_3 = { @@ -1532,18 +1431,12 @@ def test_large_community_lists_with_rmap_match_exact(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1556,10 +1449,12 @@ def test_large_community_lists_with_rmap_match_exact(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -1572,24 +1467,25 @@ def test_large_community_lists_with_rmap_match_exact(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -1600,19 +1496,17 @@ def test_large_community_lists_with_rmap_match_exact(request): "action": "permit", "name": "EXACT", "value": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -1624,16 +1518,15 @@ def test_large_community_lists_with_rmap_match_exact(request): "seq_id": "10", "match": { "large-community-list": ["EXACT"], - "match_exact": True - } + "match_exact": True, + }, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) 
step("Configure neighbor for route map") input_dict_6 = { @@ -1646,10 +1539,9 @@ def test_large_community_lists_with_rmap_match_exact(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -1662,34 +1554,30 @@ def test_large_community_lists_with_rmap_match_exact(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_4 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_4 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1718,20 +1606,21 @@ def test_large_community_lists_with_rmap_match_all(request): input_dict_2 = { "r2": { "route_maps": { - "RM_R4_OUT": [{ - "action": "permit", - "set": { - "large_community": { - "num": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + "RM_R4_OUT": [ + { + "action": "permit", + "set": { + "large_community": { + "num": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" + } + }, } - }] + ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_3 = { @@ -1740,18 +1629,12 @@ def test_large_community_lists_with_rmap_match_all(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1764,10 +1647,12 @@ def test_large_community_lists_with_rmap_match_all(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -1780,23 +1665,24 @@ def test_large_community_lists_with_rmap_match_all(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -1807,19 +1693,17 @@ def test_large_community_lists_with_rmap_match_all(request): "action": "permit", "name": "ALL", "value": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5", - "large": True + "large": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, 
"Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -1829,19 +1713,14 @@ def test_large_community_lists_with_rmap_match_all(request): { "action": "permit", "seq_id": "10", - "match": { - "large-community-list": { - "id": "ALL" - } - } + "match": {"large-community-list": {"id": "ALL"}}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_6 = { @@ -1854,10 +1733,9 @@ def test_large_community_lists_with_rmap_match_all(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -1870,34 +1748,30 @@ def test_large_community_lists_with_rmap_match_all(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_4 = { - "largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_4 = {"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_4) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -1929,18 +1803,15 @@ def test_large_community_lists_with_rmap_match_any(request): "action": "permit", "seq_id": "10", "set": { - "large_community": { - "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } - } + "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_3 = { @@ -1949,18 +1820,12 @@ def test_large_community_lists_with_rmap_match_any(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -1973,10 +1838,12 @@ def test_large_community_lists_with_rmap_match_any(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -1989,23 +1856,24 @@ def 
test_large_community_lists_with_rmap_match_any(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } result = create_router_bgp(tgen, topo, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -2016,40 +1884,38 @@ def test_large_community_lists_with_rmap_match_any(request): "action": "permit", "name": "ANY", "value": "2:1:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": "2:2:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": "2:3:1", - "large": True + "large": True, }, { "community_type": "standard", "action": "permit", "name": "ANY", "value": "2:4:1", - "large": True - } + "large": True, + }, ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -2059,19 +1925,14 @@ def test_large_community_lists_with_rmap_match_any(request): { "action": "permit", "seq_id": "10", - "match": { - "large-community-list": { - "id": "ANY" - } - } + "match": {"large-community-list": {"id": "ANY"}}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_6 = { @@ -2084,10 +1945,9 @@ def test_large_community_lists_with_rmap_match_any(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -2100,34 +1960,30 @@ def test_large_community_lists_with_rmap_match_any(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_7 = { - "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5" - } + input_dict_7 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_7) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -2162,18 +2018,15 @@ def test_large_community_lists_with_rmap_match_regex(request): "large_community": { "num": "1:1:1 
1:1:2 2:1:3 2:1:4 2:1:5", }, - "community": { - "num": "1:1 1:2 1:3 1:4 1:5" - } - } + "community": {"num": "1:1 1:2 1:3 1:4 1:5"}, + }, } ] } } } result = create_route_maps(tgen, input_dict_2) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_3 = { @@ -2182,18 +2035,12 @@ def test_large_community_lists_with_rmap_match_regex(request): "address_family": { "ipv4": { "unicast": { - "advertise_networks": [ - {"network": "200.50.2.0/32"} - ] + "advertise_networks": [{"network": "200.50.2.0/32"}] } }, "ipv6": { - "unicast": { - "advertise_networks": [ - {"network": "1::1/128"} - ] - } - } + "unicast": {"advertise_networks": [{"network": "1::1/128"}]} + }, } } }, @@ -2206,10 +2053,12 @@ def test_large_community_lists_with_rmap_match_regex(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } @@ -2222,23 +2071,24 @@ def test_large_community_lists_with_rmap_match_regex(request): "r4": { "dest_link": { "r2": { - "route_maps": [{ - "name": "RM_R4_OUT", - "direction": "out" - }] + "route_maps": [ + { + "name": "RM_R4_OUT", + "direction": "out", + } + ] } } } } } - } + }, } } - } + }, } - result = create_router_bgp(tgen, topo,input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + result = create_router_bgp(tgen, topo, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create standard large commumity-list") input_dict_4 = { @@ -2249,26 +2099,24 @@ def test_large_community_lists_with_rmap_match_regex(request): "action": "permit", "name": "ALL", "value": "1:1:1 2:1:3 2:1:4 2:1:5", - "large": True + "large": True, }, { "community_type": "expanded", "action": "permit", "name": "EXP_ALL", "value": "1:1:1 2:1:[3-5]", - "large": True - } + "large": True, + }, ] } } result = create_bgp_community_lists(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify BGP large community is created") result = verify_create_community_list(tgen, input_dict_4) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -2278,19 +2126,14 @@ def test_large_community_lists_with_rmap_match_regex(request): { "action": "permit", "seq_id": "10", - "match": { - "large_community_list": { - "id": "ALL", - }, - }, + "match": {"large_community_list": {"id": "ALL",},}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure neighbor for route map") input_dict_6 = { @@ -2303,10 +2146,9 @@ def test_large_community_lists_with_rmap_match_regex(request): "r2": { "dest_link": { "r4": { - "route_maps": [{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } @@ -2319,48 +2161,38 @@ def test_large_community_lists_with_rmap_match_regex(request): "r2": { "dest_link": { "r4": { - "route_maps": 
[{ - "name": "RM_R4_IN", - "direction": "in" - }] + "route_maps": [ + {"name": "RM_R4_IN", "direction": "in"} + ] } } } } } - } + }, } } } } result = create_router_bgp(tgen, topo, input_dict_6) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_7 = { - "largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5" - } + input_dict_7 = {"largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_7) + result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_7) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Delete route map reference by community-list") - input_dict_3 = { - "r4": { - "route_maps": ["RM_R4_IN"] - } - } + input_dict_3 = {"r4": {"route_maps": ["RM_R4_IN"]}} result = delete_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_route_maps(tgen, input_dict_3) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Create route map") input_dict_5 = { @@ -2370,35 +2202,29 @@ def test_large_community_lists_with_rmap_match_regex(request): { "action": "permit", "seq_id": "20", - "match": { - "large_community_list": { - "id": "EXP_ALL", - }, - }, + "match": {"large_community_list": {"id": "EXP_ALL",},}, } ] } } } result = create_route_maps(tgen, input_dict_5) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("clear ip bgp") - result = clear_bgp_and_verify(tgen, topo, 'r4') - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + result = clear_bgp_and_verify(tgen, topo, "r4") + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Verify large-community-list") dut = "r4" - input_dict_7 = { - "largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5" - } + input_dict_7 = {"largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"} for adt in ADDR_TYPES: - result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], - input_dict_7, expected=False) - assert result is not True, "Testcase {} : Failed \n Error: {}".\ - format(tc_name, result) + result = verify_bgp_community( + tgen, adt, dut, NETWORKS[adt], input_dict_7, expected=False + ) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) diff --git a/tests/topotests/bgp_link_bw_ip/__init__.py b/tests/topotests/bgp_link_bw_ip/__init__.py new file mode 100755 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/__init__.py diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-1.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-1.json new file mode 100644 index 0000000000..3e3c35ee08 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-1.json @@ -0,0 +1,19 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-2.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-2.json new file mode 100644 index 0000000000..f07e89b495 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-2.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65303:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65201:375000 (3.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-3.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-3.json new file mode 100644 index 0000000000..3501d12e70 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-3.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65303:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-4.json b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-4.json new file mode 100644 index 0000000000..b1ed004490 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-4.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.11\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65303:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65201:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgp-route-5.json 
b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-5.json new file mode 100644 index 0000000000..89469b8ace --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgp-route-5.json @@ -0,0 +1,29 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "nexthops":[ + { + "ip":"11.1.1.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65201:375000 (3.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.1.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r1/bgpd.conf new file mode 100644 index 0000000000..595e244a21 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/bgpd.conf @@ -0,0 +1,8 @@ +hostname r1 +! +router bgp 65101 + bgp router-id 11.1.1.1 + bgp bestpath as-path multipath-relax + neighbor 11.1.1.2 remote-as external + neighbor 11.1.1.6 remote-as external +! diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-1.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-1.json new file mode 100644 index 0000000000..3c02e2675d --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-1.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":25 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":75 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-2.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-2.json new file mode 100644 index 0000000000..3c2d42caac --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-2.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":33 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":66 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-3.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-3.json new file mode 100644 index 0000000000..3d80018cea --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-3.json @@ -0,0 +1,20 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":33 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":66 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-4.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-4.json new file mode 100644 index 0000000000..6b757ef9ed --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-4.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-5.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-5.json new file mode 100644 index 0000000000..641ecabf47 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-5.json @@ -0,0 +1,20 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-6.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-6.json new file mode 100644 index 0000000000..6ed3f8ef55 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-6.json @@ -0,0 +1,15 @@ +{ + 
"198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-7.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-7.json new file mode 100644 index 0000000000..95531d99be --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-7.json @@ -0,0 +1,15 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-8.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-8.json new file mode 100644 index 0000000000..beac501360 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-8.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/ip-route-9.json b/tests/topotests/bgp_link_bw_ip/r1/ip-route-9.json new file mode 100644 index 0000000000..eb27ce2633 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/ip-route-9.json @@ -0,0 +1,20 @@ +{ + "198.10.1.11\/32":[ + { + "prefix":"198.10.1.11\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.1.6", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.1.2", + "weight":100 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/v4_route.json b/tests/topotests/bgp_link_bw_ip/r1/v4_route.json new file mode 100644 index 0000000000..d40a06d872 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/v4_route.json @@ -0,0 +1,104 @@ +{ + "10.0.1.1\/32":[ + { + "prefix":"10.0.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "table":254, + "internalStatus":0, + "internalFlags":0, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":9, + "ip":"0.0.0.0", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true, + "onLink":true + } + ] + }, + { + "prefix":"10.0.1.1\/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "table":254, + "internalStatus":16, + "internalFlags":8, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":3, + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.3.4\/32":[ + { + "prefix":"10.0.3.4\/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "table":254, + "internalStatus":16, + "internalFlags":8, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":3, + "fib":true, + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r1-eth1", + "active":true + } + ] + } + ], + "10.0.20.1\/32":[ + { + "prefix":"10.0.20.1\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "table":254, + "internalStatus":16, + "internalFlags":8, + "internalNextHopNum":1, + "internalNextHopActiveNum":1, + "nexthops":[ + { + "flags":11, + "fib":true, + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r1-eth1", + "active":true, + "onLink":true + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r1/zebra.conf 
b/tests/topotests/bgp_link_bw_ip/r1/zebra.conf new file mode 100644 index 0000000000..0fc81f9bac --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r1/zebra.conf @@ -0,0 +1,7 @@ +! +interface r1-eth0 + ip address 11.1.1.1/30 +! +interface r1-eth1 + ip address 11.1.1.5/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r10/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r10/bgpd.conf new file mode 100644 index 0000000000..88a7bdce22 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r10/bgpd.conf @@ -0,0 +1,15 @@ +hostname r10 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65354 + bgp router-id 11.1.6.2 + neighbor 11.1.6.1 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r10/zebra.conf b/tests/topotests/bgp_link_bw_ip/r10/zebra.conf new file mode 100644 index 0000000000..1a24fdaea7 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r10/zebra.conf @@ -0,0 +1,6 @@ +interface r10-eth0 + ip address 11.1.6.2/30 +! +interface r10-eth1 + ip address 50.1.1.10/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgp-route-1.json b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-1.json new file mode 100644 index 0000000000..3c38689a37 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-1.json @@ -0,0 +1,19 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgp-route-2.json b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-2.json new file mode 100644 index 0000000000..1895cd822e --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-2.json @@ -0,0 +1,19 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgp-route-3.json b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-3.json new file mode 100644 index 0000000000..dfc4171bad --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgp-route-3.json @@ -0,0 +1,32 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "extendedCommunity":{ + "string":"LB:65302:125000 (1.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "bestpath":{ + "overall":true + }, + "extendedCommunity":{ + "string":"LB:65301:250000 (2.000 Mbps)" + }, + "nexthops":[ + { + "ip":"11.1.2.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r2/bgpd.conf new file mode 100644 index 0000000000..2b6e9aeb6f --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/bgpd.conf @@ -0,0 +1,9 @@ +hostname r2 +! +router bgp 65201 + bgp router-id 11.1.2.1 + bgp bestpath as-path multipath-relax + neighbor 11.1.1.1 remote-as external + neighbor 11.1.2.2 remote-as external + neighbor 11.1.2.6 remote-as external +! 
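The link-bandwidth extended community shown in the bgp-route-*.json fixtures encodes bandwidth in bytes per second ("LB:65301:125000" is 125000 B/s = 1.000 Mbps, "LB:65201:375000" is 3.000 Mbps), and the ip-route-*.json fixtures expect each next hop's weight to be roughly its proportional share of 100 (25/75 for 1 Mbps vs 3 Mbps in r1/ip-route-1.json, 33/66 for 1 Mbps vs 2 Mbps in r1/ip-route-2.json). The short sketch below only illustrates that relationship under a simple proportional-scaling assumption; the helper name is invented for illustration and this is not zebra's actual weight-normalization code.

    def expected_weights(bandwidths_bps):
        """Proportional share of 100 for each next hop, keyed on the
        advertised link bandwidth in bytes per second (illustrative only)."""
        total = sum(bandwidths_bps)
        return [bw * 100 // total for bw in bandwidths_bps]

    # 1 Mbps vs 3 Mbps -> [25, 75], the weights expected in r1/ip-route-1.json
    print(expected_weights([125000, 375000]))
    # 1 Mbps vs 2 Mbps -> [33, 66], the weights expected in r1/ip-route-2.json
    print(expected_weights([125000, 250000]))

A single remaining path falls out of the same view as weight 100 (r1/ip-route-6.json), while paths that carry no link-bandwidth at all are expected with the default weight of 1 (r1/ip-route-4.json and r4/ip-route-1.json).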
diff --git a/tests/topotests/bgp_link_bw_ip/r2/ip-route-1.json b/tests/topotests/bgp_link_bw_ip/r2/ip-route-1.json new file mode 100644 index 0000000000..131100a684 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/ip-route-1.json @@ -0,0 +1,19 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "protocol":"bgp", + "selected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.2.2", + "interfaceName":"r2-eth1", + "active":true, + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/ip-route-2.json b/tests/topotests/bgp_link_bw_ip/r2/ip-route-2.json new file mode 100644 index 0000000000..7e2fa6be25 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/ip-route-2.json @@ -0,0 +1,20 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.2.6", + "weight":33 + }, + { + "fib":true, + "ip":"11.1.2.2", + "weight":66 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/ip-route-3.json b/tests/topotests/bgp_link_bw_ip/r2/ip-route-3.json new file mode 100644 index 0000000000..d0509bbd29 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/ip-route-3.json @@ -0,0 +1,15 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.2.2", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r2/zebra.conf b/tests/topotests/bgp_link_bw_ip/r2/zebra.conf new file mode 100644 index 0000000000..23573a108d --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r2/zebra.conf @@ -0,0 +1,10 @@ +! +interface r2-eth0 + ip address 11.1.1.2/30 +! +interface r2-eth1 + ip address 11.1.2.1/30 +! +interface r2-eth2 + ip address 11.1.2.5/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r3/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r3/bgpd.conf new file mode 100644 index 0000000000..8b7c0c1792 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r3/bgpd.conf @@ -0,0 +1,8 @@ +hostname r3 +! +router bgp 65202 + bgp router-id 11.1.3.1 + bgp bestpath as-path multipath-relax + neighbor 11.1.1.5 remote-as external + neighbor 11.1.3.2 remote-as external +! diff --git a/tests/topotests/bgp_link_bw_ip/r3/zebra.conf b/tests/topotests/bgp_link_bw_ip/r3/zebra.conf new file mode 100644 index 0000000000..d667669821 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r3/zebra.conf @@ -0,0 +1,7 @@ +! +interface r3-eth0 + ip address 11.1.1.6/30 +! +interface r3-eth1 + ip address 11.1.3.1/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r4/bgp-route-1.json b/tests/topotests/bgp_link_bw_ip/r4/bgp-route-1.json new file mode 100644 index 0000000000..87d1ae0b44 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/bgp-route-1.json @@ -0,0 +1,23 @@ +{ + "prefix":"198.10.1.1\/32", + "paths":[ + { + "valid":true, + "multipath":true, + "nexthops":[ + { + "ip":"11.1.4.6" + } + ] + }, + { + "valid":true, + "multipath":true, + "nexthops":[ + { + "ip":"11.1.4.2" + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r4/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r4/bgpd.conf new file mode 100644 index 0000000000..fa1f37843f --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/bgpd.conf @@ -0,0 +1,28 @@ +! +log file bgpd.log +! +debug bgp updates +debug bgp zebra +debug bgp bestpath 198.10.1.1/32 +! +hostname r4 +! +ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32 +! +route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths +! 
+route-map anycast_ip permit 20 +! +router bgp 65301 + bgp router-id 11.1.4.1 + bgp bestpath as-path multipath-relax + neighbor 11.1.2.1 remote-as external + neighbor 11.1.4.2 remote-as external + neighbor 11.1.4.6 remote-as external + ! + address-family ipv4 unicast + neighbor 11.1.2.1 route-map anycast_ip out + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r4/ip-route-1.json b/tests/topotests/bgp_link_bw_ip/r4/ip-route-1.json new file mode 100644 index 0000000000..a9ccf07c82 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/ip-route-1.json @@ -0,0 +1,21 @@ +{ + "198.10.1.1\/32":[ + { + "prefix":"198.10.1.1\/32", + "protocol":"bgp", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"11.1.4.2", + "weight":1 + }, + { + "fib":true, + "ip":"11.1.4.6", + "weight":1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_link_bw_ip/r4/zebra.conf b/tests/topotests/bgp_link_bw_ip/r4/zebra.conf new file mode 100644 index 0000000000..ef61f7eb1b --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r4/zebra.conf @@ -0,0 +1,10 @@ +! +interface r4-eth0 + ip address 11.1.2.2/30 +! +interface r4-eth1 + ip address 11.1.4.1/30 +! +interface r4-eth2 + ip address 11.1.4.5/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r5/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r5/bgpd.conf new file mode 100644 index 0000000000..8614f3e178 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r5/bgpd.conf @@ -0,0 +1,20 @@ +hostname r5 +! +ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32 +! +route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths +! +route-map anycast_ip permit 20 +! +router bgp 65302 + bgp router-id 11.1.5.1 + bgp bestpath as-path multipath-relax + neighbor 11.1.2.5 remote-as external + neighbor 11.1.5.2 remote-as external + ! + address-family ipv4 unicast + neighbor 11.1.2.5 route-map anycast_ip out + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r5/zebra.conf b/tests/topotests/bgp_link_bw_ip/r5/zebra.conf new file mode 100644 index 0000000000..66c65964e2 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r5/zebra.conf @@ -0,0 +1,7 @@ +! +interface r5-eth0 + ip address 11.1.2.6/30 +! +interface r5-eth1 + ip address 11.1.5.1/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r6/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r6/bgpd.conf new file mode 100644 index 0000000000..3e5c6df6e1 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r6/bgpd.conf @@ -0,0 +1,20 @@ +hostname r6 +! +ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32 +! +route-map anycast_ip permit 10 + match ip address prefix-list anycast_ip + set extcommunity bandwidth num-multipaths +! +route-map anycast_ip permit 20 +! +router bgp 65303 + bgp router-id 11.1.6.1 + bgp bestpath as-path multipath-relax + neighbor 11.1.3.1 remote-as external + neighbor 11.1.6.2 remote-as external + ! + address-family ipv4 unicast + neighbor 11.1.3.1 route-map anycast_ip out + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r6/zebra.conf b/tests/topotests/bgp_link_bw_ip/r6/zebra.conf new file mode 100644 index 0000000000..66ff563269 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r6/zebra.conf @@ -0,0 +1,7 @@ +! +interface r6-eth0 + ip address 11.1.3.2/30 +! +interface r6-eth1 + ip address 11.1.6.1/30 +! diff --git a/tests/topotests/bgp_link_bw_ip/r7/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r7/bgpd.conf new file mode 100644 index 0000000000..7862023f55 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r7/bgpd.conf @@ -0,0 +1,15 @@ +hostname r7 +! 
+ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65351 + bgp router-id 11.1.4.2 + neighbor 11.1.4.1 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r7/zebra.conf b/tests/topotests/bgp_link_bw_ip/r7/zebra.conf new file mode 100644 index 0000000000..38e36cac30 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r7/zebra.conf @@ -0,0 +1,6 @@ +interface r7-eth0 + ip address 11.1.4.2/30 +! +interface r7-eth1 + ip address 50.1.1.7/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/r8/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r8/bgpd.conf new file mode 100644 index 0000000000..02110d9175 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r8/bgpd.conf @@ -0,0 +1,15 @@ +hostname r8 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65352 + bgp router-id 11.1.4.6 + neighbor 11.1.4.5 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r8/zebra.conf b/tests/topotests/bgp_link_bw_ip/r8/zebra.conf new file mode 100644 index 0000000000..1369e19c06 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r8/zebra.conf @@ -0,0 +1,6 @@ +interface r8-eth0 + ip address 11.1.4.6/30 +! +interface r8-eth1 + ip address 50.1.1.8/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/r9/bgpd.conf b/tests/topotests/bgp_link_bw_ip/r9/bgpd.conf new file mode 100644 index 0000000000..d64663fa16 --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r9/bgpd.conf @@ -0,0 +1,15 @@ +hostname r9 +! +ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32 +! +route-map redist permit 10 + match ip address prefix-list redist +! +router bgp 65353 + bgp router-id 11.1.5.2 + neighbor 11.1.5.1 remote-as external + ! + address-family ipv4 unicast + redistribute connected route-map redist + ! +! diff --git a/tests/topotests/bgp_link_bw_ip/r9/zebra.conf b/tests/topotests/bgp_link_bw_ip/r9/zebra.conf new file mode 100644 index 0000000000..c73caf3bfc --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/r9/zebra.conf @@ -0,0 +1,6 @@ +interface r9-eth0 + ip address 11.1.5.2/30 +! +interface r9-eth1 + ip address 50.1.1.9/32 +! diff --git a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py new file mode 100755 index 0000000000..86eb2969ce --- /dev/null +++ b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python + +# +# test_bgp_linkbw_ip.py +# +# Copyright (c) 2020 by +# Cumulus Networks, Inc +# Vivek Venkatraman +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +test_bgp_linkbw_ip.py: Test weighted ECMP using BGP link-bandwidth +""" + +import os +import re +import sys +from functools import partial +import pytest +import json + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, '../')) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +""" +This topology is for validating one of the primary use cases for +weighted ECMP (a.k.a. Unequal cost multipath) using BGP link-bandwidth: +https://tools.ietf.org/html/draft-mohanty-bess-ebgp-dmz + +The topology consists of two PODs. Pod-1 consists of a spine switch +and two leaf switches, with two servers attached to the first leaf and +one to the second leaf. Pod-2 consists of one spine and one leaf, with +one server connected to the leaf. The PODs are connected by a super-spine +switch. + +Note that the use of the term "switch" above is in keeping with common +data-center terminology. These devices are all regular routers; for +this scenario, the servers are also routers as they have to announce +anycast IP (VIP) addresses via BGP. +""" + +class BgpLinkBwTopo(Topo): + "Test topology builder" + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # Create 10 routers - 1 super-spine, 2 spines, 3 leafs + # and 4 servers + routers = {} + for i in range(1, 11): + routers[i] = tgen.add_router('r{}'.format(i)) + + # Create 13 "switches" - to interconnect the above routers + switches = {} + for i in range(1, 14): + switches[i] = tgen.add_switch('s{}'.format(i)) + + # Interconnect R1 (super-spine) to R2 and R3 (the two spines) + switches[1].add_link(tgen.gears['r1']) + switches[1].add_link(tgen.gears['r2']) + switches[2].add_link(tgen.gears['r1']) + switches[2].add_link(tgen.gears['r3']) + + # Interconnect R2 (spine in pod-1) to R4 and R5 (the associated + # leaf switches) + switches[3].add_link(tgen.gears['r2']) + switches[3].add_link(tgen.gears['r4']) + switches[4].add_link(tgen.gears['r2']) + switches[4].add_link(tgen.gears['r5']) + + # Interconnect R3 (spine in pod-2) to R6 (associated leaf) + switches[5].add_link(tgen.gears['r3']) + switches[5].add_link(tgen.gears['r6']) + + # Interconnect leaf switches to servers + switches[6].add_link(tgen.gears['r4']) + switches[6].add_link(tgen.gears['r7']) + switches[7].add_link(tgen.gears['r4']) + switches[7].add_link(tgen.gears['r8']) + switches[8].add_link(tgen.gears['r5']) + switches[8].add_link(tgen.gears['r9']) + switches[9].add_link(tgen.gears['r6']) + switches[9].add_link(tgen.gears['r10']) + + # Create empty networks for the servers + switches[10].add_link(tgen.gears['r7']) + switches[11].add_link(tgen.gears['r8']) + switches[12].add_link(tgen.gears['r9']) + switches[13].add_link(tgen.gears['r10']) + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(BgpLinkBwTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, + os.path.join(CWD, '{}/zebra.conf'.format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, + os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + ) + + # Initialize all routers. 
+ tgen.start_router() + + #tgen.mininet_cli() + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + tgen.stop_topology() + +def test_bgp_linkbw_adv(): + "Test #1: Test BGP link-bandwidth advertisement based on number of multipaths" + logger.info('\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Configure anycast IP on server r7 + logger.info('Configure anycast IP on server r7') + + tgen.net['r7'].cmd('ip addr add 198.10.1.1/32 dev r7-eth1') + + # Check on spine router r2 for link-bw advertisement by leaf router r4 + logger.info('Check on spine router r2 for link-bw advertisement by leaf router r4') + + json_file = '{}/r2/bgp-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check on spine router r2 that default weight is used as there is no multipath + logger.info('Check on spine router r2 that default weight is used as there is no multipath') + + json_file = '{}/r2/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check on super-spine router r1 that link-bw has been propagated by spine router r2 + logger.info('Check on super-spine router r1 that link-bw has been propagated by spine router r2') + + json_file = '{}/r1/bgp-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_bgp_cumul_linkbw(): + "Test #2: Test cumulative link-bandwidth propagation" + logger.info('\nTest #2: Test cumulative link-bandwidth propagation') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + r4 = tgen.gears['r4'] + + # Configure anycast IP on additional server r8 + logger.info('Configure anycast IP on server r8') + + tgen.net['r8'].cmd('ip addr add 198.10.1.1/32 dev r8-eth1') + + # Check multipath on leaf router r4 + logger.info('Check multipath on leaf router r4') + + json_file = '{}/r4/bgp-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r4, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on leaf router r4' + assert result is None, assertmsg + + # Check regular ECMP is in effect on leaf router r4 + logger.info('Check regular ECMP is in effect on leaf router r4') + + json_file = '{}/r4/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = 
partial(topotest.router_json_cmp, + r4, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on leaf router r4' + assert result is None, assertmsg + + # Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths + logger.info('Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths') + + json_file = '{}/r2/bgp-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + +def test_weighted_ecmp(): + "Test #3: Test weighted ECMP - multipath with next hop weights" + logger.info('\nTest #3: Test weighted ECMP - multipath with next hop weights') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Configure anycast IP on additional server r9 + logger.info('Configure anycast IP on server r9') + + tgen.net['r9'].cmd('ip addr add 198.10.1.1/32 dev r9-eth1') + + # Check multipath on spine router r2 + logger.info('Check multipath on spine router r2') + json_file = '{}/r2/bgp-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check weighted ECMP is in effect on the spine router r2 + logger.info('Check weighted ECMP is in effect on the spine router r2') + + json_file = '{}/r2/ip-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Configure anycast IP on additional server r10 + logger.info('Configure anycast IP on server r10') + + tgen.net['r10'].cmd('ip addr add 198.10.1.1/32 dev r10-eth1') + + # Check multipath on super-spine router r1 + logger.info('Check multipath on super-spine router r1') + json_file = '{}/r1/bgp-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Check weighted ECMP is in effect on the super-spine router r1 + logger.info('Check weighted ECMP is in effect on the super-spine router r1') + json_file = '{}/r1/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_weighted_ecmp_link_flap(): + "Test #4: Test weighted ECMP 
rebalancing upon change (link flap)" + logger.info('\nTest #4: Test weighted ECMP rebalancing upon change (link flap)') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Bring down link on server r9 + logger.info('Bring down link on server r9') + + tgen.net['r9'].cmd('ip link set dev r9-eth1 down') + + # Check spine router r2 has only one path + logger.info('Check spine router r2 has only one path') + + json_file = '{}/r2/ip-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r2, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on spine router r2' + assert result is None, assertmsg + + # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1 + logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1') + + json_file = '{}/r1/bgp-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Bring up link on server r9 + logger.info('Bring up link on server r9') + + tgen.net['r9'].cmd('ip link set dev r9-eth1 up') + + # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1 + logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1') + + json_file = '{}/r1/bgp-route-2.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-1.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_weighted_ecmp_second_anycast_ip(): + "Test #5: Test weighted ECMP for a second anycast IP" + logger.info('\nTest #5: Test weighted ECMP for a second anycast IP') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + r2 = tgen.gears['r2'] + + # Configure anycast IP on additional server r7, r9 and r10 + logger.info('Configure anycast IP on server r7, r9 and r10') + + tgen.net['r7'].cmd('ip addr add 198.10.1.11/32 dev r7-eth1') + tgen.net['r9'].cmd('ip addr add 198.10.1.11/32 dev r9-eth1') + tgen.net['r10'].cmd('ip addr add 198.10.1.11/32 dev r10-eth1') + + # 
Check link-bandwidth and weighted ECMP on super-spine router r1 + logger.info('Check link-bandwidth and weighted ECMP on super-spine router r1') + + json_file = '{}/r1/bgp-route-4.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-3.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_paths_with_and_without_linkbw(): + "Test #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP" + logger.info('\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + + # Configure leaf router r6 to not advertise any link-bandwidth + logger.info('Configure leaf router r6 to not advertise any link-bandwidth') + + tgen.net['r6'].cmd('vtysh -c \"conf t\" -c \"router bgp 65303\" -c \"address-family ipv4 unicast\" -c \"no neighbor 11.1.3.1 route-map anycast_ip out\"') + + # Check link-bandwidth change on super-spine router r1 + logger.info('Check link-bandwidth change on super-spine router r1') + + json_file = '{}/r1/bgp-route-5.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Check super-spine router r1 resorts to regular ECMP + logger.info('Check super-spine router r1 resorts to regular ECMP') + + json_file = '{}/r1/ip-route-4.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-5.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +def test_linkbw_handling_options(): + "Test #7: Test different options for processing link-bandwidth on the receiver" + logger.info('\nTest #7: Test different options for processing link-bandwidth on the receiver') + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip('skipped because of router(s) failure') + + r1 = tgen.gears['r1'] + + # Configure super-spine r1 to skip multipaths without link-bandwidth + logger.info('Configure super-spine r1 to skip multipaths without link-bandwidth') + + tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" 
-c \"bgp bestpath bandwidth skip-missing\"') + + # Check super-spine router r1 resorts to only one path as other path is skipped + logger.info('Check super-spine router r1 resorts to only one path as other path is skipped') + + json_file = '{}/r1/ip-route-6.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-7.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + # Configure super-spine r1 to use default-weight for multipaths without link-bandwidth + logger.info('Configure super-spine r1 to use default-weight for multipaths without link-bandwidth') + + tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth default-weight-for-missing\"') + + # Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth + logger.info('Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth') + + json_file = '{}/r1/ip-route-8.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.1/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + + json_file = '{}/r1/ip-route-9.json'.format(CWD) + expected = json.loads(open(json_file).read()) + test_func = partial(topotest.router_json_cmp, + r1, 'show ip route 198.10.1.11/32 json', expected) + _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) + assertmsg = 'JSON output mismatch on super-spine router r1' + assert result is None, assertmsg + +if __name__ == '__main__': + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py index da4b67b087..56bb14411a 100644 --- a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py +++ b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py @@ -35,7 +35,7 @@ import time import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -43,20 +43,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r3"]) + 
switch.add_link(tgen.gears["r4"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,20 +68,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_remove_private_as(): tgen = get_topogen() @@ -88,24 +90,29 @@ def test_bgp_remove_private_as(): def _bgp_converge(router): while True: - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - if output['192.168.255.1']['bgpState'] == 'Established': + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + if output["192.168.255.1"]["bgpState"] == "Established": time.sleep(1) return True def _bgp_as_path(router): - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")) - if output['prefix'] == '172.16.255.254/32': - return output['paths'][0]['aspath']['segments'][0]['list'] + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json") + ) + if output["prefix"] == "172.16.255.254/32": + return output["paths"][0]["aspath"]["segments"][0]["list"] + + if _bgp_converge("r2"): + assert len(_bgp_as_path("r2")) == 1 + assert 65000 not in _bgp_as_path("r2") - if _bgp_converge('r2'): - assert len(_bgp_as_path('r2')) == 1 - assert 65000 not in _bgp_as_path('r2') + if _bgp_converge("r4"): + assert len(_bgp_as_path("r4")) == 2 + assert 3000 in _bgp_as_path("r4") - if _bgp_converge('r4'): - assert len(_bgp_as_path('r4')) == 2 - assert 3000 in _bgp_as_path('r4') -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py index e7f4f40f06..5e7c6d4b63 100644 --- a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py +++ b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py @@ -39,7 +39,7 @@ import time import pytest CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -47,16 +47,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,20 +68,20 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): 
router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_invalid(): tgen = get_topogen() @@ -88,12 +90,16 @@ def test_bgp_maximum_prefix_invalid(): def _bgp_converge(router): while True: - output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) - if output['192.168.255.1']['connectionsEstablished'] > 0: + output = json.loads( + tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) + if output["192.168.255.1"]["connectionsEstablished"] > 0: return True def _bgp_parsing_nlri(router): - cmd_max_exceeded = 'grep "%MAXPFXEXCEED: No. of IPv4 Unicast prefix received" bgpd.log' + cmd_max_exceeded = ( + 'grep "%MAXPFXEXCEED: No. of IPv4 Unicast prefix received" bgpd.log' + ) cmdt_error_parsing_nlri = 'grep "Error parsing NLRI" bgpd.log' output_max_exceeded = tgen.gears[router].run(cmd_max_exceeded) output_error_parsing_nlri = tgen.gears[router].run(cmdt_error_parsing_nlri) @@ -103,10 +109,10 @@ def test_bgp_maximum_prefix_invalid(): return False return True + if _bgp_converge("r2"): + assert _bgp_parsing_nlri("r2") == True - if _bgp_converge('r2'): - assert _bgp_parsing_nlri('r2') == True -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py index d77aa5aff2..708684f696 100644 --- a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py +++ b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py @@ -35,7 +35,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -43,16 +43,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -62,38 +64,34 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_maximum_prefix_out(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def 
_bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) @@ -103,6 +101,7 @@ def test_bgp_maximum_prefix_out(): assert result is None, 'Failed bgp convergence in "{}"'.format(router) -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_multiview_topo1/README.md b/tests/topotests/bgp_multiview_topo1/README.md index 2a2747344a..c1a1445894 100644 --- a/tests/topotests/bgp_multiview_topo1/README.md +++ b/tests/topotests/bgp_multiview_topo1/README.md @@ -1,4 +1,4 @@ -# Simple FreeRangeRouting Route-Server Test +# Simple FRRouting Route-Server Test ## Topology +----------+ +----------+ +----------+ +----------+ +----------+ diff --git a/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer1/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer1/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer2/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received 
routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer2/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer3/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer3/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + 
% ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer4/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer4/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer5/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git 
a/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer5/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer6/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer6/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py +++ 
b/tests/topotests/bgp_multiview_topo1/peer7/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer7/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py b/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py +++ b/tests/topotests/bgp_multiview_topo1/peer8/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py b/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py index 2de2bce40a..505b08d6aa 100755 --- a/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py +++ b/tests/topotests/bgp_multiview_topo1/peer8/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -16,13 +16,16 @@ numRoutes = int(argv[2]) # Announce numRoutes different routes per PE for i in range(0, numRoutes): - stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, 
peer, peer)) + stdout.write( + "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n" + % ((peer + 100), i, peer, peer) + ) stdout.flush() # Announce 1 overlapping route per peer -stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer) +stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) diff --git a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py index c851567dda..c342b17dd2 100755 --- a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py +++ b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py @@ -60,7 +60,7 @@ test_bgp_multiview_topo1.py: Simple Quagga/FRR Route-Server Test ~~ 172.20.0.1/28 ~~ attributes (using route-map) ~~ Stub Switch ~~ ~~~~~~~~~~~~~ -""" +""" import os import re @@ -90,37 +90,39 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "BGP Multiview Topology 1" def build(self, **_opts): - exabgpPrivateDirs = ['/etc/exabgp', - '/var/run/exabgp', - '/var/log'] + exabgpPrivateDirs = ["/etc/exabgp", "/var/run/exabgp", "/var/log"] # Setup Routers router = {} for i in range(1, 2): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # Setup Provider BGP peers peer = {} for i in range(1, 9): - peer[i] = self.addHost('peer%s' % i, ip='172.16.1.%s/24' % i, - defaultRoute='via 172.16.1.254', - privateDirs=exabgpPrivateDirs) + peer[i] = self.addHost( + "peer%s" % i, + ip="172.16.1.%s/24" % i, + defaultRoute="via 172.16.1.254", + privateDirs=exabgpPrivateDirs, + ) # Setup Switches switch = {} # First switch is for a dummy interface (for local network) - switch[0] = self.addSwitch('sw0', cls=topotest.LegacySwitch) - self.addLink(switch[0], router[1], intfName2='r1-stub') + switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch) + self.addLink(switch[0], router[1], intfName2="r1-stub") # Second switch is for connection to all peering routers - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[1], intfName2='r1-eth0') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink(switch[1], router[1], intfName2="r1-eth0") for j in range(1, 9): - self.addLink(switch[1], peer[j], intfName2='peer%s-eth0' % j) + self.addLink(switch[1], peer[j], intfName2="peer%s-eth0" % j) ##################################################### @@ -129,6 +131,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net @@ -136,7 +139,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -146,25 +149,26 @@ def setup_module(module): # Starting Routers for i in range(1, 2): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('bgpd', '%s/r%s/bgpd.conf' % (thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("bgpd", "%s/r%s/bgpd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # Starting PE Hosts and init ExaBGP on each 
of them - print('*** Starting BGP on all 8 Peers') + print("*** Starting BGP on all 8 Peers") for i in range(1, 9): - net['peer%s' % i].cmd('cp %s/exabgp.env /etc/exabgp/exabgp.env' % thisDir) - net['peer%s' % i].cmd('cp %s/peer%s/* /etc/exabgp/' % (thisDir, i)) - net['peer%s' % i].cmd('chmod 644 /etc/exabgp/*') - net['peer%s' % i].cmd('chmod 755 /etc/exabgp/*.py') - net['peer%s' % i].cmd('chown -R exabgp:exabgp /etc/exabgp') - net['peer%s' % i].cmd('exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg') - print('peer%s' % i), - print('') + net["peer%s" % i].cmd("cp %s/exabgp.env /etc/exabgp/exabgp.env" % thisDir) + net["peer%s" % i].cmd("cp %s/peer%s/* /etc/exabgp/" % (thisDir, i)) + net["peer%s" % i].cmd("chmod 644 /etc/exabgp/*") + net["peer%s" % i].cmd("chmod 755 /etc/exabgp/*.py") + net["peer%s" % i].cmd("chown -R exabgp:exabgp /etc/exabgp") + net["peer%s" % i].cmd("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg") + print("peer%s" % i), + print("") # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) + def teardown_module(module): global net @@ -172,20 +176,21 @@ def teardown_module(module): print("******************************************\n") # Shutdown - clean up everything - print('*** Killing BGP on Peer routers') + print("*** Killing BGP on Peer routers") # Killing ExaBGP for i in range(1, 9): - net['peer%s' % i].cmd('kill `cat /var/run/exabgp/exabgp.pid`') + net["peer%s" % i].cmd("kill `cat /var/run/exabgp/exabgp.pid`") # End - Shutdown network net.stop() + def test_router_running(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -193,7 +198,7 @@ def test_router_running(): # Starting Routers for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -207,7 +212,7 @@ def test_bgp_converge(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) # Wait for BGP to converge (All Neighbors in either Full or TwoWay State) @@ -220,9 +225,12 @@ def test_bgp_converge(): # Look for any node not yet converged for i in range(1, 2): for view in range(1, 4): - notConverged = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -v " 11$"' % view) + notConverged = net["r%s" % i].cmd( + 'vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -v " 11$"' + % view + ) if notConverged: - print('Waiting for r%s, view %s' % (i, view)) + print("Waiting for r%s, view %s" % (i, view)) sys.stdout.flush() break if notConverged: @@ -231,17 +239,17 @@ def test_bgp_converge(): sleep(5) timeout -= 5 else: - print('Done') + print("Done") break else: # Bail out with error if a router fails to converge - bgpStatus = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s summary"' % view) + bgpStatus = net["r%s" % i].cmd('vtysh -c "show ip bgp view %s summary"' % view) assert False, "BGP did not converge:\n%s" % bgpStatus # Wait for an extra 5s to announce all routes - print('Waiting 5s for routes to be announced'); + print("Waiting 5s for routes to be announced") sleep(5) - + print("BGP converged.") # if timeout < 60: @@ -251,18 +259,19 @@ def test_bgp_converge(): # Make sure 
that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) + def test_bgp_routingTable(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -274,56 +283,68 @@ def test_bgp_routingTable(): for view in range(1, 4): success = 0 # This glob pattern should work as long as number of views < 10 - for refTableFile in (glob.glob( - '%s/r%s/show_ip_bgp_view_%s*.ref' % (thisDir, i, view))): + for refTableFile in glob.glob( + "%s/r%s/show_ip_bgp_view_%s*.ref" % (thisDir, i, view) + ): if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view).rstrip() - + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view) + .rstrip() + ) + # Fix inconsitent spaces between 0.99.24 and newer versions of Quagga... - actual = re.sub('0 0', '0 0', actual) - actual = re.sub(r'([0-9]) 32768', r'\1 32768', actual) + actual = re.sub("0 0", "0 0", actual) + actual = re.sub( + r"([0-9]) 32768", r"\1 32768", actual + ) # Remove summary line (changed recently) - actual = re.sub(r'Total number.*', '', actual) - actual = re.sub(r'Displayed.*', '', actual) + actual = re.sub(r"Total number.*", "", actual) + actual = re.sub(r"Displayed.*", "", actual) actual = actual.rstrip() # Fix table version (ignore it) - actual = re.sub(r'(BGP table version is )[0-9]+', r'\1XXX', actual) + actual = re.sub(r"(BGP table version is )[0-9]+", r"\1XXX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual BGP routing table", - title2="expected BGP routing table") + title2="expected BGP routing table", + ) if diff: diffresult[refTableFile] = diff else: success = 1 print("template %s matched: r%s ok" % (refTableFile, i)) - break; + break if not success: - resultstr = 'No template matched.\n' + resultstr = "No template matched.\n" for f in diffresult.iterkeys(): resultstr += ( - 'template %s: r%s failed Routing Table Check for view %s:\n%s\n' - % (f, i, view, diffresult[f])) + "template %s: r%s failed Routing Table Check for view %s:\n%s\n" + % (f, i, view, diffresult[f]) + ) raise AssertionError( - "Routing Table verification failed for router r%s, view %s:\n%s" % (i, view, resultstr)) - + "Routing Table verification failed for router r%s, view %s:\n%s" + % (i, view, resultstr) + ) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -335,24 +356,26 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal 
error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") thisDir = os.path.dirname(os.path.realpath(__file__)) print("\n\n** Verifying unexpected STDERR output from daemons") print("******************************************\n") - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('bgpd') + log = net["r1"].getStdErr("bgpd") if log: print("\nBGPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) @@ -362,22 +385,26 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) - net['r1'].stopRouter() - net['r1'].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r1"].stopRouter() + net["r1"].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/bgp_prefix_sid/__init__.py b/tests/topotests/bgp_prefix_sid/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/__init__.py diff --git a/tests/topotests/bgp_prefix_sid/exabgp.env b/tests/topotests/bgp_prefix_sid/exabgp.env new file mode 100644 index 0000000000..6c554f5fa8 --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/exabgp.env @@ -0,0 +1,53 @@ + +[exabgp.api] +encoder = text +highres = false +respawn = false +socket = '' + +[exabgp.bgp] +openwait = 60 + +[exabgp.cache] +attributes = true +nexthops = true + +[exabgp.daemon] +daemonize = true +pid = '/var/run/exabgp/exabgp.pid' +user = 'exabgp' + +[exabgp.log] +all = false +configuration = true +daemon = true +destination = '/var/log/exabgp.log' +enable = true +level = INFO +message = false +network = true +packets = false +parser = false +processes = true +reactor = true +rib = false +routes = false +short = false +timers = false + +[exabgp.pdb] +enable = false + +[exabgp.profile] +enable = false +file = '' + +[exabgp.reactor] +speed = 1.0 + +[exabgp.tcp] +acl = false +bind = '' +delay = 0 +once = false +port = 179 diff --git a/tests/topotests/bgp_prefix_sid/peer1/exabgp.cfg b/tests/topotests/bgp_prefix_sid/peer1/exabgp.cfg new file mode 100644 index 0000000000..5b55366a0e --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/peer1/exabgp.cfg @@ -0,0 
+1,103 @@ +group controller { + neighbor 10.0.0.1 { + router-id 10.0.0.101; + local-address 10.0.0.101; + local-as 2; + peer-as 1; + + family { + ipv4 nlri-mpls; + } + + static { + # ref: draft-ietf-idr-bgp-prefix-sid-27 + # + # IANA temporarily assigned the following: + # attribute code type (suggested value: 40) to + # the BGP Prefix-SID attribute + # + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type | Length | RESERVED | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Flags | Label Index | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Label Index | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # Figure. Label-Index TLV (Prefix-SID type-1) + # + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type | Length | Flags | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Flags | + # +-+-+-+-+-+-+-+-+ + # + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | SRGB 1 (6 octets) | + # | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | SRGB n (6 octets) | + # | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<Paste> + # Figure. Originator SRGB TLV (Prefix-SID type-3) + + # ExaBGP generic-attribute binary pattern: + # Attribute-type: 0x28 (40:BGP_PREFIX_SID) + # Attribute-flag: 0xc0 (Option, Transitive) + # Attribute-body: Label-Index TLV and Originator SRGB TLV + # Label-Index TLV: 0x01000700000000000001 + # Type (08bit): 0x01 + # Length (16bit): 0x0007 + # RESERVED (08bit): 0x00 + # Flags (16bit): 0x0000 + # Label Index (32bit): 0x00000001 + # Originator SRGB TLV: 0x03000800000c350000000a + # Type (08bit): 0x03 + # Length (16bit): 0x0008 (nb-SRGB is 1) + # Flags (16bit): 0x0000 + # SRGB1 (48bit): 0x0c3500:0x00000a (800000-800010 is SRGB1) + route 3.0.0.1/32 next-hop 10.0.0.101 label [800001] attribute [0x28 0xc0 0x0100070000000000000103000800000c350000000a]; + + # ExaBGP generic-attribute binary pattern: + # Attribute-type: 0x28 (40:BGP_PREFIX_SID) + # Attribute-flag: 0xc0 (Option, Transitive) + # Attribute-body: Label-Index TLV and Originator SRGB TLV + # Label-Index TLV: 0x01000700000000000001 + # Type (08bit): 0x01 + # Length (16bit): 0x0007 + # RESERVED (08bit): 0x00 + # Flags (16bit): 0x0000 + # Label Index (32bit): 0x00000002 + # Originator SRGB TLV: 0x03000800000c350000000a + # Type (08bit): 0x03 + # Length (16bit): 0x0008 (nb-SRGB is 1) + # Flags (16bit): 0x0000 + # SRGB1 (48bit): 0x0c3500:0x00000a (800000-800010 is SRGB1) + route 3.0.0.2/32 next-hop 10.0.0.101 label [800002] attribute [0x28 0xc0 0x0100070000000000000203000800000c350000000a]; + + # ExaBGP generic-attribute binary pattern: + # Attribute-type: 0x28 (40:BGP_PREFIX_SID) + # Attribute-flag: 0xc0 (Option, Transitive) + # Attribute-body: Label-Index TLV and Originator SRGB TLV + # Label-Index TLV: 0x01000700000000000001 + # Type (08bit): 0x01 + # Length (16bit): 0x0007 + # RESERVED (08bit): 0x00 + # Flags (16bit): 0x0000 + # Label Index (32bit): 0x00000003 + # Originator SRGB TLV: 0x03000800000c350000000a + # Type (08bit): 0x03 + # Length (16bit): 0x0008 (nb-SRGB is 1) + # Flags (16bit): 0x0000 + # SRGB1 (48bit): 0x0c3500:0x00000a (800000-800010 is SRGB1) + route 3.0.0.3/32 
next-hop 10.0.0.101 label [800003] attribute [0x28 0xc0 0x0100070000000000000303000800000c350000000a]; + } + } +} diff --git a/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py b/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py new file mode 100755 index 0000000000..f1ec9fa5ba --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/peer2/exa-receive.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +""" +exa-receive.py: Save received routes from ExaBGP into file +""" + +from sys import stdin, argv +from datetime import datetime + +# 1st arg is peer number +peer = int(argv[1]) + +# When the parent dies we are seeing continual newlines, so we only access so many before stopping +counter = 0 + +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") + +while True: + try: + line = stdin.readline() + routesavefile.write(line) + routesavefile.flush() + + if line == "": + counter += 1 + if counter > 100: + break + continue + + counter = 0 + except KeyboardInterrupt: + pass + except IOError: + # most likely a signal during readline + pass + +routesavefile.close() diff --git a/tests/topotests/bgp_prefix_sid/peer2/exabgp.cfg b/tests/topotests/bgp_prefix_sid/peer2/exabgp.cfg new file mode 100644 index 0000000000..dabd88e03d --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/peer2/exabgp.cfg @@ -0,0 +1,19 @@ +group controller { + + process receive-routes { + run "/etc/exabgp/exa-receive.py 2"; + receive-routes; + encoder json; + } + + neighbor 10.0.0.1 { + router-id 10.0.0.102; + local-address 10.0.0.102; + local-as 3; + peer-as 1; + + family { + ipv4 nlri-mpls; + } + } +} diff --git a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf new file mode 100644 index 0000000000..7a38cc307f --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf @@ -0,0 +1,15 @@ +log stdout notifications +log monitor notifications +log commands +! +router bgp 1 + bgp router-id 10.0.0.1 + no bgp default ipv4-unicast + neighbor 10.0.0.101 remote-as 2 + neighbor 10.0.0.102 remote-as 3 + ! + address-family ipv4 labeled-unicast + neighbor 10.0.0.101 activate + neighbor 10.0.0.102 activate + exit-address-family +! diff --git a/tests/topotests/bgp_prefix_sid/r1/zebra.conf b/tests/topotests/bgp_prefix_sid/r1/zebra.conf new file mode 100644 index 0000000000..0cd26052f2 --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/r1/zebra.conf @@ -0,0 +1,7 @@ +hostname r1 +! +interface r1-eth0 + ip address 10.0.0.1/24 + no shutdown +! +line vty diff --git a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py new file mode 100755 index 0000000000..3a6aefe7ee --- /dev/null +++ b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python + +# +# test_bgp_prefix_sid.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by LINE Corporation +# Copyright (c) 2020 by Hiroki Shirokura <slank.dev@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_prefix_sid.py: Test BGP topology with EBGP on prefix-sid +""" + +import json +import os +import sys +import functools +import pytest + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + + +class TemplateTopo(Topo): + def build(self, **_opts): + tgen = get_topogen(self) + router = tgen.add_router("r1") + switch = tgen.add_switch("s1") + switch.add_link(router) + + switch = tgen.gears["s1"] + peer1 = tgen.add_exabgp_peer( + "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1" + ) + peer2 = tgen.add_exabgp_peer( + "peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1" + ) + switch.add_link(peer1) + switch.add_link(peer2) + + +def setup_module(module): + tgen = Topogen(TemplateTopo, module.__name__) + tgen.start_topology() + + router = tgen.gears["r1"] + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format("r1")) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1")) + ) + router.start() + + logger.info("starting exaBGP on peer1") + peer_list = tgen.exabgp_peers() + for pname, peer in peer_list.iteritems(): + peer_dir = os.path.join(CWD, pname) + env_file = os.path.join(CWD, "exabgp.env") + logger.info("Running ExaBGP peer") + peer.start(peer_dir, env_file) + logger.info(pname) + + +def teardown_module(module): + tgen = get_topogen() + tgen.stop_topology() + + +def test_r1_receive_and_advertise_prefix_sid_type1(): + tgen = get_topogen() + router = tgen.gears["r1"] + + def _check_type1_r1(router, prefix, remoteLabel, labelIndex): + output = router.vtysh_cmd( + "show bgp ipv4 labeled-unicast {} json".format(prefix) + ) + output = json.loads(output) + expected = { + "prefix": prefix, + "advertisedTo": {"10.0.0.101": {}, "10.0.0.102": {}}, + "paths": [ + {"valid": True, "remoteLabel": remoteLabel, "labelIndex": labelIndex,} + ], + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_check_type1_r1, router, "3.0.0.1/32", 800001, 1) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, 'Failed _check_type1_r1 in "{}"'.format(router) + + test_func = functools.partial(_check_type1_r1, router, "3.0.0.2/32", 800002, 2) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, 'Failed _check_type1_r1 in "{}"'.format(router) + + +def exabgp_get_update_prefix(filename, afi, nexthop, prefix): + with open(filename) as f: + for line in f.readlines(): + output = json.loads(line) + ret = output.get("neighbor") + if ret is None: + continue + ret = ret.get("message") + if ret is None: + continue + ret = ret.get("update") + if ret is None: + continue + ret = ret.get("announce") + if ret is None: + continue + ret = ret.get(afi) + if ret is None: + continue + ret = ret.get(nexthop) + if ret is None: + continue + ret = ret.get(prefix) + if ret is None: + continue + return output + return "Not found" + + +def test_peer2_receive_prefix_sid_type1(): 
+ tgen = get_topogen() + peer2 = tgen.gears["peer2"] + + def _check_type1_peer2(prefix, labelindex): + output = exabgp_get_update_prefix( + "/tmp/peer2-received.log", "ipv4 nlri-mpls", "10.0.0.101", prefix + ) + expected = { + "type": "update", + "neighbor": { + "ip": "10.0.0.1", + "message": { + "update": { + "attribute": { + "attribute-0x28-0xE0": "0x010007000000{:08x}".format( + labelindex + ) + }, + "announce": {"ipv4 nlri-mpls": {"10.0.0.101": {}}}, + } + }, + }, + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_check_type1_peer2, "3.0.0.1/32", labelindex=1) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, 'Failed _check_type1_peer2 in "{}"'.format("peer2") + + test_func = functools.partial(_check_type1_peer2, "3.0.0.2/32", labelindex=2) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, 'Failed _check_type1_peer2 in "{}"'.format("peer2") + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + ret = pytest.main(args) + sys.exit(ret) diff --git a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py index f307edc678..b49a57b308 100644 --- a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py +++ b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py @@ -42,7 +42,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -50,20 +50,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -73,38 +75,34 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_reject_as_sets(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return 
topotest.json_cmp(output, expected) @@ -112,34 +110,23 @@ def test_bgp_reject_as_sets(): def _bgp_has_aggregated_route_with_stripped_as_set(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.0.0/16 json")) expected = { - 'paths': [ - { - 'aspath': { - 'string': 'Local', - 'segments': [ - ], - 'length': 0 - } - } - ] + "paths": [{"aspath": {"string": "Local", "segments": [], "length": 0}}] } return topotest.json_cmp(output, expected) def _bgp_announce_route_without_as_sets(router): - output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json")) + output = json.loads( + router.vtysh_cmd( + "show ip bgp neighbor 192.168.254.2 advertised-routes json" + ) + ) expected = { - 'advertisedRoutes': { - '172.16.0.0/16': { - 'asPath': '' - }, - '192.168.254.0/30': { - 'asPath': '65003' - }, - '192.168.255.0/30': { - 'asPath': '65001' - } + "advertisedRoutes": { + "172.16.0.0/16": {"path": ""}, + "192.168.254.0/30": {"path": "65003"}, + "192.168.255.0/30": {"path": "65001"}, }, - 'totalPrefixCounter': 3 + "totalPrefixCounter": 3, } return topotest.json_cmp(output, expected) @@ -148,7 +135,9 @@ def test_bgp_reject_as_sets(): assert result is None, 'Failed bgp convergence in "{}"'.format(router) - test_func = functools.partial(_bgp_has_aggregated_route_with_stripped_as_set, router) + test_func = functools.partial( + _bgp_has_aggregated_route_with_stripped_as_set, router + ) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, 'Failed to see an aggregated route in "{}"'.format(router) @@ -156,8 +145,11 @@ def test_bgp_reject_as_sets(): test_func = functools.partial(_bgp_announce_route_without_as_sets, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Route 172.16.0.0/16 should be sent without AS_SET to r3 "{}"'.format(router) + assert ( + result is None + ), 'Route 172.16.0.0/16 should be sent without AS_SET to r3 "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/customize.py b/tests/topotests/bgp_rfapi_basic_sanity/customize.py index a125c6582f..ea548a7337 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/customize.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/customize.py @@ -75,12 +75,15 @@ from lib.ltemplate import ltemplateRtrCmd from mininet.topo import Topo import shutil + CWD = os.path.dirname(os.path.realpath(__file__)) # test name based on directory TEST = os.path.basename(CWD) + class ThisTestTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -89,36 +92,37 @@ class ThisTestTopo(Topo): # between routers, switches and hosts. # # Create P/PE routers - tgen.add_router('r1') + tgen.add_router("r1") for routern in range(2, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a switch with just one router connected to it to simulate a # empty network. 
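The peer2 check in test_bgp_prefix_sid.py above expects the attribute "attribute-0x28-0xE0" to equal "0x010007000000{:08x}".format(labelindex). That string appears to follow the BGP Prefix-SID Label-Index TLV layout (type 0x01, length 0x0007, one reserved octet, two flag octets, then the 32-bit label index). A small hedged helper making the encoding explicit — label_index_attr is an illustrative name, not part of the test library:

def label_index_attr(label_index):
    # Label-Index TLV as exabgp dumps it: type 0x01, length 0x0007,
    # 1 reserved octet, 2 flag octets (0x0000), then the label index
    # encoded as 4 octets (8 hex digits).
    return "0x010007000000{:08x}".format(label_index)

# The two prefixes checked above use label indexes 1 and 2:
assert label_index_attr(1) == "0x01000700000000000001"
assert label_index_attr(2) == "0x01000700000000000002"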
switch = {} - switch[0] = tgen.add_switch('sw0') - switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0') - switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0') + switch[0] = tgen.add_switch("sw0") + switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0") + switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0") - switch[1] = tgen.add_switch('sw1') - switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1') - switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0') - switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0') + switch[1] = tgen.add_switch("sw1") + switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1") + switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0") + switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0") + + switch[2] = tgen.add_switch("sw2") + switch[2].add_link(tgen.gears["r2"], nodeif="r2-eth2") + switch[2].add_link(tgen.gears["r3"], nodeif="r3-eth1") - switch[2] = tgen.add_switch('sw2') - switch[2].add_link(tgen.gears['r2'], nodeif='r2-eth2') - switch[2].add_link(tgen.gears['r3'], nodeif='r3-eth1') def ltemplatePreRouterStartHook(): cc = ltemplateRtrCmd() tgen = get_topogen() - logger.info('pre router-start hook') - #check for normal init + logger.info("pre router-start hook") + # check for normal init if len(tgen.net) == 1: - logger.info('Topology not configured, skipping setup') + logger.info("Topology not configured, skipping setup") return False return True + def ltemplatePostRouterStartHook(): - logger.info('post router-start hook') + logger.info("post router-start hook") return True - diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py index 4d6a7582ba..f4b4da55d2 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/add_routes.py @@ -1,36 +1,159 @@ from lutil import luCommand -holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set') + +holddownFactorSet = luCommand( + "r1", + 'vtysh -c "show running"', + "rfp holddown-factor", + "none", + "Holddown factor set", +) if not holddownFactorSet: to = "-1" cost = "" else: to = "6" cost = "cost 50" -luCommand('r1','vtysh -c "debug rfapi-dev open vn 10.0.0.1 un 1.1.1.1"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"','rc=2', 'pass', 'Clean query') -luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r1','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration') -luCommand('r1','vtysh -c "debug rfapi-dev response-omit-self off"','.','none') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"','11.11.11.0/24', 'pass', 'Query self') +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev open vn 10.0.0.1 un 1.1.1.1"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"', + "rc=2", + "pass", + "Clean query", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "1 out of 1", + "wait", + "Local registration", +) +luCommand("r1", 'vtysh -c "debug 
rfapi-dev response-omit-self off"', ".", "none") +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"', + "11.11.11.0/24", + "pass", + "Query self", +) -luCommand('r3','vtysh -c "debug rfapi-dev open vn 10.0.0.2 un 2.2.2.2"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r3','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration') -luCommand('r3','vtysh -c "debug rfapi-dev response-omit-self on"','.','none') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 22.22.22.22"','rc=2', 'pass', 'Self excluded') -luCommand('r3','vtysh -c "debug rfapi-dev open vn 10.0.1.2 un 2.1.1.2"','rfapi_set_response_cb: status 0', 'pass', 'Opened query only RFAPI') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.1.2 un 2.1.1.2 target 22.22.22.22"','22.22.22.0/24', 'pass', 'See local') +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev open vn 10.0.0.2 un 2.2.2.2"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "1 out of 1", + "wait", + "Local registration", +) +luCommand("r3", 'vtysh -c "debug rfapi-dev response-omit-self on"', ".", "none") +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 22.22.22.22"', + "rc=2", + "pass", + "Self excluded", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev open vn 10.0.1.2 un 2.1.1.2"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened query only RFAPI", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.1.2 un 2.1.1.2 target 22.22.22.22"', + "22.22.22.0/24", + "pass", + "See local", +) -luCommand('r4','vtysh -c "debug rfapi-dev open vn 10.0.0.3 un 3.3.3.3"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r4','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration') -luCommand('r4','vtysh -c "debug rfapi-dev response-omit-self off"','.','none') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 33.33.33.33"','33.33.33.0/24', 'pass', 'Query self') +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev open vn 10.0.0.3 un 3.3.3.3"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "1 out of 1", + "wait", + "Local registration", +) +luCommand("r4", 'vtysh -c "debug rfapi-dev response-omit-self off"', ".", "none") +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 33.33.33.33"', + "33.33.33.0/24", + "pass", + "Query self", +) -luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24 lifetime {} {}"'.format(to, cost),'', 'none', 'MP Prefix registered') -luCommand('r4','vtysh -c "show vnc registrations local"','2 out of 
2','wait','Local registration') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"','11.11.11.0/24', 'pass', 'Query self MP') +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24 lifetime {} {}"'.format( + to, cost + ), + "", + "none", + "MP Prefix registered", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "2 out of 2", + "wait", + "Local registration", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"', + "11.11.11.0/24", + "pass", + "Query self MP", +) -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py index 6fbe4ff1c0..6ad3e735ee 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py @@ -1,10 +1,48 @@ -luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60) -luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180) -luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180) -luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180) -luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180) -luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') -luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping') -#luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping') +luCommand( + "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60 +) +luCommand( + "r2", + 'vtysh -c "show bgp summary"', + " 00:0.* 00:0.* 00:0", + "wait", + "Core adjacencies up", + 180, +) +luCommand( + "r1", + 'vtysh -c "show bgp vrf all summary"', + " 00:0", + "wait", + "All adjacencies up", + 180, +) +luCommand( + "r3", + 'vtysh -c "show bgp vrf all summary"', + " 00:0", + "wait", + "All adjacencies up", + 180, +) +luCommand( + "r4", + 'vtysh -c "show bgp vrf all summary"', + " 00:0", + "wait", + "All adjacencies up", + 180, +) +luCommand( + "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping" +) +luCommand( + "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping" +) +# luCommand('r4','ping 3.3.3.3 -c 1',' 0. 
packet loss','wait','PE->PE3 (loopback) ping') diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py index 5fffce7ca0..9fdef84cdf 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_close.py @@ -1,19 +1,102 @@ from lutil import luCommand -holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set') + +holddownFactorSet = luCommand( + "r1", + 'vtysh -c "show running"', + "rfp holddown-factor", + "none", + "Holddown factor set", +) if not holddownFactorSet: to = "-1" else: to = "1" -luCommand('r1','vtysh -c "debug rfapi-dev open vn 20.0.0.1 un 1.1.1.21"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI') -luCommand('r1','vtysh -c "debug rfapi-dev register vn 20.0.0.1 un 1.1.1.21 prefix 111.111.111.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered') -luCommand('r1','vtysh -c "show vnc registrations local"','111.111.111.0/24','wait','Local registration',1) -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','111.111.111.0/24','wait','See registration') -luCommand('r4','vtysh -c "show vnc registrations"','111.111.111.0/24','wait','See registration') -luCommand('r1','vtysh -c "debug rfapi-dev close vn 20.0.0.1 un 1.1.1.21"','status 0', 'pass', 'Closed RFAPI') -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See cleanup') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See cleanup') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2','wait','See cleanup') -luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown',20) -luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') -luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev open vn 20.0.0.1 un 1.1.1.21"', + "rfapi_set_response_cb: status 0", + "pass", + "Opened RFAPI", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 20.0.0.1 un 1.1.1.21 prefix 111.111.111.0/24 lifetime {}"'.format( + to + ), + "", + "none", + "Prefix registered", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "111.111.111.0/24", + "wait", + "Local registration", + 1, +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "111.111.111.0/24", + "wait", + "See registration", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "111.111.111.0/24", + "wait", + "See registration", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev close vn 20.0.0.1 un 1.1.1.21"', + "status 0", + "pass", + "Closed RFAPI", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See cleanup", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See cleanup", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2", + "wait", + "See cleanup", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of 
holddown", + 20, +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", +) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py index a380c79fcf..1caa827ce2 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_routes.py @@ -1,19 +1,74 @@ from lutil import luCommand -luCommand('r1','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI') -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See all registrations') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See all registrations') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2','wait','See all registrations') -num = '4 routes and 4' -luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 22.22.22.22"','pfx=', 'pass', 'Query R2s info') -luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 33.33.33.33"','pfx=', 'pass', 'Query R4s info') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 11.11.11.11"','11.11.11.0/24.*11.11.11.0/24.*', 'pass', 'Query R1s+R4s info') -luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 33.33.33.33"','pfx=', 'pass', 'Query R4s info') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"','11.11.11.0/24.*11.11.11.0/24.*', 'pass', 'Query R1s+R4s info') -luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 22.22.22.22"','pfx=', 'pass', 'Query R2s info') + +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI") +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See all registrations", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3", + "wait", + "See all registrations", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2", + "wait", + "See all registrations", +) +num = "4 routes and 4" +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay") +luCommand( + "r1", + 'vtysh -c "debug 
rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 22.22.22.22"', + "pfx=", + "pass", + "Query R2s info", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 33.33.33.33"', + "pfx=", + "pass", + "Query R4s info", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 11.11.11.11"', + "11.11.11.0/24.*11.11.11.0/24.*", + "pass", + "Query R1s+R4s info", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 33.33.33.33"', + "pfx=", + "pass", + "Query R4s info", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"', + "11.11.11.0/24.*11.11.11.0/24.*", + "pass", + "Query R1s+R4s info", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 22.22.22.22"', + "pfx=", + "pass", + "Query R2s info", +) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py index f4467ecc33..e68e9e93ab 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/check_timeout.py @@ -1,68 +1,325 @@ from lutil import luCommand -holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set') -luCommand('r1','vtysh -c "show vnc registrations"','.','none') -luCommand('r3','vtysh -c "show vnc registrations"','.','none') -luCommand('r4','vtysh -c "show vnc registrations"','.','none') + +holddownFactorSet = luCommand( + "r1", + 'vtysh -c "show running"', + "rfp holddown-factor", + "none", + "Holddown factor set", +) +luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") if not holddownFactorSet: - luCommand('r1','vtysh -c "show vnc summary"','.','pass','Holddown factor not set -- skipping test') + luCommand( + "r1", + 'vtysh -c "show vnc summary"', + ".", + "pass", + "Holddown factor not set -- skipping test", + ) else: - #holddown time test - luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r1','vtysh -c "show vnc registrations local"','1.111.0.0/16','wait','Local registration') + # holddown time test + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "1.111.0.0/16", + "wait", + "Local registration", + ) - luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r3','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration') + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "1.222.0.0/16", + "wait", + "Local registration", + ) - luCommand('r4','vtysh -c "show vnc registrations"','Remotely: *Active: 4 ','wait', 'See registrations, L=10') + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Remotely: *Active: 4 ", + "wait", + "See registrations, L=10", + ) - luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 
3.3.3.3 prefix 1.222.0.0/16 lifetime 5 cost 50"','', 'none', 'MP Prefix registered') - luCommand('r4','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration (MP prefix)') + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16 lifetime 5 cost 50"', + "", + "none", + "MP Prefix registered", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations local"', + "1.222.0.0/16", + "wait", + "Local registration (MP prefix)", + ) - luCommand('r1','vtysh -c "show vnc registrations"','.','none') - luCommand('r3','vtysh -c "show vnc registrations"','.','none') + luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") - luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.111.111.111"','pfx=', 'pass', 'Query R1s info') - luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.222.222.222"','1.222.0.0/16.*1.222.0.0/16', 'pass', 'Query R3s+R4s info') + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.111.111.111"', + "pfx=", + "pass", + "Query R1s info", + ) + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.222.222.222"', + "1.222.0.0/16.*1.222.0.0/16", + "pass", + "Query R3s+R4s info", + ) - luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16"','', 'none', 'MP Prefix removed') - luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown') - luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown') - luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown') - luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 1.222.222.222"','1.222.0.0/16', 'pass', 'Query R3s info') - luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16"','', 'none', 'Prefix timeout') - luCommand('r1','vtysh -c "show vnc registrations holddown"','1.111.0.0/16','wait','Local holddown',1) - luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16"','', 'none', 'Prefix timeout') - luCommand('r3','vtysh -c "show vnc registrations holddown"','1.222.0.0/16','wait','Local holddown',1) - luCommand('r4','vtysh -c "show vnc registrations"','.','none') - luCommand('r4','vtysh -c "show vnc registrations"','.','none') + luCommand( + "r4", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16"', + "", + "none", + "MP Prefix removed", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 1 ", + "wait", + "MP prefix in holddown", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 1 ", + "wait", + "MP prefix in holddown", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 1 ", + "wait", + "MP prefix in holddown", + ) + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 1.222.222.222"', + "1.222.0.0/16", + "pass", + "Query R3s info", + ) + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16"', + "", + "none", + "Prefix timeout", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations holddown"', + "1.111.0.0/16", + "wait", + "Local holddown", + 1, + ) 
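Every step in these rfapi scripts is a positional luCommand(router, shell_command, regexp, op, description[, timeout]) call, where op is "wait" (poll until the regexp matches), "pass" (assert it matches now) or "none" (just run and log). A small hedged wrapper over the most repeated call shape — wait_registrations and its default timeout are illustrative, not part of lutil:

from lutil import luCommand

def wait_registrations(router, pattern, why, timeout=10):
    # Poll "show vnc registrations" on the given router until the regexp
    # matches or the timeout (in seconds) expires; luCommand records the
    # result under the supplied description.
    return luCommand(
        router,
        'vtysh -c "show vnc registrations"',
        pattern,
        "wait",
        why,
        timeout,
    )

# e.g. the holddown checks in these scripts reduce to:
wait_registrations("r1", "In Holddown: *Active: 0", "Out of holddown", 20)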
+ luCommand( + "r3", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16"', + "", + "none", + "Prefix timeout", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations holddown"', + "1.222.0.0/16", + "wait", + "Local holddown", + 1, + ) + luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none") - luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown') - luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown') - luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown') + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 2 ", + "wait", + "In holddown", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 2 ", + "wait", + "In holddown", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 2 ", + "wait", + "In holddown", + ) - luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown',20) - luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') - luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown') + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + 20, + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) - #kill test - luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r1','vtysh -c "show vnc registrations local"','1.111.0.0/16','wait','Local registration') + # kill test + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations local"', + "1.111.0.0/16", + "wait", + "Local registration", + ) - luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"','', 'none', 'Prefix registered') - luCommand('r3','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration') + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"', + "", + "none", + "Prefix registered", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations local"', + "1.222.0.0/16", + "wait", + "Local registration", + ) - luCommand('r4','vtysh -c "show vnc registrations"','Remotely: *Active: 4 ','wait', 'See registrations L=10 (pre-kill)',5) - luCommand('r1','vtysh -c "show vnc registrations"','.','none') - luCommand('r3','vtysh -c "show vnc registrations"','.','none') - luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 kill"','', 'none', 'Prefix kill') - luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 4 .*In Holddown: *Active: 0','wait','Registration killed',1) - luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Remote in 
holddown',5) - luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Remote in holddown',5) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Remotely: *Active: 4 ", + "wait", + "See registrations L=10 (pre-kill)", + 5, + ) + luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none") + luCommand( + "r1", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 kill"', + "", + "none", + "Prefix kill", + ) + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 4 .*In Holddown: *Active: 0", + "wait", + "Registration killed", + 1, + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1", + "wait", + "Remote in holddown", + 5, + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1", + "wait", + "Remote in holddown", + 5, + ) - luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 kill"','', 'none', 'Prefix kill') - luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Registration killed',1) - luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 2','wait','Remote in holddown',5) + luCommand( + "r3", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 kill"', + "", + "none", + "Prefix kill", + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 1", + "wait", + "Registration killed", + 1, + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 2", + "wait", + "Remote in holddown", + 5, + ) - luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0','wait','Out of holddown',20) - luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0','wait','Out of holddown') - luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 0','wait','Out of holddown') + luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0", + "wait", + "Out of holddown", + 20, + ) + luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) + luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 0", + "wait", + "Out of holddown", + ) diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py index e9c1916f75..eea977bfaf 100644 --- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/cleanup_all.py @@ -1,33 +1,124 @@ from lutil import luCommand -luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24"','', 'none', 'Prefix removed') 
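cleanup_all.py below tears each NVE down in the same order it was built up: unregister every prefix, wait for "Locally: *Active: 0", then close the RFAPI descriptor. A condensed hedged sketch of that per-router teardown — close_nve is an illustrative name, not part of lutil:

from lutil import luCommand

def close_nve(router, vn, un, prefixes):
    # Unregister each prefix, confirm the local registration count drops
    # to zero, then close the RFAPI descriptor for this vn/un pair.
    for pfx in prefixes:
        luCommand(
            router,
            'vtysh -c "debug rfapi-dev unregister vn {} un {} prefix {}"'.format(
                vn, un, pfx
            ),
            "",
            "none",
            "Prefix removed",
        )
    luCommand(
        router,
        'vtysh -c "show vnc registrations"',
        "Locally: *Active: 0 ",
        "wait",
        "Local registration removed",
    )
    luCommand(
        router,
        'vtysh -c "debug rfapi-dev close vn {} un {}"'.format(vn, un),
        "status 0",
        "pass",
        "Closed RFAPI",
    )

# e.g. r1's teardown:
close_nve("r1", "10.0.0.1", "1.1.1.1", ["11.11.11.0/24"])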
-luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed') -luCommand('r1','vtysh -c "debug rfapi-dev close vn 10.0.0.1 un 1.1.1.1"','status 0', 'pass', 'Closed RFAPI') -luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24"','', 'none', 'Prefix removed') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed') -luCommand('r3','vtysh -c "debug rfapi-dev close vn 10.0.0.2 un 2.2.2.2"','status 0', 'pass', 'Closed RFAPI') +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24"', + "", + "none", + "Prefix removed", +) +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 ", + "wait", + "Local registration removed", +) +luCommand( + "r1", + 'vtysh -c "debug rfapi-dev close vn 10.0.0.1 un 1.1.1.1"', + "status 0", + "pass", + "Closed RFAPI", +) -luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24"','', 'none', 'Prefix removed') -luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24"','', 'none', 'MP prefix removed') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed') -#luCommand('r4','vtysh -c "debug rfapi-dev close vn 10.0.0.3 un 3.3.3.3"','status 0', 'pass', 'Closed RFAPI') -luCommand('r4','vtysh -c "clear vnc nve *"','.', 'pass', 'Cleared NVEs') +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24"', + "", + "none", + "Prefix removed", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 ", + "wait", + "Local registration removed", +) +luCommand( + "r3", + 'vtysh -c "debug rfapi-dev close vn 10.0.0.2 un 2.2.2.2"', + "status 0", + "pass", + "Closed RFAPI", +) -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared') -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared') +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24"', + "", + "none", + "Prefix removed", +) +luCommand( + "r4", + 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24"', + "", + "none", + "MP prefix removed", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 ", + "wait", + "Local registration removed", +) +# luCommand('r4','vtysh -c "debug rfapi-dev close vn 10.0.0.3 un 3.3.3.3"','status 0', 'pass', 'Closed RFAPI') +luCommand("r4", 'vtysh -c "clear vnc nve *"', ".", "pass", "Cleared NVEs") -num = '0 exist' -luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') -luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') -luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') -luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear') +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0", + "wait", + "All registrations cleared", +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0", + "wait", + "All 
registrations cleared", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0", + "wait", + "All registrations cleared", +) -luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns',20) -luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns') -luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns') +num = "0 exist" +luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") +luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") +luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") +luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear") -luCommand('r1','vtysh -c "show vnc summary"','.','none') -luCommand('r3','vtysh -c "show vnc summary"','.','none') -luCommand('r4','vtysh -c "show vnc summary"','.','none') +luCommand( + "r1", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0", + "wait", + "No holddowns", + 20, +) +luCommand( + "r3", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0", + "wait", + "No holddowns", +) +luCommand( + "r4", + 'vtysh -c "show vnc registrations"', + "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0", + "wait", + "No holddowns", +) +luCommand("r1", 'vtysh -c "show vnc summary"', ".", "none") +luCommand("r3", 'vtysh -c "show vnc summary"', ".", "none") +luCommand("r4", 'vtysh -c "show vnc summary"', ".", "none") diff --git a/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py b/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py index 0e1f236b7d..cd59bbc395 100755 --- a/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py +++ b/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py @@ -25,64 +25,71 @@ import os import sys import pytest -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")) from lib.ltemplate import * + def test_add_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc) + def test_adjacencies(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 
'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc) + def test_check_routes(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc) + def test_check_close(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_close.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_close.py", False, CliOnFail, CheckFunc) + def test_check_timeout(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/check_timeout.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/check_timeout.py", False, CliOnFail, CheckFunc) + def test_cleanup_all(): CliOnFail = None # For debugging, uncomment the next line - #CliOnFail = 'tgen.mininet_cli' - CheckFunc = 'ltemplateVersionCheck(\'3.1\')' - #uncomment next line to start cli *before* script is run - #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' - ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc) + # CliOnFail = 'tgen.mininet_cli' + CheckFunc = "ltemplateVersionCheck('3.1')" + # uncomment next line to start cli *before* script is run + # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)' + ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc) + -if __name__ == '__main__': +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/bgp_rr_ibgp/spine1/show_ip_route.json_ref b/tests/topotests/bgp_rr_ibgp/spine1/show_ip_route.json_ref index 552e96ddb9..75ce1b149e 100644 --- a/tests/topotests/bgp_rr_ibgp/spine1/show_ip_route.json_ref +++ b/tests/topotests/bgp_rr_ibgp/spine1/show_ip_route.json_ref @@ -11,15 +11,12 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "ip":"192.168.2.1", "afi":"ipv4", - "interfaceIndex":2, "interfaceName":"spine1-eth0", "active":true } @@ -38,14 +35,11 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":2, "interfaceName":"spine1-eth0", "active":true } @@ 
-64,15 +58,12 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "ip":"192.168.4.2", "afi":"ipv4", - "interfaceIndex":3, "interfaceName":"spine1-eth1", "active":true } @@ -91,23 +82,20 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":3, "interfaceName":"spine1-eth1", "active":true } ] } ], - "192.168.5.0\/24":[ + "192.168.5.1\/32":[ { - "prefix":"192.168.5.0\/24", + "prefix":"192.168.5.1\/32", "protocol":"bgp", "selected":true, "destSelected":true, @@ -117,24 +105,21 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "ip":"192.168.2.1", "afi":"ipv4", - "interfaceIndex":2, "interfaceName":"spine1-eth0", "active":true } ] } ], - "192.168.6.0\/24":[ + "192.168.6.2\/32":[ { - "prefix":"192.168.6.0\/24", + "prefix":"192.168.6.2\/32", "protocol":"bgp", "selected":true, "destSelected":true, @@ -144,19 +129,16 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "ip":"192.168.4.2", "afi":"ipv4", - "interfaceIndex":3, "interfaceName":"spine1-eth1", "active":true } ] } ] -} +}
\ No newline at end of file diff --git a/tests/topotests/bgp_rr_ibgp/spine2/bgpd.conf b/tests/topotests/bgp_rr_ibgp/spine2/bgpd.conf deleted file mode 100644 index a865b388ac..0000000000 --- a/tests/topotests/bgp_rr_ibgp/spine2/bgpd.conf +++ /dev/null @@ -1,8 +0,0 @@ -hostname spine2 -router bgp 99 - neighbor 192.168.5.1 remote-as internal - neighbor 192.168.6.2 remote-as internal - address-family ipv4 uni - redistribute connected - neighbor 192.168.5.1 route-reflector-client - neighbor 192.168.6.2 route-reflector-client diff --git a/tests/topotests/bgp_rr_ibgp/spine2/show_ip_route.json_ref b/tests/topotests/bgp_rr_ibgp/spine2/show_ip_route.json_ref deleted file mode 100644 index c428a8832f..0000000000 --- a/tests/topotests/bgp_rr_ibgp/spine2/show_ip_route.json_ref +++ /dev/null @@ -1,162 +0,0 @@ -{ - "192.168.1.0\/24":[ - { - "prefix":"192.168.1.0\/24", - "protocol":"bgp", - "selected":true, - "destSelected":true, - "distance":200, - "metric":0, - "installed":true, - "table":254, - "internalStatus":16, - "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, - "nexthops":[ - { - "flags":3, - "fib":true, - "ip":"192.168.5.1", - "afi":"ipv4", - "interfaceIndex":2, - "interfaceName":"spine2-eth0", - "active":true - } - ] - } - ], - "192.168.2.0\/24":[ - { - "prefix":"192.168.2.0\/24", - "protocol":"bgp", - "selected":true, - "destSelected":true, - "distance":200, - "metric":0, - "installed":true, - "table":254, - "internalStatus":16, - "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, - "nexthops":[ - { - "flags":3, - "fib":true, - "ip":"192.168.5.1", - "afi":"ipv4", - "interfaceIndex":2, - "interfaceName":"spine2-eth0", - "active":true - } - ] - } - ], - "192.168.3.0\/24":[ - { - "prefix":"192.168.3.0\/24", - "protocol":"bgp", - "selected":true, - "destSelected":true, - "distance":200, - "metric":0, - "installed":true, - "table":254, - "internalStatus":16, - "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, - "nexthops":[ - { - "flags":3, - "fib":true, - "ip":"192.168.6.2", - "afi":"ipv4", - "interfaceIndex":3, - "interfaceName":"spine2-eth1", - "active":true - } - ] - } - ], - "192.168.4.0\/24":[ - { - "prefix":"192.168.4.0\/24", - "protocol":"bgp", - "selected":true, - "destSelected":true, - "distance":200, - "metric":0, - "installed":true, - "table":254, - "internalStatus":16, - "internalFlags":13, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, - "nexthops":[ - { - "flags":3, - "fib":true, - "ip":"192.168.6.2", - "afi":"ipv4", - "interfaceIndex":3, - "interfaceName":"spine2-eth1", - "active":true - } - ] - } - ], - "192.168.5.0\/24":[ - { - "prefix":"192.168.5.0\/24", - "protocol":"connected", - "selected":true, - "destSelected":true, - "distance":0, - "metric":0, - "installed":true, - "table":254, - "internalStatus":16, - "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, - "nexthops":[ - { - "flags":3, - "fib":true, - "directlyConnected":true, - "interfaceIndex":2, - "interfaceName":"spine2-eth0", - "active":true - } - ] - } - ], - "192.168.6.0\/24":[ - { - "prefix":"192.168.6.0\/24", - "protocol":"connected", - "selected":true, - "destSelected":true, - "distance":0, - "metric":0, - "installed":true, - "table":254, - "internalStatus":16, - "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, - "nexthops":[ - { - "flags":3, - "fib":true, - "directlyConnected":true, - "interfaceIndex":3, - "interfaceName":"spine2-eth1", - "active":true - } - ] - } - 
] -} diff --git a/tests/topotests/bgp_rr_ibgp/spine2/staticd.conf b/tests/topotests/bgp_rr_ibgp/spine2/staticd.conf deleted file mode 100644 index 3ee14d262c..0000000000 --- a/tests/topotests/bgp_rr_ibgp/spine2/staticd.conf +++ /dev/null @@ -1 +0,0 @@ -hostname spine2 diff --git a/tests/topotests/bgp_rr_ibgp/spine2/zebra.conf b/tests/topotests/bgp_rr_ibgp/spine2/zebra.conf deleted file mode 100644 index a06681fbc4..0000000000 --- a/tests/topotests/bgp_rr_ibgp/spine2/zebra.conf +++ /dev/null @@ -1,9 +0,0 @@ -hostname spine2 -ip forwarding -ipv6 forwarding - -int spine2-eth0 - ip addr 192.168.5.4/24 - -int spine2-eth1 - ip addr 192.168.6.4/24 diff --git a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py index c28394a7a7..da45e73ab4 100755 --- a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py +++ b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py @@ -25,47 +25,8 @@ """ test_bgp_rr_ibgp_topo1.py: Testing IBGP with RR and no IGP - - In a leaf/spine topology with only IBGP connections, where - the same network is being redistributed at multiple points - in the network ( say a redistribute connected at both leaf and spines ) - we end up in a state where zebra gets very confused. - - eva# show ip route - Codes: K - kernel route, C - connected, S - static, R - RIP, - O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, - T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, - F - PBR, f - OpenFabric, - > - selected route, * - FIB route, q - queued route, r - rejected route - - C>* 192.168.1.0/24 is directly connected, tor1-eth0, 00:00:30 - C>* 192.168.2.0/24 is directly connected, tor1-eth1, 00:00:30 - B 192.168.3.0/24 [200/0] via 192.168.4.2 inactive, 00:00:25 - via 192.168.6.2 inactive, 00:00:25 - B>* 192.168.4.0/24 [200/0] via 192.168.2.3, tor1-eth1, 00:00:25 - * via 192.168.6.2 inactive, 00:00:25 - C>* 192.168.5.0/24 is directly connected, tor1-eth2, 00:00:30 - B>* 192.168.6.0/24 [200/0] via 192.168.4.2 inactive, 00:00:25 - * via 192.168.5.4, tor1-eth2, 00:00:25 - - Effectively we have ibgp routes recursing through ibgp routes - and there is no metric to discern whom to listen to. - - This draft: - https://tools.ietf.org/html/draft-ietf-idr-bgp-optimal-route-reflection-19 - - appears to address this issue. From looking at both cisco and arista - deployments they are handling this issue by having the route reflector - prefer the localy learned routes over from their clients. - - Add this topology, in a broken state, so that when we do fix this issue - it is a simple matter of touching this topology up and re-adding it - to the normal daily builds. I also wanted to add this topology - since it is in a state of `doneness` and I wanted to move onto - my normal day job without having to remember about this test. - - This topology is not configured to be run as part of the normal - topotests. +Ensure that a basic rr topology comes up and correctly passes +routes around """ @@ -77,7 +38,7 @@ import json # Save the Current Working Directory to find configuration files. 
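test_zebra_ipv4_routingTable() further below compares each router's live RIB against a per-router show_ip_route.json_ref file; because topotest.json_cmp() treats the reference as a required subset, keys removed from the references in this change (internalNextHopNum, interfaceIndex, ...) simply stop being enforced. A condensed hedged sketch of that check — verify_routes_against_ref is an illustrative name, and CWD is assumed to be the test directory as in the file below:

import json
import os

from lib import topotest

CWD = os.path.dirname(os.path.realpath(__file__))

def verify_routes_against_ref(router):
    # Dump the RIB as JSON and compare it against the checked-in reference;
    # json_cmp() returns None when every key in the reference matches.
    output = router.vtysh_cmd("show ip route json", isjson=True)
    ref = "{}/{}/show_ip_route.json_ref".format(CWD, router.name)
    expected = json.loads(open(ref).read())
    return topotest.json_cmp(output, expected)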
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -94,6 +55,7 @@ from mininet.topo import Topo ## ##################################################### + class NetworkTopo(Topo): "BGP_RR_IBGP Topology 1" @@ -102,41 +64,31 @@ class NetworkTopo(Topo): tgen = get_topogen(self) - tgen.add_router('tor1') - tgen.add_router('tor2') - tgen.add_router('spine1') - tgen.add_router('spine2') + tgen.add_router("tor1") + tgen.add_router("tor2") + tgen.add_router("spine1") # First switch is for a dummy interface (for local network) # on tor1 - # 192.168.1.0/24 - switch = tgen.add_switch('sw1') - switch.add_link(tgen.gears['tor1']) + # 192.168.1.0/24 + switch = tgen.add_switch("sw1") + switch.add_link(tgen.gears["tor1"]) - # 192.168.2.0/24 - tor1 <-> spine1 connection - switch = tgen.add_switch('sw2') - switch.add_link(tgen.gears['tor1']) - switch.add_link(tgen.gears['spine1']) + # 192.168.2.0/24 - tor1 <-> spine1 connection + switch = tgen.add_switch("sw2") + switch.add_link(tgen.gears["tor1"]) + switch.add_link(tgen.gears["spine1"]) # 3rd switch is for a dummy interface (for local netwokr) - # 192.168.3.0/24 - tor2 - switch = tgen.add_switch('sw3') - switch.add_link(tgen.gears['tor2']) + # 192.168.3.0/24 - tor2 + switch = tgen.add_switch("sw3") + switch.add_link(tgen.gears["tor2"]) - # 192.168.4.0/24 - tor2 <-> spine1 connection - switch = tgen.add_switch('sw4') - switch.add_link(tgen.gears['tor2']) - switch.add_link(tgen.gears['spine1']) + # 192.168.4.0/24 - tor2 <-> spine1 connection + switch = tgen.add_switch("sw4") + switch.add_link(tgen.gears["tor2"]) + switch.add_link(tgen.gears["spine1"]) - # 192.168.5.0/24 - tor1 <-> spine2 connection - switch = tgen.add_switch('sw5') - switch.add_link(tgen.gears['tor1']) - switch.add_link(tgen.gears['spine2']) - - # 192.168.6.0/24 - tor2 <-> spine2 connection - switch = tgen.add_switch('sw6') - switch.add_link(tgen.gears['tor2']) - switch.add_link(tgen.gears['spine2']) ##################################################### ## @@ -144,6 +96,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) @@ -153,12 +106,10 @@ def setup_module(module): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() @@ -181,7 +132,7 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - topotest.sleep(5, 'Waiting for BGP_RR_IBGP convergence') + topotest.sleep(5, "Waiting for BGP_RR_IBGP convergence") def test_bgp_rr_ibgp_routes(): @@ -195,6 +146,7 @@ def test_bgp_rr_ibgp_routes(): # Verify BGP_RR_IBGP Status logger.info("Verifying BGP_RR_IBGP routes") + def test_zebra_ipv4_routingTable(): "Test 'show ip route'" @@ -206,16 +158,19 @@ def test_zebra_ipv4_routingTable(): failures = 0 router_list = tgen.routers().values() for router in router_list: - output = router.vtysh_cmd('show ip route json', isjson=True) - refTableFile = '{}/{}/show_ip_route.json_ref'.format(CWD, router.name) + output = router.vtysh_cmd("show ip 
route json", isjson=True) + refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name) expected = json.loads(open(refTableFile).read()) - assertmsg = 'Zebra IPv4 Routing Table verification failed for router {}'.format(router.name) + assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format( + router.name + ) assert topotest.json_cmp(output, expected) is None, assertmsg + def test_shutdown_check_stderr(): - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") tgen = get_topogen() # Don't run this test if we have any failure. @@ -228,15 +183,15 @@ def test_shutdown_check_stderr(): for router in router_list: router.stop() - log = tgen.net[router.name].getStdErr('bgpd') + log = tgen.net[router.name].getStdErr("bgpd") if log: - logger.error('BGPd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('zebra') + logger.error("BGPd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("zebra") if log: - logger.error('Zebra StdErr Log:' + log) + logger.error("Zebra StdErr Log:" + log) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf b/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf index 44a78dffd7..e8ec0f7680 100644 --- a/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf +++ b/tests/topotests/bgp_rr_ibgp/tor1/bgpd.conf @@ -1,5 +1,4 @@ hostname tor1 router bgp 99 neighbor 192.168.2.3 remote-as internal - neighbor 192.168.5.4 remote-as internal redistribute connected diff --git a/tests/topotests/bgp_rr_ibgp/tor1/show_ip_route.json_ref b/tests/topotests/bgp_rr_ibgp/tor1/show_ip_route.json_ref index 223dcebbca..6cfa02441f 100644 --- a/tests/topotests/bgp_rr_ibgp/tor1/show_ip_route.json_ref +++ b/tests/topotests/bgp_rr_ibgp/tor1/show_ip_route.json_ref @@ -11,14 +11,11 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":2, "interfaceName":"tor1-eth0", "active":true } @@ -37,14 +34,11 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":3, "interfaceName":"tor1-eth1", "active":true } @@ -55,23 +49,29 @@ { "prefix":"192.168.3.0\/24", "protocol":"bgp", + "selected":true, + "destSelected":true, "distance":200, "metric":0, + "installed":true, "table":254, - "internalStatus":0, - "internalFlags":5, - "internalNextHopNum":2, - "internalNextHopActiveNum":0, + "internalStatus":16, + "internalFlags":13, "nexthops":[ { - "flags":0, + "flags":5, "ip":"192.168.4.2", - "afi":"ipv4" + "afi":"ipv4", + "active":true, + "recursive":true }, { - "flags":0, - "ip":"192.168.6.2", - "afi":"ipv4" + "flags":3, + "fib":true, + "ip":"192.168.2.3", + "afi":"ipv4", + "interfaceName":"tor1-eth1", + "active":true } ] } @@ -88,29 +88,21 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":2, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "ip":"192.168.2.3", "afi":"ipv4", - "interfaceIndex":3, "interfaceName":"tor1-eth1", "active":true - }, - { - "flags":0, - "ip":"192.168.6.2", - "afi":"ipv4" } ] } ], - "192.168.5.0\/24":[ + "192.168.5.1\/32":[ { - 
"prefix":"192.168.5.0\/24", + "prefix":"192.168.5.1\/32", "protocol":"connected", "selected":true, "destSelected":true, @@ -120,23 +112,20 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":4, - "interfaceName":"tor1-eth2", + "interfaceName":"lo", "active":true } ] } ], - "192.168.6.0\/24":[ + "192.168.6.2\/32":[ { - "prefix":"192.168.6.0\/24", + "prefix":"192.168.6.2\/32", "protocol":"bgp", "selected":true, "destSelected":true, @@ -146,21 +135,20 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":2, - "internalNextHopActiveNum":1, "nexthops":[ { - "flags":0, + "flags":5, "ip":"192.168.4.2", - "afi":"ipv4" + "afi":"ipv4", + "active":true, + "recursive":true }, { "flags":3, "fib":true, - "ip":"192.168.5.4", + "ip":"192.168.2.3", "afi":"ipv4", - "interfaceIndex":4, - "interfaceName":"tor1-eth2", + "interfaceName":"tor1-eth1", "active":true } ] diff --git a/tests/topotests/bgp_rr_ibgp/tor1/zebra.conf b/tests/topotests/bgp_rr_ibgp/tor1/zebra.conf index f2fa713507..25b4fcfd0f 100644 --- a/tests/topotests/bgp_rr_ibgp/tor1/zebra.conf +++ b/tests/topotests/bgp_rr_ibgp/tor1/zebra.conf @@ -8,5 +8,5 @@ int tor1-eth0 int tor1-eth1 ip addr 192.168.2.1/24 -int tor1-eth2 - ip addr 192.168.5.1/24 +int lo + ip addr 192.168.5.1/32 diff --git a/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf b/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf index 5ef1de260e..b091c97ac3 100644 --- a/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf +++ b/tests/topotests/bgp_rr_ibgp/tor2/bgpd.conf @@ -1,5 +1,4 @@ hostname tor2 router bgp 99 neighbor 192.168.4.3 remote-as internal - neighbor 192.168.6.4 remote-as internal redistribute connected diff --git a/tests/topotests/bgp_rr_ibgp/tor2/show_ip_route.json_ref b/tests/topotests/bgp_rr_ibgp/tor2/show_ip_route.json_ref index 5f041b8c62..d9e9290e61 100644 --- a/tests/topotests/bgp_rr_ibgp/tor2/show_ip_route.json_ref +++ b/tests/topotests/bgp_rr_ibgp/tor2/show_ip_route.json_ref @@ -3,23 +3,29 @@ { "prefix":"192.168.1.0\/24", "protocol":"bgp", + "selected":true, + "destSelected":true, "distance":200, "metric":0, + "installed":true, "table":254, - "internalStatus":0, - "internalFlags":5, - "internalNextHopNum":2, - "internalNextHopActiveNum":0, + "internalStatus":16, + "internalFlags":13, "nexthops":[ { - "flags":0, + "flags":5, "ip":"192.168.2.1", - "afi":"ipv4" + "afi":"ipv4", + "active":true, + "recursive":true }, { - "flags":0, - "ip":"192.168.5.1", - "afi":"ipv4" + "flags":3, + "fib":true, + "ip":"192.168.4.3", + "afi":"ipv4", + "interfaceName":"tor2-eth1", + "active":true } ] } @@ -36,22 +42,14 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":2, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "ip":"192.168.4.3", "afi":"ipv4", - "interfaceIndex":3, "interfaceName":"tor2-eth1", "active":true - }, - { - "flags":0, - "ip":"192.168.5.1", - "afi":"ipv4" } ] } @@ -68,14 +66,11 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":2, "interfaceName":"tor2-eth0", "active":true } @@ -94,23 +89,20 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":3, "interfaceName":"tor2-eth1", "active":true } ] 
} ], - "192.168.5.0\/24":[ + "192.168.5.1\/32":[ { - "prefix":"192.168.5.0\/24", + "prefix":"192.168.5.1\/32", "protocol":"bgp", "selected":true, "destSelected":true, @@ -120,29 +112,28 @@ "table":254, "internalStatus":16, "internalFlags":13, - "internalNextHopNum":2, - "internalNextHopActiveNum":1, "nexthops":[ { - "flags":0, + "flags":5, "ip":"192.168.2.1", - "afi":"ipv4" + "afi":"ipv4", + "active":true, + "recursive":true }, { "flags":3, "fib":true, - "ip":"192.168.6.4", + "ip":"192.168.4.3", "afi":"ipv4", - "interfaceIndex":4, - "interfaceName":"tor2-eth2", + "interfaceName":"tor2-eth1", "active":true } ] } ], - "192.168.6.0\/24":[ + "192.168.6.2\/32":[ { - "prefix":"192.168.6.0\/24", + "prefix":"192.168.6.2\/32", "protocol":"connected", "selected":true, "destSelected":true, @@ -152,15 +143,12 @@ "table":254, "internalStatus":16, "internalFlags":8, - "internalNextHopNum":1, - "internalNextHopActiveNum":1, "nexthops":[ { "flags":3, "fib":true, "directlyConnected":true, - "interfaceIndex":4, - "interfaceName":"tor2-eth2", + "interfaceName":"lo", "active":true } ] diff --git a/tests/topotests/bgp_rr_ibgp/tor2/zebra.conf b/tests/topotests/bgp_rr_ibgp/tor2/zebra.conf index 3318cbb196..e1a06b14fc 100644 --- a/tests/topotests/bgp_rr_ibgp/tor2/zebra.conf +++ b/tests/topotests/bgp_rr_ibgp/tor2/zebra.conf @@ -9,5 +9,5 @@ int tor2-eth1 ip addr 192.168.4.2/24 -int tor2-eth2 - ip addr 192.168.6.2/24 +int lo + ip addr 192.168.6.2/32 diff --git a/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py b/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py index 708464864a..56a98c1ef8 100644 --- a/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py +++ b/tests/topotests/bgp_sender-as-path-loop-detection/test_bgp_sender-as-path-loop-detection.py @@ -35,7 +35,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -43,20 +43,22 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -66,38 +68,34 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_sender_as_path_loop_detection(): tgen = get_topogen() if 
tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) @@ -105,19 +103,11 @@ def test_bgp_sender_as_path_loop_detection(): def _bgp_has_route_from_r1(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json")) expected = { - 'paths': [ + "paths": [ { - 'aspath': { - 'segments': [ - { - 'type': 'as-sequence', - 'list': [ - 65001, - 65003 - ] - } - ], - 'length': 2 + "aspath": { + "segments": [{"type": "as-sequence", "list": [65001, 65003]}], + "length": 2, } } ] @@ -125,10 +115,12 @@ def test_bgp_sender_as_path_loop_detection(): return topotest.json_cmp(output, expected) def _bgp_suppress_route_to_r3(router): - output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json")) - expected = { - 'totalPrefixCounter': 0 - } + output = json.loads( + router.vtysh_cmd( + "show ip bgp neighbor 192.168.254.2 advertised-routes json" + ) + ) + expected = {"totalPrefixCounter": 0} return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) @@ -144,8 +136,11 @@ def test_bgp_sender_as_path_loop_detection(): test_func = functools.partial(_bgp_suppress_route_to_r3, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Route 172.16.255.254/32 should not be sent to r3 "{}"'.format(router) + assert ( + result is None + ), 'Route 172.16.255.254/32 should not be sent to r3 "{}"'.format(router) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py b/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py index 09e195e22d..ce3165db25 100644 --- a/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py +++ b/tests/topotests/bgp_set_local-preference_add_subtract/test_bgp_set_local-preference_add_subtract.py @@ -36,7 +36,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -44,17 +44,19 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -64,70 +66,48 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - 
TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_set_local_preference(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r1'] + router = tgen.gears["r1"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor json")) expected = { - '192.168.255.2': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, + }, + "192.168.255.3": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, }, - '192.168.255.3': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } - } } return topotest.json_cmp(output, expected) def _bgp_check_local_preference(router): output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json")) expected = { - 'paths': [ - { - 'localpref': 50, - 'nexthops': [ - { - 'ip': '192.168.255.3' - } - ] - }, - { - 'localpref': 150, - 'nexthops': [ - { - 'ip': '192.168.255.2' - } - ] - } + "paths": [ + {"locPrf": 50, "nexthops": [{"ip": "192.168.255.3"}]}, + {"locPrf": 150, "nexthops": [{"ip": "192.168.255.2"}]}, ] } return topotest.json_cmp(output, expected) @@ -140,8 +120,11 @@ def test_bgp_set_local_preference(): test_func = functools.partial(_bgp_check_local_preference, router) success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5) - assert result is None, 'Failed to see applied BGP local-preference in "{}"'.format(router) + assert result is None, 'Failed to see applied BGP local-preference in "{}"'.format( + router + ) + -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py b/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py index f5119468e0..c41ae7cb2a 100644 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py +++ b/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py @@ -36,7 +36,7 @@ import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -44,16 +44,18 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) @@ -63,46 +65,42 @@ def setup_module(mod): for i, (rname, router) in enumerate(router_list.iteritems(), 1): router.load_config( - TopoRouter.RD_ZEBRA, - 
os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_bgp_show_ip_bgp_hostname(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears['r2'] + router = tgen.gears["r2"] def _bgp_converge(router): output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) expected = { - '192.168.255.1': { - 'bgpState': 'Established', - 'addressFamilyInfo': { - 'ipv4Unicast': { - 'acceptedPrefixCounter': 2 - } - } + "192.168.255.1": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}}, } } return topotest.json_cmp(output, expected) def _bgp_show_nexthop_hostname_and_ip(router): output = json.loads(router.vtysh_cmd("show ip bgp json")) - for nh in output['routes']['172.16.255.253/32'][0]['nexthops']: - if 'hostname' in nh and 'ip' in nh: + for nh in output["routes"]["172.16.255.253/32"][0]["nexthops"]: + if "hostname" in nh and "ip" in nh: return True return False @@ -112,6 +110,7 @@ def test_bgp_show_ip_bgp_hostname(): assert result is None, 'Failed bgp convergence in "{}"'.format(router) assert _bgp_show_nexthop_hostname_and_ip(router) == True -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py index 2944b3729c..5d8c80c6a2 100644 --- a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py +++ b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py @@ -35,7 +35,7 @@ import platform # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -49,17 +49,19 @@ from mininet.topo import Topo class BGPIPV6RTADVVRFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 2 routers. 
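
The tests above all share one poll-until-match idiom; a condensed sketch of it follows, assuming only the helpers these files already import (functools.partial, topotest.run_and_expect, topotest.json_cmp). The command, expected values, and timeouts are placeholders, not taken from any single test.

import json
import functools
from lib import topotest

def _converged(router):
    # Partial match: json_cmp() returns None when expected is a subset of
    # the vtysh JSON output.
    output = json.loads(router.vtysh_cmd("show ip bgp neighbor json"))
    expected = {"192.168.255.1": {"bgpState": "Established"}}
    return topotest.json_cmp(output, expected)

def wait_for_convergence(router):
    test_func = functools.partial(_converged, router)
    # run_and_expect() re-runs test_func up to count times, wait seconds
    # apart, until it returns the expected value (None here).
    success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
    assert result is None, 'Failed bgp convergence in "{}"'.format(router)
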
- tgen.add_router('r1') - tgen.add_router('r2') + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) def setup_module(mod): "Sets up the pytest environment" @@ -68,52 +70,57 @@ def setup_module(mod): router_list = tgen.routers() - logger.info('Testing with VRF Lite support') + logger.info("Testing with VRF Lite support") krel = platform.release() # May need to adjust handling of vrf traffic depending on kernel version l3mdev_accept = 0 - if topotest.version_cmp(krel, '4.15') >= 0 and \ - topotest.version_cmp(krel, '4.18') <= 0: + if ( + topotest.version_cmp(krel, "4.15") >= 0 + and topotest.version_cmp(krel, "4.18") <= 0 + ): l3mdev_accept = 1 - if topotest.version_cmp(krel, '5.0') >= 0: + if topotest.version_cmp(krel, "5.0") >= 0: l3mdev_accept = 1 - logger.info('krel \'{0}\' setting net.ipv4.tcp_l3mdev_accept={1}'.format( - krel, l3mdev_accept)) + logger.info( + "krel '{0}' setting net.ipv4.tcp_l3mdev_accept={1}".format(krel, l3mdev_accept) + ) - cmds = ['ip link add {0}-cust1 type vrf table 1001', - 'ip link add loop1 type dummy', - 'ip link set loop1 master {0}-cust1', - 'ip link set {0}-eth0 master {0}-cust1'] + cmds = [ + "ip link add {0}-cust1 type vrf table 1001", + "ip link add loop1 type dummy", + "ip link set loop1 master {0}-cust1", + "ip link set {0}-eth0 master {0}-cust1", + ] for rname, router in router_list.iteritems(): for cmd in cmds: output = tgen.net[rname].cmd(cmd.format(rname)) - output = tgen.net[rname].cmd('sysctl -n net.ipv4.tcp_l3mdev_accept') + output = tgen.net[rname].cmd("sysctl -n net.ipv4.tcp_l3mdev_accept") logger.info( - 'router {0}: existing tcp_l3mdev_accept was {1}'.format( - rname, output)) + "router {0}: existing tcp_l3mdev_accept was {1}".format(rname, output) + ) if l3mdev_accept: output = tgen.net[rname].cmd( - 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)) + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept) + ) for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. 
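
The kernel-version logic in this hunk is easy to lose in the reflow, so here it is restated as a standalone helper. This is a reading aid only: the version ranges are exactly the ones above (4.15 through 4.18, and 5.0 or newer get net.ipv4.tcp_l3mdev_accept=1), and the helper name is illustrative.

from lib import topotest

def l3mdev_accept_for(krel):
    # Same decision as in the hunk above: these kernel ranges have the test
    # set net.ipv4.tcp_l3mdev_accept=1 for the VRF-lite setup.
    if (topotest.version_cmp(krel, "4.15") >= 0
            and topotest.version_cmp(krel, "4.18") <= 0):
        return 1
    if topotest.version_cmp(krel, "5.0") >= 0:
        return 1
    return 0
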
tgen.start_router() + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -134,44 +141,51 @@ def test_protocols_convergence(): logger.info("Checking IPv4 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route vrf {}-cust1 json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip route vrf {}-cust1 json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg # Check IPv6 routing tables. logger.info("Checking IPv6 routes for convergence") for router in tgen.routers().values(): - json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name) + json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name) if not os.path.isfile(json_file): - logger.info('skipping file {}'.format(json_file)) + logger.info("skipping file {}".format(json_file)) continue expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ipv6 route vrf {}-cust1 json'.format(router.name), expected) - _, result = topotest.run_and_expect(test_func, None, count=160, - wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ipv6 route vrf {}-cust1 json".format(router.name), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py b/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py index 5334ea5369..031ff455ca 100755 --- a/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py +++ b/tests/topotests/bgp_vrf_netns/peer1/exa-receive.py @@ -4,7 +4,7 @@ exa-receive.py: Save received routes form ExaBGP into file """ -from sys import stdin,argv +from sys import stdin, argv from datetime import datetime # 1st arg is peer number @@ -13,12 +13,12 @@ peer = int(argv[1]) # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 -routesavefile = open('/tmp/peer%s-received.log' % peer, 'w') +routesavefile = open("/tmp/peer%s-received.log" % peer, "w") while True: try: line = stdin.readline() - timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ') + timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ") routesavefile.write(timestamp + line) routesavefile.flush() diff --git a/tests/topotests/bgp_vrf_netns/peer1/exa-send.py b/tests/topotests/bgp_vrf_netns/peer1/exa-send.py index 9a2a201c57..9279cc45ff 100755 --- a/tests/topotests/bgp_vrf_netns/peer1/exa-send.py +++ b/tests/topotests/bgp_vrf_netns/peer1/exa-send.py @@ -4,7 +4,7 @@ exa-send.py: Send a few testroutes with ExaBGP """ -from sys import stdout,argv +from sys import stdout, argv from time import sleep sleep(5) @@ -17,10 +17,12 @@ asnum = 99 # Announce numRoutes equal routes per PE - different neighbor AS for i in range(0, numRoutes): - stdout.write('announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n' % (i, i, (((peer-1) / 5) + 1), peer+100)) + stdout.write( + "announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n" + % (i, i, (((peer - 1) / 5) + 1), peer + 100) + ) stdout.flush() -#Loop endlessly to allow ExaBGP to continue running +# Loop endlessly to allow ExaBGP to continue running while True: sleep(1) - diff --git a/tests/topotests/bgp_vrf_netns/r1/summary.txt b/tests/topotests/bgp_vrf_netns/r1/summary.txt index 7473fa2151..1a079ff130 100644 --- a/tests/topotests/bgp_vrf_netns/r1/summary.txt +++ b/tests/topotests/bgp_vrf_netns/r1/summary.txt @@ -8,7 +8,7 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":10, + "pfxRcd":10, "state":"Established" } }, diff --git a/tests/topotests/bgp_vrf_netns/r1/summary20.txt b/tests/topotests/bgp_vrf_netns/r1/summary20.txt index 18318e07a8..2b5787e6da 100644 --- a/tests/topotests/bgp_vrf_netns/r1/summary20.txt +++ b/tests/topotests/bgp_vrf_netns/r1/summary20.txt @@ -7,7 +7,7 @@ "10.0.1.101":{ "outq":0, "inq":0, - "prefixReceivedCount":10, + "pfxRcd":10, "state":"Established" } }, diff --git a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py index a5590bcaf6..ae48f01a0e 100755 --- a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py +++ b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py @@ -33,7 +33,7 @@ import pytest # Save the Current Working Directory to find configuration files. 
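
Note that the reflowed exa-send.py keeps (peer - 1) / 5, which relies on Python 2 integer division (consistent with the iteritems() calls elsewhere in this patch). Below is a version-neutral sketch of the same announcement loop, with hypothetical peer and numRoutes values, purely for illustration.

import sys

# Hypothetical values; in the real helper peer comes from argv[1].
peer = 1
numRoutes = 10

for i in range(0, numRoutes):
    # Explicit floor division keeps the PE index identical on Python 2 and 3.
    pe_index = ((peer - 1) // 5) + 1
    sys.stdout.write(
        "announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n"
        % (i, i, pe_index, peer + 100)
    )
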
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -53,6 +53,7 @@ CustomizeVrfWithNetns = True ## ##################################################### + class BGPVRFNETNSTopo1(Topo): "BGP EBGP VRF NETNS Topology 1" @@ -60,18 +61,17 @@ class BGPVRFNETNSTopo1(Topo): tgen = get_topogen(self) # Setup Routers - tgen.add_router('r1') + tgen.add_router("r1") # Setup Switches - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Add eBGP ExaBGP neighbors - peer_ip = '10.0.1.101' - peer_route = 'via 10.0.1.1' - peer = tgen.add_exabgp_peer('peer1', - ip=peer_ip, defaultRoute=peer_route) - switch = tgen.gears['s1'] + peer_ip = "10.0.1.101" + peer_route = "via 10.0.1.1" + peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route) + switch = tgen.gears["s1"] switch.add_link(peer) @@ -81,74 +81,87 @@ class BGPVRFNETNSTopo1(Topo): ## ##################################################### + def setup_module(module): tgen = Topogen(BGPVRFNETNSTopo1, module.__name__) tgen.start_topology() # Get r1 reference - router = tgen.gears['r1'] + router = tgen.gears["r1"] # check for zebra capability if CustomizeVrfWithNetns == True: - if router.check_capability( - TopoRouter.RD_ZEBRA, - '--vrfwnetns' - ) == False: - return pytest.skip('Skipping BGP VRF NETNS Test. VRF NETNS backend not available on FRR') - if os.system('ip netns list') != 0: - return pytest.skip('Skipping BGP VRF NETNS Test. NETNS not available on System') + if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False: + return pytest.skip( + "Skipping BGP VRF NETNS Test. VRF NETNS backend not available on FRR" + ) + if os.system("ip netns list") != 0: + return pytest.skip( + "Skipping BGP VRF NETNS Test. NETNS not available on System" + ) # retrieve VRF backend kind if CustomizeVrfWithNetns == True: - logger.info('Testing with VRF Namespace support') + logger.info("Testing with VRF Namespace support") # create VRF r1-cust1 # move r1-eth0 to VRF r1-cust1 - cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi', - 'ip netns add {0}-cust1', - 'ip link set dev {0}-eth0 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up'] + cmds = [ + "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi", + "ip netns add {0}-cust1", + "ip link set dev {0}-eth0 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth0 up", + ] for cmd in cmds: - cmd = cmd.format('r1') - logger.info('cmd: '+cmd); - output = router.run(cmd.format('r1')) + cmd = cmd.format("r1") + logger.info("cmd: " + cmd) + output = router.run(cmd.format("r1")) if output != None and len(output) > 0: - logger.info('Aborting due to unexpected output: cmd="{}" output=\n{}'.format(cmd, output)) - return pytest.skip('Skipping BGP VRF NETNS Test. Unexpected output to command: '+cmd) - #run daemons + logger.info( + 'Aborting due to unexpected output: cmd="{}" output=\n{}'.format( + cmd, output + ) + ) + return pytest.skip( + "Skipping BGP VRF NETNS Test. 
Unexpected output to command: " + cmd + ) + # run daemons router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format('r1')), - '--vrfwnetns' + os.path.join(CWD, "{}/zebra.conf".format("r1")), + "--vrfwnetns", ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format('r1')) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1")) ) - logger.info('Launching BGP and ZEBRA') + logger.info("Launching BGP and ZEBRA") # BGP and ZEBRA start without underlying VRF router.start() # Starting Hosts and init ExaBGP on each of them - logger.info('starting exaBGP on peer1') + logger.info("starting exaBGP on peer1") peer_list = tgen.exabgp_peers() for pname, peer in peer_list.iteritems(): peer_dir = os.path.join(CWD, pname) - env_file = os.path.join(CWD, 'exabgp.env') - logger.info('Running ExaBGP peer') + env_file = os.path.join(CWD, "exabgp.env") + logger.info("Running ExaBGP peer") peer.start(peer_dir, env_file) logger.info(pname) + def teardown_module(module): tgen = get_topogen() # move back r1-eth0 to default VRF # delete VRF r1-cust1 - cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1', - 'ip netns delete {0}-cust1'] + cmds = [ + "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1", + "ip netns delete {0}-cust1", + ] for cmd in cmds: - tgen.net['r1'].cmd(cmd.format('r1')) + tgen.net["r1"].cmd(cmd.format("r1")) tgen.stop_topology() + def test_bgp_vrf_learn(): "Test daemon learnt VRF context" tgen = get_topogen() @@ -158,11 +171,11 @@ def test_bgp_vrf_learn(): pytest.skip(tgen.errors) # Expected result - output = tgen.gears['r1'].vtysh_cmd("show vrf", isjson=False) - logger.info('output is: {}'.format(output)) + output = tgen.gears["r1"].vtysh_cmd("show vrf", isjson=False) + logger.info("output is: {}".format(output)) - output = tgen.gears['r1'].vtysh_cmd("show bgp vrfs", isjson=False) - logger.info('output is: {}'.format(output)) + output = tgen.gears["r1"].vtysh_cmd("show bgp vrfs", isjson=False) + logger.info("output is: {}".format(output)) def test_bgp_convergence(): @@ -175,23 +188,25 @@ def test_bgp_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('waiting for bgp convergence') + logger.info("waiting for bgp convergence") # Expected result - router = tgen.gears['r1'] - if router.has_version('<', '3.0'): - reffile = os.path.join(CWD, 'r1/summary20.txt') + router = tgen.gears["r1"] + if router.has_version("<", "3.0"): + reffile = os.path.join(CWD, "r1/summary20.txt") else: - reffile = os.path.join(CWD, 'r1/summary.txt') + reffile = os.path.join(CWD, "r1/summary.txt") expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, - router, 'show bgp vrf r1-cust1 summary json', expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show bgp vrf r1-cust1 summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=90, wait=0.5) - assertmsg = 'BGP router network did not converge' + assertmsg = "BGP router network did not converge" assert res is None, assertmsg + def test_bgp_vrf_netns(): tgen = get_topogen() @@ -200,24 +215,28 @@ def test_bgp_vrf_netns(): pytest.skip(tgen.errors) expect = { - 'routerId': '10.0.1.1', - 'routes': { - }, + "routerId": "10.0.1.1", + "routes": {}, } for subnet in range(0, 10): - netkey = '10.201.{}.0/24'.format(subnet) - expect['routes'][netkey] = [] - peer = {'valid': True} - expect['routes'][netkey].append(peer) - - test_func = functools.partial(topotest.router_json_cmp, - 
tgen.gears['r1'], 'show ip bgp vrf r1-cust1 ipv4 json', expect) + netkey = "10.201.{}.0/24".format(subnet) + expect["routes"][netkey] = [] + peer = {"valid": True} + expect["routes"][netkey].append(peer) + + test_func = functools.partial( + topotest.router_json_cmp, + tgen.gears["r1"], + "show ip bgp vrf r1-cust1 ipv4 json", + expect, + ) _, res = topotest.run_and_expect(test_func, None, count=12, wait=0.5) assertmsg = 'expected routes in "show ip bgp vrf r1-cust1 ipv4" output' assert res is None, assertmsg -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] ret = pytest.main(args) diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index 76b0ab017e..d46c52a4c4 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -9,13 +9,18 @@ import pytest topology_only = False + def pytest_addoption(parser): """ Add topology-only option to the topology tester. This option makes pytest only run the setup_module() to setup the topology without running any tests. """ - parser.addoption('--topology-only', action='store_true', - help='Only set up this topology, don\'t run tests') + parser.addoption( + "--topology-only", + action="store_true", + help="Only set up this topology, don't run tests", + ) + def pytest_runtest_call(): """ @@ -30,7 +35,8 @@ def pytest_runtest_call(): # Allow user to play with the setup. tgen.mininet_cli() - pytest.exit('the topology executed successfully') + pytest.exit("the topology executed successfully") + def pytest_assertrepr_compare(op, left, right): """ @@ -44,17 +50,19 @@ def pytest_assertrepr_compare(op, left, right): return json_result.errors + def pytest_configure(config): "Assert that the environment is correctly configured." global topology_only if not diagnose_env(): - pytest.exit('enviroment has errors, please read the logs') + pytest.exit("enviroment has errors, please read the logs") - if config.getoption('--topology-only'): + if config.getoption("--topology-only"): topology_only = True + def pytest_runtest_makereport(item, call): "Log all assert messages to default logger with error level" # Nothing happened @@ -65,18 +73,22 @@ def pytest_runtest_makereport(item, call): modname = parent.module.__name__ # Treat skips as non errors - if call.excinfo.typename != 'AssertionError': - logger.info('assert skipped at "{}/{}": {}'.format( - modname, item.name, call.excinfo.value)) + if call.excinfo.typename != "AssertionError": + logger.info( + 'assert skipped at "{}/{}": {}'.format( + modname, item.name, call.excinfo.value + ) + ) return # Handle assert failures parent._previousfailed = item - logger.error('assert failed at "{}/{}": {}'.format( - modname, item.name, call.excinfo.value)) + logger.error( + 'assert failed at "{}/{}": {}'.format(modname, item.name, call.excinfo.value) + ) # (topogen) Set topology error to avoid advancing in the test. tgen = get_topogen() if tgen is not None: # This will cause topogen to report error on `routers_have_failure`. - tgen.set_error('{}/{}'.format(modname, item.name)) + tgen.set_error("{}/{}".format(modname, item.name)) diff --git a/tests/topotests/eigrp-topo1/test_eigrp_topo1.py b/tests/topotests/eigrp-topo1/test_eigrp_topo1.py index 1c00face43..c1dd88823b 100755 --- a/tests/topotests/eigrp-topo1/test_eigrp_topo1.py +++ b/tests/topotests/eigrp-topo1/test_eigrp_topo1.py @@ -35,7 +35,7 @@ import json # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -52,6 +52,7 @@ from mininet.topo import Topo ## ##################################################### + class NetworkTopo(Topo): "EIGRP Topology 1" @@ -61,27 +62,27 @@ class NetworkTopo(Topo): tgen = get_topogen(self) for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # On main router # First switch is for a dummy interface (for local network) - switch = tgen.add_switch('sw1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("sw1") + switch.add_link(tgen.gears["r1"]) # Switches for EIGRP # switch 2 switch is for connection to EIGRP router - switch = tgen.add_switch('sw2') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("sw2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) # switch 4 is stub on remote EIGRP router - switch = tgen.add_switch('sw4') - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("sw4") + switch.add_link(tgen.gears["r3"]) # switch 3 is between EIGRP routers - switch = tgen.add_switch('sw3') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("sw3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) ##################################################### @@ -90,6 +91,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) @@ -99,12 +101,10 @@ def setup_module(module): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_EIGRP, - os.path.join(CWD, '{}/eigrpd.conf'.format(rname)) + TopoRouter.RD_EIGRP, os.path.join(CWD, "{}/eigrpd.conf".format(rname)) ) tgen.start_router() @@ -126,7 +126,7 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - topotest.sleep(5, 'Waiting for EIGRP convergence') + topotest.sleep(5, "Waiting for EIGRP convergence") def test_eigrp_routes(): @@ -142,7 +142,7 @@ def test_eigrp_routes(): router_list = tgen.routers().values() for router in router_list: - refTableFile = '{}/{}/show_ip_eigrp.json'.format(CWD, router.name) + refTableFile = "{}/{}/show_ip_eigrp.json".format(CWD, router.name) # Read expected result from file expected = json.loads(open(refTableFile).read()) @@ -153,6 +153,7 @@ def test_eigrp_routes(): assertmsg = '"show ip eigrp topo" mismatches on {}'.format(router.name) assert topotest.json_cmp(actual, expected) is None, assertmsg + def test_zebra_ipv4_routingTable(): "Test 'show ip route'" @@ -164,27 +165,29 @@ def test_zebra_ipv4_routingTable(): failures = 0 router_list = tgen.routers().values() for router in router_list: - output = router.vtysh_cmd('show ip route json', isjson=True) - refTableFile = '{}/{}/show_ip_route.json_ref'.format(CWD, router.name) + output = router.vtysh_cmd("show ip route json", isjson=True) + refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name) expected = json.loads(open(refTableFile).read()) - assertmsg = 'Zebra IPv4 Routing Table verification failed for router {}'.format(router.name) + 
assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format( + router.name + ) assert topotest.json_cmp(output, expected) is None, assertmsg + def test_shut_interface_and_recover(): "Test shutdown of an interface and recovery of the interface" tgen = get_topogen() - router = tgen.gears['r1'] - router.run('ip link set r1-eth1 down') - topotest.sleep(5, 'Waiting for EIGRP convergence') - router.run('ip link set r1-eth1 up') - + router = tgen.gears["r1"] + router.run("ip link set r1-eth1 down") + topotest.sleep(5, "Waiting for EIGRP convergence") + router.run("ip link set r1-eth1 up") def test_shutdown_check_stderr(): - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") tgen = get_topogen() # Don't run this test if we have any failure. @@ -197,15 +200,15 @@ def test_shutdown_check_stderr(): for router in router_list: router.stop() - log = tgen.net[router.name].getStdErr('eigrpd') + log = tgen.net[router.name].getStdErr("eigrpd") if log: - logger.error('EIGRPd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('zebra') + logger.error("EIGRPd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("zebra") if log: - logger.error('Zebra StdErr Log:' + log) + logger.error("Zebra StdErr Log:" + log) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) @@ -237,23 +240,23 @@ def ip_eigrp_topo(node): } } """ - output = topotest.normalize_text(node.vtysh_cmd('show ip eigrp topo')).splitlines() + output = topotest.normalize_text(node.vtysh_cmd("show ip eigrp topo")).splitlines() result = {} for idx, line in enumerate(output): - columns = line.split(' ', 1) + columns = line.split(" ", 1) # Parse the following format into python dicts # code A.B.C.D/E, X successors, FD is Y, serno: Z # via FOO, interface-name code = columns[0] - if code not in ['P', 'A', 'U', 'Q', 'R', 'r', 's']: + if code not in ["P", "A", "U", "Q", "R", "r", "s"]: continue if not result.has_key(code): result[code] = {} # Split network from the rest - columns = columns[1].split(',') + columns = columns[1].split(",") # Parse first line data network = columns[0] @@ -263,33 +266,33 @@ def ip_eigrp_topo(node): if column == columns[0]: continue - match = re.search(r'(\d+) successors', column) + match = re.search(r"(\d+) successors", column) if match is not None: - result[code][network]['successors'] = match.group(1) + result[code][network]["successors"] = match.group(1) continue - match = re.search(r'FD is (\d+)', column) + match = re.search(r"FD is (\d+)", column) if match is not None: - result[code][network]['fd'] = match.group(1) + result[code][network]["fd"] = match.group(1) continue - match = re.search(r'serno: (\d+)', column) + match = re.search(r"serno: (\d+)", column) if match is not None: - result[code][network]['serno'] = match.group(1) + result[code][network]["serno"] = match.group(1) continue # Parse second line data nextline = output[idx + 1] - columns = topotest.normalize_text(nextline).split(',') + columns = topotest.normalize_text(nextline).split(",") for column in columns: - match = re.search(r'via (.+)', column) + match = re.search(r"via (.+)", column) if match is not None: - result[code][network]['via'] = match.group(1) + result[code][network]["via"] = match.group(1) continue - match = re.search(r'(.+)', column) + match = re.search(r"(.+)", 
column) if match is not None: - result[code][network]['interface'] = match.group(1) + result[code][network]["interface"] = match.group(1) continue return result diff --git a/tests/topotests/evpn-pim-1/spine/bgp.summ.json b/tests/topotests/evpn-pim-1/spine/bgp.summ.json index faf40c8d43..53370507e8 100644 --- a/tests/topotests/evpn-pim-1/spine/bgp.summ.json +++ b/tests/topotests/evpn-pim-1/spine/bgp.summ.json @@ -12,7 +12,6 @@ "tableVersion":0, "outq":0, "inq":0, - "prefixReceivedCount":3, "pfxRcd":3, "pfxSnt":7, "state":"Established", @@ -26,7 +25,6 @@ "tableVersion":0, "outq":0, "inq":0, - "prefixReceivedCount":3, "pfxRcd":3, "pfxSnt":7, "state":"Established", diff --git a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py index dafe2e03ac..94bb91d49f 100755 --- a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py +++ b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py @@ -36,7 +36,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -53,6 +53,7 @@ from mininet.topo import Topo ## ##################################################### + class NetworkTopo(Topo): "evpn-pim Topology 1" @@ -61,34 +62,33 @@ class NetworkTopo(Topo): tgen = get_topogen(self) - tgen.add_router('spine') - tgen.add_router('leaf1') - tgen.add_router('leaf2') - tgen.add_router('host1') - tgen.add_router('host2') + tgen.add_router("spine") + tgen.add_router("leaf1") + tgen.add_router("leaf2") + tgen.add_router("host1") + tgen.add_router("host2") # On main router # First switch is for a dummy interface (for local network) # spine-eth0 is connected to leaf1-eth0 - switch = tgen.add_switch('sw1') - switch.add_link(tgen.gears['spine']) - switch.add_link(tgen.gears['leaf1']) + switch = tgen.add_switch("sw1") + switch.add_link(tgen.gears["spine"]) + switch.add_link(tgen.gears["leaf1"]) # spine-eth1 is connected to leaf2-eth0 - switch = tgen.add_switch('sw2') - switch.add_link(tgen.gears['spine']) - switch.add_link(tgen.gears['leaf2']) + switch = tgen.add_switch("sw2") + switch.add_link(tgen.gears["spine"]) + switch.add_link(tgen.gears["leaf2"]) # leaf1-eth1 is connected to host1-eth0 - switch = tgen.add_switch('sw3') - switch.add_link(tgen.gears['leaf1']) - switch.add_link(tgen.gears['host1']) + switch = tgen.add_switch("sw3") + switch.add_link(tgen.gears["leaf1"]) + switch.add_link(tgen.gears["host1"]) # leaf2-eth1 is connected to host2-eth0 - switch = tgen.add_switch('sw4') - switch.add_link(tgen.gears['leaf2']) - switch.add_link(tgen.gears['host2']) - + switch = tgen.add_switch("sw4") + switch.add_link(tgen.gears["leaf2"]) + switch.add_link(tgen.gears["host2"]) ##################################################### @@ -97,42 +97,45 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) tgen.start_topology() - leaf1 = tgen.gears['leaf1'] - leaf2 = tgen.gears['leaf2'] - - leaf1.run('brctl addbr brleaf1') - leaf2.run('brctl addbr brleaf2') - leaf1.run('ip link set dev brleaf1 up') - leaf2.run('ip link set dev brleaf2 up') - leaf1.run('ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf1-eth1 dstport 4789') - leaf2.run('ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf2-eth1 dstport 4789') - 
leaf1.run('brctl addif brleaf1 vxlan0') - leaf2.run('brctl addif brleaf2 vxlan0') - leaf1.run('ip link set up dev vxlan0') - leaf2.run('ip link set up dev vxlan0') - #tgen.mininet_cli() + leaf1 = tgen.gears["leaf1"] + leaf2 = tgen.gears["leaf2"] + + leaf1.run("brctl addbr brleaf1") + leaf2.run("brctl addbr brleaf2") + leaf1.run("ip link set dev brleaf1 up") + leaf2.run("ip link set dev brleaf2 up") + leaf1.run( + "ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf1-eth1 dstport 4789" + ) + leaf2.run( + "ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf2-eth1 dstport 4789" + ) + leaf1.run("brctl addif brleaf1 vxlan0") + leaf2.run("brctl addif brleaf2 vxlan0") + leaf1.run("ip link set up dev vxlan0") + leaf2.run("ip link set up dev vxlan0") + # tgen.mininet_cli() # This is a sample of configuration loading. router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_PIM, - os.path.join(CWD, '{}/pimd.conf'.format(rname)) + TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname)) ) tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(_mod): "Teardown the pytest environment" @@ -150,16 +153,18 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - spine = tgen.gears['spine'] - json_file = '{}/{}/bgp.summ.json'.format(CWD, spine.name) + spine = tgen.gears["spine"] + json_file = "{}/{}/bgp.summ.json".format(CWD, spine.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - spine, 'show bgp ipv4 uni summ json', expected) + test_func = partial( + topotest.router_json_cmp, spine, "show bgp ipv4 uni summ json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=125, wait=1) assertmsg = '"{}" JSON output mismatches'.format(spine.name) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_multicast_groups_on_rp(): "Ensure the multicast groups show up on the spine" @@ -172,20 +177,22 @@ def test_multicast_groups_on_rp(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - spine = tgen.gears['spine'] - json_file = '{}/{}/join-info.json'.format(CWD, spine.name) + spine = tgen.gears["spine"] + json_file = "{}/{}/join-info.json".format(CWD, spine.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - spine, 'show ip pim join json', expected) + test_func = partial( + topotest.router_json_cmp, spine, "show ip pim join json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assertmsg = '"{}" JSON output mismatches'.format(spine.name) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_shutdown_check_stderr(): - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") tgen = get_topogen() # Don't run this test if we have any failure. 
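
For context, the hunk above ends at the failure gate that nearly every test in this patch opens with; the sketch below shows how it ties back to the pytest_runtest_makereport() hook in conftest.py. The test name is hypothetical, and the body is the gate used verbatim in this series.

import pytest
from lib.topogen import get_topogen

def test_after_an_earlier_failure():
    # Once conftest's pytest_runtest_makereport() has called tgen.set_error()
    # for a failed assert, routers_have_failure() reports it and the remaining
    # tests in the module skip with the recorded error text.
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
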
@@ -198,18 +205,17 @@ def test_shutdown_check_stderr(): for router in router_list: router.stop() - log = tgen.net[router.name].getStdErr('pimd') + log = tgen.net[router.name].getStdErr("pimd") if log: - logger.error('PIMd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('bgpd') + logger.error("PIMd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("bgpd") if log: - logger.error('BGPd StdErr Log:' + log) - log = tgen.net[router.name].getStdErr('zebra') + logger.error("BGPd StdErr Log:" + log) + log = tgen.net[router.name].getStdErr("zebra") if log: - logger.error('Zebra StdErr Log:' + log) + logger.error("Zebra StdErr Log:" + log) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) - diff --git a/tests/topotests/example-test/test_example.py b/tests/topotests/example-test/test_example.py index 8e37ad11d4..72eceee612 100755 --- a/tests/topotests/example-test/test_example.py +++ b/tests/topotests/example-test/test_example.py @@ -9,52 +9,61 @@ import pytest fatal_error = "" + def setup_module(module): - print ("setup_module module:%s" % module.__name__) + print("setup_module module:%s" % module.__name__) + def teardown_module(module): - print ("teardown_module module:%s" % module.__name__) + print("teardown_module module:%s" % module.__name__) + def setup_function(function): - print ("setup_function function:%s" % function.__name__) + print("setup_function function:%s" % function.__name__) + def teardown_function(function): - print ("teardown_function function:%s" % function.__name__) + print("teardown_function function:%s" % function.__name__) + def test_numbers_compare(): a = 12 - print ("Dummy Output") - assert( a == 12 ) + print("Dummy Output") + assert a == 12 + def test_fail_example(): assert True, "Some Text with explaination in case of failure" + def test_ls_exits_zero(): "Tests for ls command on invalid file" global fatal_error proc = subprocess.Popen( - ["ls", "/some/nonexistant/file"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + ["ls", "/some/nonexistant/file"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, ) stdout, stderr = proc.communicate() - if (proc.returncode != 0): + if proc.returncode != 0: # Mark this as a fatal error which skips some other tests on failure fatal_error = "test_fail_example failed" assert proc.returncode == 0, "Return Code is non-Zero:\n%s" % stderr + def test_skipped_on_fatalerror(): global fatal_error # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) assert True, "Some Text with explaination in case of failure" -if __name__ == '__main__': + +if __name__ == "__main__": retval = pytest.main(["-s"]) sys.exit(retval) diff --git a/tests/topotests/example-test/test_template.py b/tests/topotests/example-test/test_template.py index 4e35ce8b9f..afe974876a 100755 --- a/tests/topotests/example-test/test_template.py +++ b/tests/topotests/example-test/test_template.py @@ -32,7 +32,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -43,8 +43,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -56,17 +58,18 @@ class TemplateTopo(Topo): # # Create 2 routers for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a switch with just one router connected to it to simulate a # empty network. - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create a connection between r1 and r2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + def setup_module(mod): "Sets up the pytest environment" @@ -83,12 +86,13 @@ def setup_module(mod): router.load_config( TopoRouter.RD_ZEBRA, # Uncomment next line to load configuration from ./router/zebra.conf - #os.path.join(CWD, '{}/zebra.conf'.format(rname)) + # os.path.join(CWD, '{}/zebra.conf'.format(rname)) ) # After loading the configurations, this function loads configured daemons. tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -96,6 +100,7 @@ def teardown_module(mod): # This function tears down the whole topology. tgen.stop_topology() + def test_call_mininet_cli(): "Dummy test that just calls mininet CLI so we can interact with the build." tgen = get_topogen() @@ -103,18 +108,20 @@ def test_call_mininet_cli(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('calling mininet CLI') + logger.info("calling mininet CLI") tgen.mininet_cli() + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py index 8e794b9946..f24f463b8a 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py +++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py @@ -33,8 +33,8 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topogen import Topogen, get_topogen @@ -44,19 +44,19 @@ from mininet.topo import Topo # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_rib + start_topology, + write_test_header, + write_test_footer, + verify_rib, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence -) +from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson_multiple_links.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) @@ -155,8 +155,9 @@ def test_bgp_convergence(request): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, "test_bgp_convergence failed.. \n" \ - " Error: {}".format(bgp_convergence) + assert ( + bgp_convergence is True + ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence) logger.info("BGP is converged successfully \n") write_test_footer(tc_name) @@ -167,7 +168,7 @@ def test_static_routes(request): tgen = get_topogen() if bgp_convergence is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -175,20 +176,19 @@ def test_static_routes(request): # Static routes are created as part of initial configuration, # verifying RIB - dut = 'r3' - protocol = 'bgp' - next_hop = '10.0.0.1' + dut = "r3" + protocol = "bgp" + next_hop = "10.0.0.1" input_dict = {"r1": topo["routers"]["r1"]} # Uncomment below to debug # tgen.mininet_cli() - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py index 315c7b3f2d..3ae3c9f4fe 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py @@ -32,31 +32,31 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 -from lib.topogen import Topogen, get_topogen +from lib.topogen import Topogen, get_topogen # Required to instantiate the topology builder class. 
from mininet.topo import Topo # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_rib + start_topology, + write_test_header, + write_test_footer, + verify_rib, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence -) +from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) @@ -65,6 +65,7 @@ except IOError: bgp_convergence = False input_dict = {} + class TemplateTopo(Topo): """ Test topology builder @@ -87,6 +88,7 @@ class TemplateTopo(Topo): # Building topology from json file build_topo_from_json(tgen, topo) + def setup_module(mod): """ Sets up the pytest environment @@ -96,7 +98,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -122,6 +124,7 @@ def setup_module(mod): logger.info("Running setup_module() done") + def teardown_module(mod): """ Teardown the pytest environment @@ -152,8 +155,9 @@ def test_bgp_convergence(request): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\ - " Error: {}".format(bgp_convergence) + assert ( + bgp_convergence is True + ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence) logger.info("BGP is converged successfully \n") write_test_footer(tc_name) @@ -164,7 +168,7 @@ def test_static_routes(request): tgen = get_topogen() if bgp_convergence is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -172,19 +176,18 @@ def test_static_routes(request): # Static routes are created as part of initial configuration, # verifying RIB - dut = 'r3' - next_hop = '10.0.0.1' + dut = "r3" + next_hop = "10.0.0.1" input_dict = {"r1": topo["routers"]["r1"]} # Uncomment below to debug # tgen.mininet_cli() - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py index cd069aaec5..06fa2f4626 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py @@ -33,32 +33,32 @@ import pytest # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 # Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen +from lib.topogen import Topogen, get_topogen # Required to instantiate the topology builder class. from mininet.topo import Topo # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( - start_topology, write_test_header, - write_test_footer, verify_rib + start_topology, + write_test_header, + write_test_footer, + verify_rib, ) from lib.topolog import logger -from lib.bgp import ( - verify_bgp_convergence -) +from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson.json".format(CWD) try: - with open(jsonFile, 'r') as topoJson: + with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) except IOError: assert False, "Could not read file {}".format(jsonFile) @@ -100,7 +100,7 @@ def setup_module(mod): testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) - logger.info("="*40) + logger.info("=" * 40) logger.info("Running setup_module to create topology") @@ -157,8 +157,9 @@ def test_bgp_convergence(request): # Api call verify whether BGP is converged bgp_convergence = verify_bgp_convergence(tgen, topo) - assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\ - " Error: {}".format(bgp_convergence) + assert ( + bgp_convergence is True + ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence) logger.info("BGP is converged successfully \n") write_test_footer(tc_name) @@ -169,7 +170,7 @@ def test_static_routes(request): tgen = get_topogen() if bgp_convergence is not True: - pytest.skip('skipped because of BGP Convergence failure') + pytest.skip("skipped because of BGP Convergence failure") # test case name tc_name = request.node.name @@ -177,8 +178,8 @@ def test_static_routes(request): # Static routes are created as part of initial configuration, # verifying RIB - dut = 'r3' - next_hop = ['10.0.0.1', '10.0.0.5'] + dut = "r3" + next_hop = ["10.0.0.1", "10.0.0.5"] input_dict = { "r1": { "static_routes": [ @@ -186,20 +187,19 @@ def test_static_routes(request): "network": "100.0.20.1/32", "no_of_ip": 9, "admin_distance": 100, - "next_hop": "10.0.0.1" + "next_hop": "10.0.0.1", } ] } } # Uncomment below to debug # tgen.mininet_cli() - result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-topo1/test_isis_topo1.py b/tests/topotests/isis-topo1/test_isis_topo1.py index 941f917c6b..6b1d9a8964 100644 --- a/tests/topotests/isis-topo1/test_isis_topo1.py +++ b/tests/topotests/isis-topo1/test_isis_topo1.py @@ -36,7 +36,7 @@ import pytest import time CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -48,6 +48,7 @@ from mininet.topo import Topo class ISISTopo1(Topo): "Simple two layer ISIS topology" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -61,27 +62,27 @@ class ISISTopo1(Topo): # \ / # r5 for routern in range(1, 6): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # r1 <- sw1 -> r3 - sw = tgen.add_switch('sw1') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['r3']) + sw = tgen.add_switch("sw1") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r3"]) # r2 <- sw2 -> r4 - sw = tgen.add_switch('sw2') - sw.add_link(tgen.gears['r2']) - sw.add_link(tgen.gears['r4']) + sw = tgen.add_switch("sw2") + sw.add_link(tgen.gears["r2"]) + sw.add_link(tgen.gears["r4"]) # r3 <- sw3 -> r5 - sw = tgen.add_switch('sw3') - sw.add_link(tgen.gears['r3']) - sw.add_link(tgen.gears['r5']) + sw = tgen.add_switch("sw3") + sw.add_link(tgen.gears["r3"]) + sw.add_link(tgen.gears["r5"]) # r4 <- sw4 -> r5 - sw = tgen.add_switch('sw4') - sw.add_link(tgen.gears['r4']) - sw.add_link(tgen.gears['r5']) + sw = tgen.add_switch("sw4") + sw.add_link(tgen.gears["r4"]) + sw.add_link(tgen.gears["r5"]) def setup_module(mod): @@ -92,12 +93,10 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in tgen.routers().iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_ISIS, - os.path.join(CWD, '{}/isisd.conf'.format(rname)) + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) ) # After loading the configurations, this function loads configured daemons. 
@@ -105,12 +104,12 @@ def setup_module(mod): has_version_20 = False for router in tgen.routers().values(): - if router.has_version('<', '3'): + if router.has_version("<", "3"): has_version_20 = True if has_version_20: - logger.info('Skipping ISIS tests for FRR 2.0') - tgen.set_error('ISIS has convergence problems with IPv6') + logger.info("Skipping ISIS tests for FRR 2.0") + tgen.set_error("ISIS has convergence problems with IPv6") def teardown_module(mod): @@ -136,7 +135,7 @@ def test_isis_convergence(): # ) for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_topology.json'.format(CWD, rname) + filename = "{0}/{1}/{1}_topology.json".format(CWD, rname) expected = json.loads(open(filename).read()) def compare_isis_topology(router, expected): @@ -145,9 +144,8 @@ def test_isis_convergence(): return topotest.json_cmp(actual, expected) test_func = functools.partial(compare_isis_topology, router, expected) - (result, diff) = topotest.run_and_expect(test_func, None, - wait=0.5, count=120) - assert result, 'ISIS did not converge on {}:\n{}'.format(rname, diff) + (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=120) + assert result, "ISIS did not converge on {}:\n{}".format(rname, diff) def test_isis_route_installation(): @@ -157,24 +155,24 @@ def test_isis_route_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS routes') + logger.info("Checking routers for installed ISIS routes") # Check for routes in 'show ip route json' for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) - actual = router.vtysh_cmd('show ip route json', isjson=True) + filename = "{0}/{1}/{1}_route.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) + actual = router.vtysh_cmd("show ip route json", isjson=True) # Older FRR versions don't list interfaces in some ISIS routes - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, routes in expected.iteritems(): for route in routes: - if route['protocol'] != 'isis': + if route["protocol"] != "isis": continue - for nexthop in route['nexthops']: - nexthop.pop('interfaceIndex', None) - nexthop.pop('interfaceName', None) + for nexthop in route["nexthops"]: + nexthop.pop("interfaceIndex", None) + nexthop.pop("interfaceName", None) assertmsg = "Router '{}' routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -187,19 +185,19 @@ def test_isis_linux_route_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS routes in OS') + logger.info("Checking routers for installed ISIS routes in OS") # Check for routes in `ip route` for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route_linux.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) + filename = "{0}/{1}/{1}_route_linux.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) actual = topotest.ip4_route(router) # Older FRR versions install routes using different proto - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, netoptions in expected.iteritems(): - if 'proto' in netoptions and netoptions['proto'] == '187': - netoptions['proto'] = 'zebra' + if "proto" in netoptions and netoptions["proto"] == "187": + netoptions["proto"] = "zebra" assertmsg = 
"Router '{}' OS routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -212,27 +210,27 @@ def test_isis_route6_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS IPv6 routes') + logger.info("Checking routers for installed ISIS IPv6 routes") # Check for routes in 'show ip route json' for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route6.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) - actual = router.vtysh_cmd('show ipv6 route json', isjson=True) + filename = "{0}/{1}/{1}_route6.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) + actual = router.vtysh_cmd("show ipv6 route json", isjson=True) # Older FRR versions don't list interfaces in some ISIS routes - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, routes in expected.iteritems(): for route in routes: # Older versions display different metrics for IPv6 routes - route.pop('metric', None) + route.pop("metric", None) - if route['protocol'] != 'isis': + if route["protocol"] != "isis": continue - for nexthop in route['nexthops']: - nexthop.pop('interfaceIndex', None) - nexthop.pop('interfaceName', None) + for nexthop in route["nexthops"]: + nexthop.pop("interfaceIndex", None) + nexthop.pop("interfaceName", None) assertmsg = "Router '{}' routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -245,19 +243,19 @@ def test_isis_linux_route6_installation(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Checking routers for installed ISIS IPv6 routes in OS') + logger.info("Checking routers for installed ISIS IPv6 routes in OS") # Check for routes in `ip route` for rname, router in tgen.routers().iteritems(): - filename = '{0}/{1}/{1}_route6_linux.json'.format(CWD, rname) - expected = json.loads(open(filename, 'r').read()) + filename = "{0}/{1}/{1}_route6_linux.json".format(CWD, rname) + expected = json.loads(open(filename, "r").read()) actual = topotest.ip6_route(router) # Older FRR versions install routes using different proto - if router.has_version('<', '3.1'): + if router.has_version("<", "3.1"): for network, netoptions in expected.iteritems(): - if 'proto' in netoptions and netoptions['proto'] == '187': - netoptions['proto'] = 'zebra' + if "proto" in netoptions and netoptions["proto"] == "187": + netoptions["proto"] = "zebra" assertmsg = "Router '{}' OS routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg @@ -267,12 +265,12 @@ def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) @@ -296,8 +294,11 @@ def dict_merge(dct, merge_dct): https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 """ for k, v in merge_dct.iteritems(): - if (k in dct and isinstance(dct[k], dict) - and isinstance(merge_dct[k], collections.Mapping)): + if ( + k in dct + and isinstance(dct[k], dict) + and isinstance(merge_dct[k], collections.Mapping) + ): dict_merge(dct[k], merge_dct[k]) else: dct[k] = merge_dct[k] @@ -316,59 +317,59 @@ def parse_topology(lines, level): if area_match: area = area_match.group(1) if area not in areas: - areas[area] = { - level: { - 'ipv4': [], - 'ipv6': [] - } - } + areas[area] = {level: {"ipv4": [], "ipv6": []}} ipv = None continue elif area is None: continue if re.match(r"IS\-IS paths to level-. routers that speak IPv6", line): - ipv = 'ipv6' + ipv = "ipv6" continue if re.match(r"IS\-IS paths to level-. routers that speak IP", line): - ipv = 'ipv4' + ipv = "ipv4" continue - item_match = re.match( - r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line) + item_match = re.match(r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line) if item_match is not None: # Skip header - if (item_match.group(1) == 'Vertex' and - item_match.group(2) == 'Type' and - item_match.group(3) == 'Metric' and - item_match.group(4) == 'Next-Hop' and - item_match.group(5) == 'Interface' and - item_match.group(6) == 'Parent'): + if ( + item_match.group(1) == "Vertex" + and item_match.group(2) == "Type" + and item_match.group(3) == "Metric" + and item_match.group(4) == "Next-Hop" + and item_match.group(5) == "Interface" + and item_match.group(6) == "Parent" + ): continue - areas[area][level][ipv].append({ - 'vertex': item_match.group(1), - 'type': item_match.group(2), - 'metric': item_match.group(3), - 'next-hop': item_match.group(4), - 'interface': item_match.group(5), - 'parent': item_match.group(6), - }) + areas[area][level][ipv].append( + { + "vertex": item_match.group(1), + "type": item_match.group(2), + "metric": item_match.group(3), + "next-hop": item_match.group(4), + "interface": item_match.group(5), + "parent": item_match.group(6), + } + ) continue item_match = re.match(r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line) if item_match is not None: - areas[area][level][ipv].append({ - 'vertex': item_match.group(1), - 'type': item_match.group(2), - 'metric': item_match.group(3), - 'parent': item_match.group(4), - }) + areas[area][level][ipv].append( + { + "vertex": item_match.group(1), + "type": item_match.group(2), + "metric": item_match.group(3), + "parent": item_match.group(4), + } + ) continue item_match = re.match(r"([^ ]+)", line) if item_match is not None: - areas[area][level][ipv].append({'vertex': item_match.group(1)}) + areas[area][level][ipv].append({"vertex": item_match.group(1)}) continue return areas @@ -410,14 +411,14 @@ def show_isis_topology(router): } """ l1out = topotest.normalize_text( - router.vtysh_cmd('show isis topology level-1') + router.vtysh_cmd("show isis topology level-1") ).splitlines() l2out = topotest.normalize_text( - router.vtysh_cmd('show isis topology level-2') + router.vtysh_cmd("show isis topology level-2") ).splitlines() - l1 = parse_topology(l1out, 'level-1') - l2 = parse_topology(l2out, 'level-2') + l1 = parse_topology(l1out, "level-1") + l2 = 
parse_topology(l2out, "level-2") dict_merge(l1, l2) return l1 diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/ldpd.conf b/tests/topotests/ldp-oc-acl-topo1/r1/ldpd.conf new file mode 100644 index 0000000000..85bb970fdf --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/ldpd.conf @@ -0,0 +1,25 @@ +hostname r1 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 1.1.1.1 + ordered-control + ! + address-family ipv4 + discovery transport-address 1.1.1.1 + label local allocate host-routes + ! + interface r1-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/ospfd.conf b/tests/topotests/ldp-oc-acl-topo1/r1/ospfd.conf new file mode 100644 index 0000000000..6daf034d18 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/ospfd.conf @@ -0,0 +1,7 @@ +hostname r1 +log file ospfd.log +! +router ospf + router-id 1.1.1.1 + network 0.0.0.0/0 area 0 +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-acl-topo1/r1/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..2c493173f5 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ip_ospf_neighbor.json @@ -0,0 +1,12 @@ +{ + "neighbors":{ + "2.2.2.2":[ + { + "priority":1, + "state":"Full\/DR", + "address":"10.0.1.2", + "ifaceName":"r1-eth0:10.0.1.1" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ip_route.ref b/tests/topotests/ldp-oc-acl-topo1/r1/show_ip_route.ref new file mode 100644 index 0000000000..d75b8f21db --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ip_route.ref @@ -0,0 +1,171 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"1.1.1.1/32", + "protocol":"connected", + "selected":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.1.0/24":[ + { + "prefix":"10.0.1.0/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.1.0/24", + "protocol":"connected", + "selected":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.2.0/24":[ + { + "prefix":"10.0.2.0/24", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + 
], + "10.0.3.0/24":[ + { + "prefix":"10.0.3.0/24", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "123.0.1.0/24":[ + { + "prefix":"123.0.1.0/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + }, + { + "prefix":"123.0.1.0/24", + "protocol":"connected", + "selected":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_all_binding.ref new file mode 100644 index 0000000000..99a59668f8 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_all_binding.ref @@ -0,0 +1,61 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_binding.ref new file mode 100644 index 0000000000..ccc8413646 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_binding.ref @@ -0,0 +1,55 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "localLabel":"-", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "localLabel":"-", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"2.2.2.2", + "localLabel":"-", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_discovery.ref b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_discovery.ref new file mode 100644 index 0000000000..b349f4418f --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_discovery.ref @@ -0,0 +1,11 @@ +{ + "adjacencies":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + 
"type":"link", + "interface":"r1-eth0", + "helloHoldtime":15 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_neighbor.ref new file mode 100644 index 0000000000..4bff444a46 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/show_ldp_neighbor.ref @@ -0,0 +1,10 @@ +{ + "neighbors":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + "state":"OPERATIONAL", + "transportAddress":"2.2.2.2" + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r1/zebra.conf b/tests/topotests/ldp-oc-acl-topo1/r1/zebra.conf new file mode 100644 index 0000000000..83aea46e64 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r1/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname r1 +! +interface lo + ip address 1.1.1.1/32 +! +interface r1-eth0 + description to sw0 + ip address 10.0.1.1/24 + ip address 123.0.1.1/24 +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/ldpd.conf b/tests/topotests/ldp-oc-acl-topo1/r2/ldpd.conf new file mode 100644 index 0000000000..e1a552c701 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/ldpd.conf @@ -0,0 +1,28 @@ +hostname r2 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 2.2.2.2 + ordered-control + ! + address-family ipv4 + discovery transport-address 2.2.2.2 + ! + interface r2-eth0 + ! + interface r2-eth1 + ! + interface r2-eth2 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/ospfd.conf b/tests/topotests/ldp-oc-acl-topo1/r2/ospfd.conf new file mode 100644 index 0000000000..8678813665 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/ospfd.conf @@ -0,0 +1,7 @@ +hostname r2 +log file ospfd.log +! +router ospf + router-id 2.2.2.2 + network 0.0.0.0/0 area 0 +! 
diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-acl-topo1/r2/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..55f12359e5 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ip_ospf_neighbor.json @@ -0,0 +1,31 @@ +{ + "neighbors":{ + "1.1.1.1":[ + { + "priority":1, + "state":"Full\/Backup", + "address":"10.0.1.1", + "ifaceName":"r2-eth0:10.0.1.2", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ], + "3.3.3.3":[ + { + "priority":1, + "state":"Full\/Backup", + "address":"10.0.2.3", + "ifaceName":"r2-eth1:10.0.2.2" + } + ], + "4.4.4.4":[ + { + "priority":1, + "state":"Full\/DR", + "address":"10.0.2.4", + "ifaceName":"r2-eth1:10.0.2.2" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ip_route.ref b/tests/topotests/ldp-oc-acl-topo1/r2/show_ip_route.ref new file mode 100644 index 0000000000..060c0b429d --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ip_route.ref @@ -0,0 +1,209 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"2.2.2.2/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + 
"prefix":"10.0.3.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "123.0.1.0\/24":[ + { + "prefix":"123.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_all_binding.ref new file mode 100644 index 0000000000..95fb847c1e --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_all_binding.ref @@ -0,0 +1,63 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"1.1.1.1", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"3.3.3.3", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_binding.ref new file mode 100644 index 0000000000..ea32de3eda --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_binding.ref @@ -0,0 +1,63 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"3.3.3.3", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_discovery.ref b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_discovery.ref new file mode 100644 index 0000000000..8129570082 --- 
/dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_discovery.ref @@ -0,0 +1,18 @@ +{ + "adjacencies":[ + { + "addressFamily":"ipv4", + "neighborId":"1.1.1.1", + "type":"link", + "interface":"r2-eth0", + "helloHoldtime":15 + }, + { + "addressFamily":"ipv4", + "neighborId":"3.3.3.3", + "type":"link", + "interface":"r2-eth1", + "helloHoldtime":15 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_neighbor.ref new file mode 100644 index 0000000000..eed35289ea --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/show_ldp_neighbor.ref @@ -0,0 +1,16 @@ +{ + "neighbors":[ + { + "addressFamily":"ipv4", + "neighborId":"1.1.1.1", + "state":"OPERATIONAL", + "transportAddress":"1.1.1.1" + }, + { + "addressFamily":"ipv4", + "neighborId":"3.3.3.3", + "state":"OPERATIONAL", + "transportAddress":"3.3.3.3" + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r2/zebra.conf b/tests/topotests/ldp-oc-acl-topo1/r2/zebra.conf new file mode 100644 index 0000000000..1f1e3e391a --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r2/zebra.conf @@ -0,0 +1,27 @@ +log file zebra.log +! +hostname r2 +! +interface lo + ip address 2.2.2.2/32 +! +interface r2-eth0 + description to sw0 + ip address 10.0.1.2/24 +! no link-detect +! +interface r2-eth1 + description to sw1 + ip address 10.0.2.2/24 +! no link-detect +! +interface r2-eths2 + description to sw2 + ip address 10.0.3.2/24 +! no link-detect +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/ldpd.conf b/tests/topotests/ldp-oc-acl-topo1/r3/ldpd.conf new file mode 100644 index 0000000000..4e66b140ac --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/ldpd.conf @@ -0,0 +1,24 @@ +hostname r3 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 3.3.3.3 + ordered-control + ! + address-family ipv4 + discovery transport-address 3.3.3.3 + ! + interface r3-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/ospfd.conf b/tests/topotests/ldp-oc-acl-topo1/r3/ospfd.conf new file mode 100644 index 0000000000..202be238ec --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/ospfd.conf @@ -0,0 +1,8 @@ +hostname r3 +password 1 +log file ospfd.log +! +router ospf + router-id 3.3.3.3 + network 0.0.0.0/0 area 0 +! 
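The show_ip_ospf_neighbor.json, show_ip_route.ref and show_ldp_*.ref files added for each router hold the JSON output that the test script later in this patch expects from the matching vtysh show commands. Below is a minimal sketch of how one such fixture can be checked by hand with the helpers this patch already relies on (vtysh_cmd and topotest.json_cmp); the mapping of show_ldp_binding.ref to "show mpls ldp binding json" is an assumption drawn from the file name, not something stated in the patch:

    import json
    from lib import topotest
    from lib.topogen import get_topogen

    def compare_reference(rname, command, reference):
        "Return None when the live router output matches the stored fixture."
        tgen = get_topogen()
        expected = json.loads(open(reference).read())
        # Same call style the reformatted tests in this patch use.
        actual = tgen.gears[rname].vtysh_cmd(command, isjson=True)
        return topotest.json_cmp(actual, expected)

    # Hypothetical usage, command name assumed from the fixture name:
    # compare_reference("r2", "show mpls ldp binding json", "r2/show_ldp_binding.ref")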
diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-acl-topo1/r3/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..24502ed813 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ip_ospf_neighbor.json @@ -0,0 +1,20 @@ +{ + "neighbors":{ + "2.2.2.2":[ + { + "priority":1, + "state":"Full\/DROther", + "address":"10.0.2.2", + "ifaceName":"r3-eth0:10.0.2.3" + } + ], + "4.4.4.4":[ + { + "priority":1, + "state":"Full\/DR", + "address":"10.0.2.4", + "ifaceName":"r3-eth0:10.0.2.3" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/show_ip_route.ref b/tests/topotests/ldp-oc-acl-topo1/r3/show_ip_route.ref new file mode 100644 index 0000000000..40800762ba --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ip_route.ref @@ -0,0 +1,209 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"3.3.3.3/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r3-eth1", + "active":true + } + ] + }, + { + "prefix":"10.0.3.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + 
"directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r3-eth1", + "active":true + } + ] + } + ], + "123.0.1.0\/24":[ + { + "prefix":"123.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_all_binding.ref new file mode 100644 index 0000000000..100dd307ea --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_all_binding.ref @@ -0,0 +1,61 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"2.2.2.2", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_binding.ref new file mode 100644 index 0000000000..bb1b2b3023 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_binding.ref @@ -0,0 +1,62 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_discovery.ref b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_discovery.ref new file mode 100644 index 0000000000..c3a07e7e38 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_discovery.ref @@ -0,0 +1,11 @@ +{ + "adjacencies":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + "type":"link", + "interface":"r3-eth0", + "helloHoldtime":15 + } + ] +} diff --git 
a/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_neighbor.ref new file mode 100644 index 0000000000..4bff444a46 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/show_ldp_neighbor.ref @@ -0,0 +1,10 @@ +{ + "neighbors":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + "state":"OPERATIONAL", + "transportAddress":"2.2.2.2" + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r3/zebra.conf b/tests/topotests/ldp-oc-acl-topo1/r3/zebra.conf new file mode 100644 index 0000000000..234c215ddf --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r3/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname r3 +! +interface lo + ip address 3.3.3.3/32 +! +interface r3-eth0 + description to sw1 + ip address 10.0.2.3/24 +! no link-detect +! +interface r3-eth1 + description to sw2 + ip address 10.0.3.3/24 +! no link-detect +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/ldpd.conf b/tests/topotests/ldp-oc-acl-topo1/r4/ldpd.conf new file mode 100644 index 0000000000..6b7d28f983 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/ldpd.conf @@ -0,0 +1,24 @@ +hostname r4 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 4.4.4.4 + ordered-control + ! + address-family ipv4 + discovery transport-address 4.4.4.4 + ! + !interface r4-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/ospfd.conf b/tests/topotests/ldp-oc-acl-topo1/r4/ospfd.conf new file mode 100644 index 0000000000..569dbc54e2 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/ospfd.conf @@ -0,0 +1,7 @@ +hostname r4 +log file ospfd.log +! +router ospf + router-id 4.4.4.4 + network 0.0.0.0/0 area 0 +! 
diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-acl-topo1/r4/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..794410522d --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ip_ospf_neighbor.json @@ -0,0 +1,21 @@ + +{ + "neighbors":{ + "2.2.2.2":[ + { + "priority":1, + "state":"Full\/DROther", + "address":"10.0.2.2", + "ifaceName":"r4-eth0:10.0.2.4" + } + ], + "3.3.3.3":[ + { + "priority":1, + "state":"Full\/Backup", + "address":"10.0.2.3", + "ifaceName":"r4-eth0:10.0.2.4" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ip_route.ref b/tests/topotests/ldp-oc-acl-topo1/r4/show_ip_route.ref new file mode 100644 index 0000000000..c9b83a1c73 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ip_route.ref @@ -0,0 +1,196 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"4.4.4.4/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "123.0.1.0\/24":[ + { + "prefix":"123.0.1.0\/24", + "protocol":"ospf", + "selected":true, + 
"destSelected":true, + "distance":110, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_all_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_all_binding.ref new file mode 100644 index 0000000000..2a46c40346 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_all_binding.ref @@ -0,0 +1,68 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_binding.ref b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_binding.ref new file mode 100644 index 0000000000..2a46c40346 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_binding.ref @@ -0,0 +1,68 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_discovery.ref b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_discovery.ref new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_discovery.ref @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_neighbor.ref 
b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_neighbor.ref new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/show_ldp_neighbor.ref @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/topotests/ldp-oc-acl-topo1/r4/zebra.conf b/tests/topotests/ldp-oc-acl-topo1/r4/zebra.conf new file mode 100644 index 0000000000..7e291053e5 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/r4/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname r4 +! +interface lo + ip address 4.4.4.4/32 +! +interface r4-eth0 + description to sw1 + ip address 10.0.2.4/24 +! no link-detect +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.dot b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.dot new file mode 100644 index 0000000000..62058e3cb1 --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.dot @@ -0,0 +1,76 @@ +## Color coding: +######################### +## Main FRR: #f08080 red +## Switches: #d0e0d0 gray +## RIP: #19e3d9 Cyan +## RIPng: #fcb314 dark yellow +## OSPFv2: #32b835 Green +## OSPFv3: #19e3d9 Cyan +## ISIS IPv4 #fcb314 dark yellow +## ISIS IPv6 #9a81ec purple +## BGP IPv4 #eee3d3 beige +## BGP IPv6 #fdff00 yellow +##### Colors (see http://www.color-hex.com/) + +graph template { + label="Test Topology - LDP-OC 1"; + + # Routers + r1 [ + shape=doubleoctagon, + label="r1", + fillcolor="#f08080", + style=filled, + ]; + r2 [ + shape=doubleoctagon + label="r2", + fillcolor="#f08080", + style=filled, + ]; + r3 [ + shape=doubleoctagon + label="r3", + fillcolor="#f08080", + style=filled, + ]; + r4 [ + shape=doubleoctagon + label="r4", + fillcolor="#f08080", + style=filled, + ]; + + + # Switches + s0 [ + shape=oval, + label="10.0.1.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s1 [ + shape=oval, + label="10.0.2.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s2 [ + shape=oval, + label="10.0.3.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + + # Connections + + r1 -- s0 [label="eth0"]; + r2 -- s0 [label="eth0"]; + + r2 -- s1 [label="eth1"]; + r3 -- s1 [label="eth0"]; + r4 -- s1 [label="eth0"]; + + r2 -- s2 [label="eth2"]; + r3 -- s2 [label="eth1"]; +} diff --git a/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py new file mode 100755 index 0000000000..450d35e16c --- /dev/null +++ b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python + +# +# test_ldp_oc_acl_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +test_ldp_oc_acl_topo1.py: Simple FRR/Quagga LDP Test + + +---------+ + | r1 | + | 1.1.1.1 | + +----+----+ + | .1 r1-eth0 + | + ~~~~~~~~~~~~~ + ~~ sw0 ~~ + ~~ 10.0.1.0/24 ~~ + ~~~~~~~~~~~~~ + |10.0.1.0/24 + | + | .2 r2-eth0 + +----+----+ + | r2 | + | 2.2.2.2 | + +--+---+--+ + r2-eth2 .2 | | .2 r2-eth1 + ______/ \______ + / \ + ~~~~~~~~~~~~~ ~~~~~~~~~~~~~ +~~ sw2 ~~ ~~ sw1 ~~ +~~ 10.0.3.0/24 ~~ ~~ 10.0.2.0/24 ~~ + ~~~~~~~~~~~~~ ~~~~~~~~~~~~~ + | / | + \ _________/ | + \ / \ +r3-eth1 .3 | | .3 r3-eth0 | .4 r4-eth0 + +----+--+---+ +----+----+ + | r3 | | r4 | + | 3.3.3.3 | | 4.4.4.4 | + +-----------+ +---------+ +""" + +import os +import sys +import pytest +import json +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ["r1", "r2", "r3", "r4"]: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch("s0") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + # Don't start ospfd and ldpd in the CE nodes + if router.name[0] == "r": + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 80 seconds. 
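+    # The 80 second bound comes from run_and_expect() below: it re-runs
+    # test_func up to count=160 times with wait=0.5s between attempts, and
+    # router_json_cmp() returns None once the expected JSON is matched.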
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5) + + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +def test_ospf_convergence(): + logger.info("Test: check OSPF adjacencies") + + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json" + ) + + +def test_rib(): + logger.info("Test: verify RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output(rname, "show ip route json", "show_ip_route.ref") + + +def test_ldp_adjacencies(): + logger.info("Test: verify LDP adjacencies") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp discovery json", "show_ldp_discovery.ref" + ) + + +def test_ldp_neighbors(): + logger.info("Test: verify LDP neighbors") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref" + ) + + +def test_ldp_bindings(): + logger.info("Test: verify LDP bindings") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_binding.ref" + ) + + +def test_ldp_bindings_all_routes(): + logger.info("Test: verify LDP bindings after host filter removed") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # remove ACL that blocks advertising everything but host routes */ + cmd = 'vtysh -c "configure terminal" -c "mpls ldp" -c "address-family ipv4" -c "no label local allocate host-routes"' + tgen.net["r1"].cmd(cmd) + sleep(2) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_all_binding.ref" + ) + + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ldp-oc-topo1/r1/ldpd.conf b/tests/topotests/ldp-oc-topo1/r1/ldpd.conf new file mode 100644 index 0000000000..2a8e023832 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/ldpd.conf @@ -0,0 +1,24 @@ +hostname r1 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 1.1.1.1 + ordered-control + ! 
+ address-family ipv4 + discovery transport-address 1.1.1.1 + ! + interface r1-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r1/ospfd.conf b/tests/topotests/ldp-oc-topo1/r1/ospfd.conf new file mode 100644 index 0000000000..6daf034d18 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/ospfd.conf @@ -0,0 +1,7 @@ +hostname r1 +log file ospfd.log +! +router ospf + router-id 1.1.1.1 + network 0.0.0.0/0 area 0 +! diff --git a/tests/topotests/ldp-oc-topo1/r1/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-topo1/r1/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..2c493173f5 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/show_ip_ospf_neighbor.json @@ -0,0 +1,12 @@ +{ + "neighbors":{ + "2.2.2.2":[ + { + "priority":1, + "state":"Full\/DR", + "address":"10.0.1.2", + "ifaceName":"r1-eth0:10.0.1.1" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-topo1/r1/show_ip_route.ref b/tests/topotests/ldp-oc-topo1/r1/show_ip_route.ref new file mode 100644 index 0000000000..d75b8f21db --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/show_ip_route.ref @@ -0,0 +1,171 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"1.1.1.1/32", + "protocol":"connected", + "selected":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.1.0/24":[ + { + "prefix":"10.0.1.0/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.1.0/24", + "protocol":"connected", + "selected":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.2.0/24":[ + { + "prefix":"10.0.2.0/24", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "10.0.3.0/24":[ + { + "prefix":"10.0.3.0/24", + "protocol":"ospf", + "selected":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ], + "123.0.1.0/24":[ + { + "prefix":"123.0.1.0/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + }, + { + "prefix":"123.0.1.0/24", + "protocol":"connected", + "selected":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r1-eth0", + "active":true + } + ] + } + ] +} 
diff --git a/tests/topotests/ldp-oc-topo1/r1/show_ldp_binding.ref b/tests/topotests/ldp-oc-topo1/r1/show_ldp_binding.ref new file mode 100644 index 0000000000..99a59668f8 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/show_ldp_binding.ref @@ -0,0 +1,61 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r1/show_ldp_discovery.ref b/tests/topotests/ldp-oc-topo1/r1/show_ldp_discovery.ref new file mode 100644 index 0000000000..b349f4418f --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/show_ldp_discovery.ref @@ -0,0 +1,11 @@ +{ + "adjacencies":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + "type":"link", + "interface":"r1-eth0", + "helloHoldtime":15 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r1/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-topo1/r1/show_ldp_neighbor.ref new file mode 100644 index 0000000000..4bff444a46 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/show_ldp_neighbor.ref @@ -0,0 +1,10 @@ +{ + "neighbors":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + "state":"OPERATIONAL", + "transportAddress":"2.2.2.2" + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r1/zebra.conf b/tests/topotests/ldp-oc-topo1/r1/zebra.conf new file mode 100644 index 0000000000..83aea46e64 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r1/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname r1 +! +interface lo + ip address 1.1.1.1/32 +! +interface r1-eth0 + description to sw0 + ip address 10.0.1.1/24 + ip address 123.0.1.1/24 +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r2/ldpd.conf b/tests/topotests/ldp-oc-topo1/r2/ldpd.conf new file mode 100644 index 0000000000..e1a552c701 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/ldpd.conf @@ -0,0 +1,28 @@ +hostname r2 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 2.2.2.2 + ordered-control + ! + address-family ipv4 + discovery transport-address 2.2.2.2 + ! + interface r2-eth0 + ! + interface r2-eth1 + ! + interface r2-eth2 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r2/ospfd.conf b/tests/topotests/ldp-oc-topo1/r2/ospfd.conf new file mode 100644 index 0000000000..8678813665 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/ospfd.conf @@ -0,0 +1,7 @@ +hostname r2 +log file ospfd.log +! 
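+! Single-area setup: the catch-all network statement below enables OSPF
+! on every interface and places it in area 0.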
+router ospf + router-id 2.2.2.2 + network 0.0.0.0/0 area 0 +! diff --git a/tests/topotests/ldp-oc-topo1/r2/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-topo1/r2/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..55f12359e5 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/show_ip_ospf_neighbor.json @@ -0,0 +1,31 @@ +{ + "neighbors":{ + "1.1.1.1":[ + { + "priority":1, + "state":"Full\/Backup", + "address":"10.0.1.1", + "ifaceName":"r2-eth0:10.0.1.2", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ], + "3.3.3.3":[ + { + "priority":1, + "state":"Full\/Backup", + "address":"10.0.2.3", + "ifaceName":"r2-eth1:10.0.2.2" + } + ], + "4.4.4.4":[ + { + "priority":1, + "state":"Full\/DR", + "address":"10.0.2.4", + "ifaceName":"r2-eth1:10.0.2.2" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-topo1/r2/show_ip_route.ref b/tests/topotests/ldp-oc-topo1/r2/show_ip_route.ref new file mode 100644 index 0000000000..060c0b429d --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/show_ip_route.ref @@ -0,0 +1,209 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"2.2.2.2/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } 
+ ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"r2-eth1", + "active":true + } + ] + } + ], + "123.0.1.0\/24":[ + { + "prefix":"123.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r2-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r2/show_ldp_binding.ref b/tests/topotests/ldp-oc-topo1/r2/show_ldp_binding.ref new file mode 100644 index 0000000000..95fb847c1e --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/show_ldp_binding.ref @@ -0,0 +1,63 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"1.1.1.1", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"3.3.3.3", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"3.3.3.3", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"1.1.1.1", + "remoteLabel":"imp-null", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r2/show_ldp_discovery.ref b/tests/topotests/ldp-oc-topo1/r2/show_ldp_discovery.ref new file mode 100644 index 0000000000..8129570082 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/show_ldp_discovery.ref @@ -0,0 +1,18 @@ +{ + "adjacencies":[ + { + "addressFamily":"ipv4", + "neighborId":"1.1.1.1", + "type":"link", + "interface":"r2-eth0", + "helloHoldtime":15 + }, + { + "addressFamily":"ipv4", + "neighborId":"3.3.3.3", + "type":"link", + "interface":"r2-eth1", + "helloHoldtime":15 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r2/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-topo1/r2/show_ldp_neighbor.ref new file mode 100644 index 0000000000..eed35289ea --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/show_ldp_neighbor.ref @@ -0,0 +1,16 @@ +{ + "neighbors":[ + { + "addressFamily":"ipv4", + "neighborId":"1.1.1.1", + "state":"OPERATIONAL", + "transportAddress":"1.1.1.1" + }, + { + "addressFamily":"ipv4", + "neighborId":"3.3.3.3", + "state":"OPERATIONAL", + "transportAddress":"3.3.3.3" + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r2/zebra.conf b/tests/topotests/ldp-oc-topo1/r2/zebra.conf new file mode 100644 index 0000000000..1f1e3e391a --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r2/zebra.conf @@ -0,0 +1,27 @@ +log file zebra.log +! +hostname r2 +! +interface lo + ip address 2.2.2.2/32 +! +interface r2-eth0 + description to sw0 + ip address 10.0.1.2/24 +! no link-detect +! +interface r2-eth1 + description to sw1 + ip address 10.0.2.2/24 +! 
no link-detect +! +interface r2-eths2 + description to sw2 + ip address 10.0.3.2/24 +! no link-detect +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r3/ldpd.conf b/tests/topotests/ldp-oc-topo1/r3/ldpd.conf new file mode 100644 index 0000000000..4e66b140ac --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/ldpd.conf @@ -0,0 +1,24 @@ +hostname r3 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 3.3.3.3 + ordered-control + ! + address-family ipv4 + discovery transport-address 3.3.3.3 + ! + interface r3-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r3/ospfd.conf b/tests/topotests/ldp-oc-topo1/r3/ospfd.conf new file mode 100644 index 0000000000..202be238ec --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/ospfd.conf @@ -0,0 +1,8 @@ +hostname r3 +password 1 +log file ospfd.log +! +router ospf + router-id 3.3.3.3 + network 0.0.0.0/0 area 0 +! diff --git a/tests/topotests/ldp-oc-topo1/r3/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-topo1/r3/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..24502ed813 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/show_ip_ospf_neighbor.json @@ -0,0 +1,20 @@ +{ + "neighbors":{ + "2.2.2.2":[ + { + "priority":1, + "state":"Full\/DROther", + "address":"10.0.2.2", + "ifaceName":"r3-eth0:10.0.2.3" + } + ], + "4.4.4.4":[ + { + "priority":1, + "state":"Full\/DR", + "address":"10.0.2.4", + "ifaceName":"r3-eth0:10.0.2.3" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-topo1/r3/show_ip_route.ref b/tests/topotests/ldp-oc-topo1/r3/show_ip_route.ref new file mode 100644 index 0000000000..40800762ba --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/show_ip_route.ref @@ -0,0 +1,209 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + "prefix":"3.3.3.3/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + 
"fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r3-eth1", + "active":true + } + ] + }, + { + "prefix":"10.0.3.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":3, + "interfaceName":"r3-eth1", + "active":true + } + ] + } + ], + "123.0.1.0\/24":[ + { + "prefix":"123.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r3-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r3/show_ldp_binding.ref b/tests/topotests/ldp-oc-topo1/r3/show_ldp_binding.ref new file mode 100644 index 0000000000..100dd307ea --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/show_ldp_binding.ref @@ -0,0 +1,61 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"2.2.2.2", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"2.2.2.2", + "remoteLabel":"imp-null", + "inUse":1 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"2.2.2.2", + "localLabel":"imp-null", + "remoteLabel":"imp-null", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"2.2.2.2", + "inUse":1 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r3/show_ldp_discovery.ref b/tests/topotests/ldp-oc-topo1/r3/show_ldp_discovery.ref new file mode 100644 index 0000000000..c3a07e7e38 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/show_ldp_discovery.ref @@ -0,0 +1,11 @@ +{ + "adjacencies":[ + { + "addressFamily":"ipv4", + "neighborId":"2.2.2.2", + "type":"link", + "interface":"r3-eth0", + "helloHoldtime":15 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r3/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-topo1/r3/show_ldp_neighbor.ref new file mode 100644 index 0000000000..4bff444a46 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/show_ldp_neighbor.ref @@ -0,0 +1,10 @@ +{ + "neighbors":[ + { + "addressFamily":"ipv4", + 
"neighborId":"2.2.2.2", + "state":"OPERATIONAL", + "transportAddress":"2.2.2.2" + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r3/zebra.conf b/tests/topotests/ldp-oc-topo1/r3/zebra.conf new file mode 100644 index 0000000000..234c215ddf --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r3/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname r3 +! +interface lo + ip address 3.3.3.3/32 +! +interface r3-eth0 + description to sw1 + ip address 10.0.2.3/24 +! no link-detect +! +interface r3-eth1 + description to sw2 + ip address 10.0.3.3/24 +! no link-detect +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r4/ldpd.conf b/tests/topotests/ldp-oc-topo1/r4/ldpd.conf new file mode 100644 index 0000000000..6b7d28f983 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/ldpd.conf @@ -0,0 +1,24 @@ +hostname r4 +log file ldpd.log +! +debug mpls ldp zebra +debug mpls ldp event +debug mpls ldp errors +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp discovery hello recv +debug mpls ldp discovery hello sent +! +mpls ldp + router-id 4.4.4.4 + ordered-control + ! + address-family ipv4 + discovery transport-address 4.4.4.4 + ! + !interface r4-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/r4/ospfd.conf b/tests/topotests/ldp-oc-topo1/r4/ospfd.conf new file mode 100644 index 0000000000..569dbc54e2 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/ospfd.conf @@ -0,0 +1,7 @@ +hostname r4 +log file ospfd.log +! +router ospf + router-id 4.4.4.4 + network 0.0.0.0/0 area 0 +! diff --git a/tests/topotests/ldp-oc-topo1/r4/show_ip_ospf_neighbor.json b/tests/topotests/ldp-oc-topo1/r4/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..794410522d --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/show_ip_ospf_neighbor.json @@ -0,0 +1,21 @@ + +{ + "neighbors":{ + "2.2.2.2":[ + { + "priority":1, + "state":"Full\/DROther", + "address":"10.0.2.2", + "ifaceName":"r4-eth0:10.0.2.4" + } + ], + "3.3.3.3":[ + { + "priority":1, + "state":"Full\/Backup", + "address":"10.0.2.3", + "ifaceName":"r4-eth0:10.0.2.4" + } + ] + } +} diff --git a/tests/topotests/ldp-oc-topo1/r4/show_ip_route.ref b/tests/topotests/ldp-oc-topo1/r4/show_ip_route.ref new file mode 100644 index 0000000000..c9b83a1c73 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/show_ip_route.ref @@ -0,0 +1,196 @@ +{ + "1.1.1.1/32":[ + { + "prefix":"1.1.1.1/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "2.2.2.2/32":[ + { + "prefix":"2.2.2.2/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "3.3.3.3/32":[ + { + "prefix":"3.3.3.3/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "4.4.4.4/32":[ + { + "prefix":"4.4.4.4/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + }, + { + 
"prefix":"4.4.4.4/32", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":1, + "interfaceName":"lo", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "selected":true, + "destSelected":true, + "distance":0, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ], + "123.0.1.0\/24":[ + { + "prefix":"123.0.1.0\/24", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "distance":110, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"r4-eth0", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r4/show_ldp_binding.ref b/tests/topotests/ldp-oc-topo1/r4/show_ldp_binding.ref new file mode 100644 index 0000000000..2a46c40346 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/show_ldp_binding.ref @@ -0,0 +1,68 @@ +{ + "bindings":[ + { + "addressFamily":"ipv4", + "prefix":"1.1.1.1/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"2.2.2.2/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"3.3.3.3/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"4.4.4.4/32", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.2.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"10.0.3.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + }, + { + "addressFamily":"ipv4", + "prefix":"123.0.1.0/24", + "neighborId":"0.0.0.0", + "localLabel":"imp-null", + "remoteLabel":"-", + "inUse":0 + } + ] +} diff --git a/tests/topotests/ldp-oc-topo1/r4/show_ldp_discovery.ref b/tests/topotests/ldp-oc-topo1/r4/show_ldp_discovery.ref new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/show_ldp_discovery.ref @@ -0,0 +1,2 @@ +{ +} diff --git 
a/tests/topotests/ldp-oc-topo1/r4/show_ldp_neighbor.ref b/tests/topotests/ldp-oc-topo1/r4/show_ldp_neighbor.ref new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/show_ldp_neighbor.ref @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/topotests/ldp-oc-topo1/r4/zebra.conf b/tests/topotests/ldp-oc-topo1/r4/zebra.conf new file mode 100644 index 0000000000..7e291053e5 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/r4/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname r4 +! +interface lo + ip address 4.4.4.4/32 +! +interface r4-eth0 + description to sw1 + ip address 10.0.2.4/24 +! no link-detect +! +ip forwarding +! +! +line vty +! diff --git a/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.dot b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.dot new file mode 100644 index 0000000000..62058e3cb1 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.dot @@ -0,0 +1,76 @@ +## Color coding: +######################### +## Main FRR: #f08080 red +## Switches: #d0e0d0 gray +## RIP: #19e3d9 Cyan +## RIPng: #fcb314 dark yellow +## OSPFv2: #32b835 Green +## OSPFv3: #19e3d9 Cyan +## ISIS IPv4 #fcb314 dark yellow +## ISIS IPv6 #9a81ec purple +## BGP IPv4 #eee3d3 beige +## BGP IPv6 #fdff00 yellow +##### Colors (see http://www.color-hex.com/) + +graph template { + label="Test Topology - LDP-OC 1"; + + # Routers + r1 [ + shape=doubleoctagon, + label="r1", + fillcolor="#f08080", + style=filled, + ]; + r2 [ + shape=doubleoctagon + label="r2", + fillcolor="#f08080", + style=filled, + ]; + r3 [ + shape=doubleoctagon + label="r3", + fillcolor="#f08080", + style=filled, + ]; + r4 [ + shape=doubleoctagon + label="r4", + fillcolor="#f08080", + style=filled, + ]; + + + # Switches + s0 [ + shape=oval, + label="10.0.1.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s1 [ + shape=oval, + label="10.0.2.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s2 [ + shape=oval, + label="10.0.3.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + + # Connections + + r1 -- s0 [label="eth0"]; + r2 -- s0 [label="eth0"]; + + r2 -- s1 [label="eth1"]; + r3 -- s1 [label="eth0"]; + r4 -- s1 [label="eth0"]; + + r2 -- s2 [label="eth2"]; + r3 -- s2 [label="eth1"]; +} diff --git a/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py new file mode 100755 index 0000000000..ac99eb1a26 --- /dev/null +++ b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python + +# +# test_ldp_oc_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +test_ldp_oc_topo1.py: Simple FRR/Quagga LDP Test + + +---------+ + | r1 | + | 1.1.1.1 | + +----+----+ + | .1 r1-eth0 + | + ~~~~~~~~~~~~~ + ~~ sw0 ~~ + ~~ 10.0.1.0/24 ~~ + ~~~~~~~~~~~~~ + |10.0.1.0/24 + | + | .2 r2-eth0 + +----+----+ + | r2 | + | 2.2.2.2 | + +--+---+--+ + r2-eth2 .2 | | .2 r2-eth1 + ______/ \______ + / \ + ~~~~~~~~~~~~~ ~~~~~~~~~~~~~ +~~ sw2 ~~ ~~ sw1 ~~ +~~ 10.0.3.0/24 ~~ ~~ 10.0.2.0/24 ~~ + ~~~~~~~~~~~~~ ~~~~~~~~~~~~~ + | / | + \ _________/ | + \ / \ +r3-eth1 .3 | | .3 r3-eth0 | .4 r4-eth0 + +----+--+---+ +----+----+ + | r3 | | r4 | + | 3.3.3.3 | | 4.4.4.4 | + +-----------+ +---------+ +""" + +import os +import sys +import pytest +import json +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ["r1", "r2", "r3", "r4"]: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch("s0") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + # Don't start ospfd and ldpd in the CE nodes + if router.name[0] == "r": + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 80 seconds. 
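+    # The JSON reference files only list the keys of interest; topotest's
+    # router_json_cmp() treats the expected JSON as a minimum set, so any
+    # extra fields in the live "show ... json" output are ignored.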
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5) + + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +def test_ospf_convergence(): + logger.info("Test: check OSPF adjacencies") + + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json" + ) + + +def test_rib(): + logger.info("Test: verify RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output(rname, "show ip route json", "show_ip_route.ref") + + +def test_ldp_adjacencies(): + logger.info("Test: verify LDP adjacencies") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp discovery json", "show_ldp_discovery.ref" + ) + + +def test_ldp_neighbors(): + logger.info("Test: verify LDP neighbors") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref" + ) + + +def test_ldp_bindings(): + logger.info("Test: verify LDP bindings") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["r1", "r2", "r3", "r4"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_binding.ref" + ) + + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." 
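+    # Reporting only happens when the topotest run has memory-leak checking
+    # enabled; otherwise the test is skipped below.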
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref b/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref index f244122f1a..a13c1d459b 100644 --- a/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref +++ b/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref @@ -3,3 +3,4 @@ xx as to xx via inet 10.0.1.2 dev r1-eth0 proto xx xx via inet 10.0.1.2 dev r1-eth0 proto xx xx via inet 10.0.1.2 dev r1-eth0 proto xx xx via inet 10.0.1.2 dev r1-eth0 proto xx + diff --git a/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref index 7819d303d1..7d398887c4 100644 --- a/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O 1.1.1.1/32 [110/0] is directly connected, lo -O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0, label implicit-null -O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx -O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx -O 10.0.1.0/24 [110/10] is directly connected, r1-eth0 -O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null -O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null +O 1.1.1.1/32 [110/0] is directly connected, lo, weight 1 +O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0, label implicit-null, weight 1 +O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx, weight 1 +O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx, weight 1 +O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1 +O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null, weight 1 +O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null, weight 1 diff --git a/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref index 2a97757757..90e18962a8 100644 --- a/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0, label implicit-null -O 2.2.2.2/32 [110/0] is directly connected, lo -O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1, label implicit-null -O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r2-eth1, label implicit-null -O 10.0.1.0/24 [110/10] is directly connected, r2-eth0 -O 10.0.2.0/24 [110/10] is directly connected, r2-eth1 -O 10.0.3.0/24 [110/10] is directly connected, r2-eth2 +O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0, label implicit-null, weight 1 +O 2.2.2.2/32 [110/0] is directly connected, lo, weight 1 +O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1, label implicit-null, weight 1 +O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r2-eth1, label implicit-null, weight 1 +O 10.0.1.0/24 [110/10] is directly connected, r2-eth0, weight 1 +O 10.0.2.0/24 [110/10] is directly connected, r2-eth1, weight 1 +O 10.0.3.0/24 [110/10] is directly connected, r2-eth2, weight 1 diff --git a/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref index 645224a97b..9b9c763339 100644 --- a/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx -O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0, label implicit-null -O 3.3.3.3/32 [110/0] is directly connected, lo -O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0, 
label implicit-null -O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0, label implicit-null -O 10.0.2.0/24 [110/10] is directly connected, r3-eth0 -O 10.0.3.0/24 [110/10] is directly connected, r3-eth1 +O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx, weight 1 +O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0, label implicit-null, weight 1 +O 3.3.3.3/32 [110/0] is directly connected, lo, weight 1 +O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0, label implicit-null, weight 1 +O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0, label implicit-null, weight 1 +O 10.0.2.0/24 [110/10] is directly connected, r3-eth0, weight 1 +O 10.0.3.0/24 [110/10] is directly connected, r3-eth1, weight 1 diff --git a/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref b/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref index 321828bfae..7444cc924b 100644 --- a/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref +++ b/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref @@ -1,7 +1,7 @@ -O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx -O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0, label implicit-null -O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0, label implicit-null -O 4.4.4.4/32 [110/0] is directly connected, lo -O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null -O 10.0.2.0/24 [110/10] is directly connected, r4-eth0 -O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null +O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx, weight 1 +O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0, label implicit-null, weight 1 +O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0, label implicit-null, weight 1 +O 4.4.4.4/32 [110/0] is directly connected, lo, weight 1 +O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null, weight 1 +O 10.0.2.0/24 [110/10] is directly connected, r4-eth0, weight 1 +O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null, weight 1 diff --git a/tests/topotests/ldp-topo1/test_ldp_topo1.py b/tests/topotests/ldp-topo1/test_ldp_topo1.py index c0d11fd5e0..cef4d6587e 100755 --- a/tests/topotests/ldp-topo1/test_ldp_topo1.py +++ b/tests/topotests/ldp-topo1/test_ldp_topo1.py @@ -57,7 +57,7 @@ r3-eth1 .3 | | .3 r3-eth0 | .4 r4-eth0 | r3 | | r4 | | 3.3.3.3 | | 4.4.4.4 | +-----------+ +---------+ -""" +""" import os import re @@ -83,6 +83,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "LDP Test Topology 1" @@ -91,23 +92,65 @@ class NetworkTopo(Topo): # Setup Routers router = {} for i in range(1, 5): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # Setup Switches, add Interfaces and Connections switch = {} # First switch - switch[0] = self.addSwitch('sw0', cls=topotest.LegacySwitch) - self.addLink(switch[0], router[1], intfName2='r1-eth0', addr1='80:AA:00:00:00:00', addr2='00:11:00:01:00:00') - self.addLink(switch[0], router[2], intfName2='r2-eth0', addr1='80:AA:00:00:00:01', addr2='00:11:00:02:00:00') + switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch) + self.addLink( + switch[0], + router[1], + intfName2="r1-eth0", + addr1="80:AA:00:00:00:00", + addr2="00:11:00:01:00:00", + ) + self.addLink( + switch[0], + router[2], + intfName2="r2-eth0", + addr1="80:AA:00:00:00:01", + addr2="00:11:00:02:00:00", + ) # Second switch - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[2], intfName2='r2-eth1', addr1='80:AA:00:01:00:00', addr2='00:11:00:02:00:01') - self.addLink(switch[1], router[3], 
intfName2='r3-eth0', addr1='80:AA:00:01:00:01', addr2='00:11:00:03:00:00') - self.addLink(switch[1], router[4], intfName2='r4-eth0', addr1='80:AA:00:01:00:02', addr2='00:11:00:04:00:00') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink( + switch[1], + router[2], + intfName2="r2-eth1", + addr1="80:AA:00:01:00:00", + addr2="00:11:00:02:00:01", + ) + self.addLink( + switch[1], + router[3], + intfName2="r3-eth0", + addr1="80:AA:00:01:00:01", + addr2="00:11:00:03:00:00", + ) + self.addLink( + switch[1], + router[4], + intfName2="r4-eth0", + addr1="80:AA:00:01:00:02", + addr2="00:11:00:04:00:00", + ) # Third switch - switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch) - self.addLink(switch[2], router[2], intfName2='r2-eth2', addr1='80:AA:00:02:00:00', addr2='00:11:00:02:00:02') - self.addLink(switch[2], router[3], intfName2='r3-eth1', addr1='80:AA:00:02:00:01', addr2='00:11:00:03:00:01') + switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch) + self.addLink( + switch[2], + router[2], + intfName2="r2-eth2", + addr1="80:AA:00:02:00:00", + addr2="00:11:00:02:00:02", + ) + self.addLink( + switch[2], + router[3], + intfName2="r3-eth1", + addr1="80:AA:00:02:00:01", + addr2="00:11:00:03:00:01", + ) ##################################################### @@ -116,6 +159,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net global fatal_error @@ -124,7 +168,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -134,10 +178,10 @@ def setup_module(module): # Starting Routers for i in range(1, 5): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ospfd', '%s/r%s/ospfd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ldpd', '%s/r%s/ldpd.conf' % (thisDir, i)) - fatal_error = net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ospfd", "%s/r%s/ospfd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ldpd", "%s/r%s/ldpd.conf" % (thisDir, i)) + fatal_error = net["r%s" % i].startRouter() if fatal_error != "": break @@ -145,6 +189,7 @@ def setup_module(module): # For debugging after starting FRR/Quagga daemons, uncomment the next line # CLI(net) + def teardown_module(module): global net @@ -160,7 +205,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -169,18 +214,19 @@ def test_router_running(): # Starting Routers for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line # CLI(net) + def test_mpls_interfaces(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -190,40 +236,51 @@ def test_mpls_interfaces(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = 
'%s/r%s/show_mpls_ldp_interface.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_interface.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp interface" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp interface" 2> /dev/null') + .rstrip() + ) # Mask out Timer in Uptime actual = re.sub(r" [0-9][0-9]:[0-9][0-9]:[0-9][0-9] ", " xx:xx:xx ", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP interface status", - title2="expected MPLS LDP interface status") + title2="expected MPLS LDP interface status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP Interface status Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP Interface status Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - if failures>0: + if failures > 0: fatal_error = "MPLS LDP Interface status failed" - assert failures == 0, "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -235,7 +292,7 @@ def test_mpls_ldp_neighbor_establish(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) # Wait for MPLS LDP neighbors to establish.
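The checks reformatted above all follow one pattern: run a vtysh command on the router, mask volatile fields such as uptimes, normalize line endings, then diff the result against a stored .ref file. A minimal standalone sketch of that pattern, assuming a Mininet-style node object with a cmd() method (the helper name and node argument are placeholders, not part of this patch):

import difflib
import re


def check_against_ref(node, command, ref_path):
    # Illustrative only: "node" stands in for the topotest router objects
    # used by the tests above (net["r1"], ...).
    expected = open(ref_path).read().rstrip()
    expected = ("\n".join(expected.splitlines()) + "\n").splitlines(True)
    actual = node.cmd('vtysh -c "%s" 2> /dev/null' % command).rstrip()
    # Mask volatile hh:mm:ss uptime timers before comparing
    actual = re.sub(r" [0-9][0-9]:[0-9][0-9]:[0-9][0-9] ", " xx:xx:xx ", actual)
    actual = ("\n".join(actual.splitlines()) + "\n").splitlines(True)
    # Empty string means the command output matches the reference file
    return "".join(difflib.unified_diff(actual, expected, "actual", "expected"))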
@@ -247,17 +304,23 @@ def test_mpls_ldp_neighbor_establish(): sys.stdout.flush() # Look for any node not yet converged for i in range(1, 5): - established = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip() + established = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null') + .rstrip() + ) # On current version, we need to make sure they all turn to OPERATIONAL on all lines # - lines = ('\n'.join(established.splitlines()) + '\n').splitlines(1) + lines = ("\n".join(established.splitlines()) + "\n").splitlines(1) # Check all lines to be either table header (starting with ^AF or show OPERATIONAL) - header = r'^AF.*' - operational = r'^ip.*OPERATIONAL.*' + header = r"^AF.*" + operational = r"^ip.*OPERATIONAL.*" found_operational = 0 for j in range(1, len(lines)): - if (not re.search(header, lines[j])) and (not re.search(operational, lines[j])): + if (not re.search(header, lines[j])) and ( + not re.search(operational, lines[j]) + ): established = "" # Empty string shows NOT established if re.search(operational, lines[j]): found_operational += 1 @@ -265,14 +328,14 @@ def test_mpls_ldp_neighbor_establish(): # Need at least one operational neighbor established = "" # Empty string shows NOT established if not established: - print('Waiting for r%s' %i) + print("Waiting for r%s" % i) sys.stdout.flush() break if not established: sleep(5) timeout -= 5 else: - print('Done') + print("Done") break else: # Bail out with error if a router fails to converge @@ -285,10 +348,10 @@ def test_mpls_ldp_neighbor_establish(): # Only wait if we actually went through a convergence print("\nwaiting 15s for LDP sessions to establish") sleep(15) - + # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error @@ -297,7 +360,7 @@ def test_mpls_ldp_discovery(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -307,39 +370,54 @@ def test_mpls_ldp_discovery(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_discovery.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_discovery.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null') + .rstrip() + ) # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null') + .rstrip() + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP discovery output", - title2="expected MPLS LDP discovery output") + 
title2="expected MPLS LDP discovery output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP discovery output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP discovery output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "MPLS LDP Interface discovery output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface discovery output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -351,7 +429,7 @@ def test_mpls_ldp_neighbor(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -361,44 +439,59 @@ def test_mpls_ldp_neighbor(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_neighbor.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_neighbor.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null') + .rstrip() + ) # Mask out changing parts in output # Mask out Timer in Uptime - actual = re.sub(r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", r"\1xx:xx:xx", actual) + actual = re.sub( + r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", + r"\1xx:xx:xx", + actual, + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP neighbor output", - title2="expected MPLS LDP neighbor output") + title2="expected MPLS LDP neighbor output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP neighbor output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP neighbor output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "MPLS LDP Interface neighbor output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface neighbor output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line - #CLI(net) + # CLI(net) def test_mpls_ldp_binding(): @@ -410,7 +503,7 @@ def test_mpls_ldp_binding(): # pytest.skip("Skipping test_mpls_ldp_binding") # Skip if previous fatal error condition is raised - if 
(fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -420,58 +513,77 @@ def test_mpls_ldp_binding(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_ldp_binding.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_binding.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp binding" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp binding" 2> /dev/null') + .rstrip() + ) # Mask out changing parts in output # Mask out label - actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual) - actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual) + actual = re.sub( + r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual + ) + actual = re.sub( + r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", + r"\1xxx\2", + actual, + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Sort lines which start with "xx via inet " - pattern = r'^\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+' + pattern = r"^\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" swapped = True while swapped: swapped = False for j in range(1, len(actual)): - if re.search(pattern, actual[j]) and re.search(pattern, actual[j-1]): - if actual[j-1] > actual[j]: - temp = actual[j-1] - actual[j-1] = actual[j] + if re.search(pattern, actual[j]) and re.search( + pattern, actual[j - 1] + ): + if actual[j - 1] > actual[j]: + temp = actual[j - 1] + actual[j - 1] = actual[j] actual[j] = temp swapped = True # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP binding output", - title2="expected MPLS LDP binding output") + title2="expected MPLS LDP binding output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP binding output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP binding output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "MPLS LDP Interface binding output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface binding output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line - #CLI(net) + # CLI(net) def test_zebra_ipv4_routingTable(): @@ -479,7 +591,7 @@ def test_zebra_ipv4_routingTable(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -489,13 +601,17 @@ def test_zebra_ipv4_routingTable(): print("******************************************\n") failures = 0 for i in range(1, 5): - 
refTableFile = '%s/r%s/show_ipv4_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ipv4_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip route" 2> /dev/null | grep "^O"').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^O"') + .rstrip() + ) # Drop timers on end of line (older Quagga Versions) actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual) @@ -505,32 +621,40 @@ def test_zebra_ipv4_routingTable(): # and translating remaining implicit (single-digit) labels to label implicit-null actual = re.sub(r" label [0-9]+", " label implicit-null", actual) # Check if we have implicit labels - if not, then remove them from reference - if (not re.search(r" label implicit-null", actual)): + if not re.search(r" label implicit-null", actual): expected = re.sub(r", label implicit-null", "", expected) # now fix newlines of expected (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IPv4 zebra routing table", - title2="expected IPv4 zebra routing table") + title2="expected IPv4 zebra routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IPv4 Zebra Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed IPv4 Zebra Routing Table Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -542,7 +666,7 @@ def test_mpls_table(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -553,45 +677,54 @@ def test_mpls_table(): failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/show_mpls_table.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_table.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls table" 2> /dev/null') + actual = net["r%s" % i].cmd('vtysh -c "show mpls table" 2> /dev/null') # Fix inconsistent Label numbers at beginning of line actual = re.sub(r"(\s+)[0-9]+(\s+LDP)", r"\1XX\2", actual) # Fix inconsistent Label numbers at end of line - actual = re.sub(r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+)[0-9][0-9]", 
r"\1XX", actual) + actual = re.sub( + r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+)[0-9][0-9]", r"\1XX", actual + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Sort lines which start with " XX LDP" - pattern = r'^\s+[0-9X]+\s+LDP' + pattern = r"^\s+[0-9X]+\s+LDP" swapped = True while swapped: swapped = False for j in range(1, len(actual)): - if re.search(pattern, actual[j]) and re.search(pattern, actual[j-1]): - if actual[j-1] > actual[j]: - temp = actual[j-1] - actual[j-1] = actual[j] + if re.search(pattern, actual[j]) and re.search( + pattern, actual[j - 1] + ): + if actual[j - 1] > actual[j]: + temp = actual[j - 1] + actual[j - 1] = actual[j] actual[j] = temp swapped = True # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS table output", - title2="expected MPLS table output") + title2="expected MPLS table output", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS table output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS table output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) @@ -600,7 +733,7 @@ def test_mpls_table(): # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -611,8 +744,8 @@ def test_linux_mpls_routes(): global fatal_error global net - # Skip if previous fatal error condition is raised - if (fatal_error != ""): + # Skip if previous fatal error condition is raised + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -622,15 +755,17 @@ def test_linux_mpls_routes(): print("******************************************\n") failures = 0 for i in range(1, 5): - refTableFile = '%s/r%s/ip_mpls_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/ip_mpls_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('ip -o -family mpls route 2> /dev/null').rstrip() + actual = ( + net["r%s" % i].cmd("ip -o -family mpls route 2> /dev/null").rstrip() + ) # Mask out label and protocol actual = re.sub(r"[0-9][0-9] via inet ", "xx via inet ", actual) @@ -641,39 +776,48 @@ def test_linux_mpls_routes(): # Sort nexthops nexthop_sorted = [] for line in actual.splitlines(): - tokens = re.split(r'\\\t', line.strip()) - nexthop_sorted.append('{} {}'.format( - tokens[0].strip(), - ' '.join([ token.strip() for token in sorted(tokens[1:]) ]) - ).strip()) + tokens = re.split(r"\\\t", line.strip()) + nexthop_sorted.append( + "{} {}".format( + tokens[0].strip(), + " ".join([token.strip() for token in sorted(tokens[1:])]), + ).strip() + ) # Sort lines and fixup differences between old and new iproute - actual = '\n'.join(sorted(nexthop_sorted)) + actual = "\n".join(sorted(nexthop_sorted)) actual = re.sub(r"nexthop via", "nexthopvia", actual) actual = re.sub(r" nexthop as to xx via inet ", " nexthopvia inet 
", actual) actual = re.sub(r" weight 1", "", actual) actual = re.sub(r" [ ]+", " ", actual) # put \n back at line ends - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual Linux Kernel MPLS route", - title2="expected Linux Kernel MPLS route") + title2="expected Linux Kernel MPLS route", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Linux Kernel MPLS route output Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Linux Kernel MPLS route output Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "Linux Kernel MPLS route output for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "Linux Kernel MPLS route output for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 5): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -685,12 +829,14 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -698,14 +844,14 @@ def test_shutdown_check_stderr(): print("******************************************\n") for i in range(1, 5): - net['r%s' % i].stopRouter() - log = net['r%s' % i].getStdErr('ldpd') + net["r%s" % i].stopRouter() + log = net["r%s" % i].getStdErr("ldpd") if log: print("\nRouter r%s LDPd StdErr Log:\n%s" % (i, log)) - log = net['r%s' % i].getStdErr('ospfd') + log = net["r%s" % i].getStdErr("ospfd") if log: print("\nRouter r%s OSPFd StdErr Log:\n%s" % (i, log)) - log = net['r%s' % i].getStdErr('zebra') + log = net["r%s" % i].getStdErr("zebra") if log: print("\nRouter r%s Zebra StdErr Log:\n%s" % (i, log)) @@ -715,23 +861,27 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) for i in range(1, 5): - net['r%s' % i].stopRouter() - net['r%s' % i].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r%s" % i].stopRouter() + net["r%s" % i].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == 
"__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py index 0fae64402a..600d640a70 100755 --- a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py +++ b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py @@ -69,7 +69,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -80,8 +80,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -89,35 +91,36 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3']: + for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['ce1']) - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["ce1"]) + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["ce2"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['ce2']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["ce3"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['ce3']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) def setup_module(mod): "Sets up the pytest environment" @@ -129,22 +132,20 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) # Don't start ospfd and ldpd in the CE nodes - if router.name[0] == 'r': + if router.name[0] == "r": router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_LDP, - os.path.join(CWD, '{}/ldpd.conf'.format(rname)) + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -159,16 +160,16 @@ def router_compare_json_output(rname, command, 
reference): logger.info('Comparing router "%s" "%s" output', rname, command) tgen = get_topogen() - filename = '{}/{}/{}'.format(CWD, rname, reference) + filename = "{}/{}/{}".format(CWD, rname, reference) expected = json.loads(open(filename).read()) # Run test function until we get an result. Wait at most 80 seconds. - test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + def test_ospf_convergence(): logger.info("Test: check OSPF adjacencies") tgen = get_topogen() @@ -177,8 +178,11 @@ def test_ospf_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json" + ) + def test_rib(): logger.info("Test: verify RIB") @@ -188,9 +192,10 @@ def test_rib(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: + for rname in ["r1", "r2", "r3"]: router_compare_json_output(rname, "show ip route json", "show_ip_route.ref") + def test_ldp_adjacencies(): logger.info("Test: verify LDP adjacencies") tgen = get_topogen() @@ -199,8 +204,11 @@ def test_ldp_adjacencies(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show mpls ldp discovery json", "show_ldp_discovery.ref" + ) + def test_ldp_neighbors(): logger.info("Test: verify LDP neighbors") @@ -210,8 +218,11 @@ def test_ldp_neighbors(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref" + ) + def test_ldp_bindings(): logger.info("Test: verify LDP bindings") @@ -221,8 +232,11 @@ def test_ldp_bindings(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show mpls ldp binding json", "show_ldp_binding.ref" + ) + def test_ldp_pwid_bindings(): logger.info("Test: verify LDP PW-ID bindings") @@ -232,8 +246,11 @@ def test_ldp_pwid_bindings(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref" + ) + def test_ldp_pseudowires(): logger.info("Test: verify LDP pseudowires") @@ -243,8 +260,11 @@ def test_ldp_pseudowires(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref") + for rname in ["r1", "r2", "r3"]: + 
router_compare_json_output( + rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref" + ) + def test_ldp_pseudowires_after_link_down(): logger.info("Test: verify LDP pseudowires after r1-r2 link goes down") @@ -256,22 +276,26 @@ def test_ldp_pseudowires_after_link_down(): # Shut down r1-r2 link */ tgen = get_topogen() - tgen.gears['r1'].peer_link_enable('r1-eth1', False) + tgen.gears["r1"].peer_link_enable("r1-eth1", False) topotest.sleep(5, "Waiting for the network to reconverge") # check if the pseudowire is still up (using an alternate path for nexthop resolution) - for rname in ['r1', 'r2', 'r3']: - router_compare_json_output(rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref") + for rname in ["r1", "r2", "r3"]: + router_compare_json_output( + rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref" + ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index f3c17be684..b2cd2d284d 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -27,13 +27,17 @@ from lib import topotest from lib.topolog import logger # Import common_config to use commomnly used APIs -from lib.common_config import (create_common_configuration, - InvalidCLIError, - load_config_to_router, - check_address_types, - generate_ips, - find_interface_with_greater_ip, - run_frr_cmd, retry) +from lib.common_config import ( + create_common_configuration, + InvalidCLIError, + load_config_to_router, + check_address_types, + generate_ips, + validate_ip_address, + find_interface_with_greater_ip, + run_frr_cmd, + retry, +) BGP_CONVERGENCE_TIMEOUT = 10 @@ -79,6 +83,9 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False): "holddowntimer": 180, "dest_link": { "r4": { + "allowas-in": { + "number_occurences":2 + }, "prefix_lists": [ { "name": "pf_list_1", @@ -126,24 +133,31 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False): bgp_addr_data = bgp_data.setdefault("address_family", {}) if not bgp_addr_data: - logger.debug("Router %s: 'address_family' not present in " - "input_dict for BGP", router) + logger.debug( + "Router %s: 'address_family' not present in " "input_dict for BGP", + router, + ) else: ipv4_data = bgp_addr_data.setdefault("ipv4", {}) ipv6_data = bgp_addr_data.setdefault("ipv6", {}) - neigh_unicast = True if ipv4_data.setdefault("unicast", {}) \ - or ipv6_data.setdefault("unicast", {}) else False + neigh_unicast = ( + True + if ipv4_data.setdefault("unicast", {}) + or ipv6_data.setdefault("unicast", {}) + else False + ) if neigh_unicast: data_all_bgp = __create_bgp_unicast_neighbor( - tgen, topo, input_dict, router, - config_data=data_all_bgp) + tgen, topo, input_dict, router, config_data=data_all_bgp + ) try: - result = create_common_configuration(tgen, router, data_all_bgp, - "bgp", build) + result = create_common_configuration( + tgen, router, data_all_bgp, "bgp", build + ) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -182,8 +196,9 @@ def __create_bgp_global(tgen, input_dict, router, build=False): config_data = [] if "local_as" not in bgp_data and build: - logger.error("Router %s: 'local_as' not present in input_dict" - "for BGP", 
router) + logger.error( + "Router %s: 'local_as' not present in input_dict" "for BGP", router + ) return False local_as = bgp_data.setdefault("local_as", "") @@ -199,14 +214,12 @@ def __create_bgp_global(tgen, input_dict, router, build=False): if del_router_id: config_data.append("no bgp router-id") if router_id: - config_data.append("bgp router-id {}".format( - router_id)) + config_data.append("bgp router-id {}".format(router_id)) return config_data -def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, - config_data=None): +def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=None): """ Helper API to create configuration for address-family unicast @@ -235,11 +248,8 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, addr_data = addr_dict["unicast"] if addr_data: - config_data.append("address-family {} unicast".format( - addr_type - )) - advertise_network = addr_data.setdefault("advertise_networks", - []) + config_data.append("address-family {} unicast".format(addr_type)) + advertise_network = addr_data.setdefault("advertise_networks", []) for advertise_network_dict in advertise_network: network = advertise_network_dict["network"] if type(network) is not list: @@ -250,12 +260,10 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, else: no_of_network = 1 - del_action = advertise_network_dict.setdefault("delete", - False) + del_action = advertise_network_dict.setdefault("delete", False) # Generating IPs for verification - prefix = str( - ipaddr.IPNetwork(unicode(network[0])).prefixlen) + prefix = str(ipaddr.IPNetwork(unicode(network[0])).prefixlen) network_list = generate_ips(network, no_of_network) for ip in network_list: ip = str(ipaddr.IPNetwork(unicode(ip)).network) @@ -271,20 +279,17 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, ibgp = max_paths.setdefault("ibgp", None) ebgp = max_paths.setdefault("ebgp", None) if ibgp: - config_data.append("maximum-paths ibgp {}".format( - ibgp - )) + config_data.append("maximum-paths ibgp {}".format(ibgp)) if ebgp: - config_data.append("maximum-paths {}".format( - ebgp - )) + config_data.append("maximum-paths {}".format(ebgp)) aggregate_addresses = addr_data.setdefault("aggregate_address", []) for aggregate_address in aggregate_addresses: network = aggregate_address.setdefault("network", None) if not network: - logger.debug("Router %s: 'network' not present in " - "input_dict for BGP", router) + logger.debug( + "Router %s: 'network' not present in " "input_dict for BGP", router + ) else: cmd = "aggregate-address {}".format(network) @@ -305,13 +310,12 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, if redistribute_data: for redistribute in redistribute_data: if "redist_type" not in redistribute: - logger.error("Router %s: 'redist_type' not present in " - "input_dict", router) + logger.error( + "Router %s: 'redist_type' not present in " "input_dict", router + ) else: - cmd = "redistribute {}".format( - redistribute["redist_type"]) - redist_attr = redistribute.setdefault("attribute", - None) + cmd = "redistribute {}".format(redistribute["redist_type"]) + redist_attr = redistribute.setdefault("attribute", None) if redist_attr: cmd = "{} {}".format(cmd, redist_attr) del_action = redistribute.setdefault("delete", False) @@ -320,8 +324,9 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data.append(cmd) if "neighbor" in addr_data: - neigh_data = __create_bgp_neighbor(topo, input_dict, - router, addr_type, add_neigh) + 
neigh_data = __create_bgp_neighbor( + topo, input_dict, router, addr_type, add_neigh + ) config_data.extend(neigh_data) for addr_type, addr_dict in bgp_data.iteritems(): @@ -331,11 +336,11 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, addr_data = addr_dict["unicast"] if "neighbor" in addr_data: neigh_addr_data = __create_bgp_unicast_address_family( - topo, input_dict, router, addr_type, add_neigh) + topo, input_dict, router, addr_type, add_neigh + ) config_data.extend(neigh_addr_data) - logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()") return config_data @@ -365,12 +370,10 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): update_source = None if dest_link in nh_details["links"].keys(): - ip_addr = \ - nh_details["links"][dest_link][addr_type].split("/")[0] + ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] # Loopback interface if "source_link" in peer and peer["source_link"] == "lo": - update_source = topo[router]["links"]["lo"][ - addr_type].split("/")[0] + update_source = topo[router]["links"]["lo"][addr_type].split("/")[0] neigh_cxt = "neighbor {}".format(ip_addr) @@ -380,41 +383,44 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): config_data.append("address-family ipv6 unicast") config_data.append("{} activate".format(neigh_cxt)) - disable_connected = peer.setdefault("disable_connected_check", - False) - keep_alive = peer.setdefault("keep_alive", 60) - hold_down = peer.setdefault("hold_down", 180) + disable_connected = peer.setdefault("disable_connected_check", False) + keep_alive = peer.setdefault("keepalivetimer", 60) + hold_down = peer.setdefault("holddowntimer", 180) password = peer.setdefault("password", None) max_hop_limit = peer.setdefault("ebgp_multihop", 1) if update_source: - config_data.append("{} update-source {}".format( - neigh_cxt, update_source)) + config_data.append( + "{} update-source {}".format(neigh_cxt, update_source) + ) if disable_connected: - config_data.append("{} disable-connected-check".format( - disable_connected)) + config_data.append( + "{} disable-connected-check".format(disable_connected) + ) if update_source: - config_data.append("{} update-source {}".format(neigh_cxt, - update_source)) + config_data.append( + "{} update-source {}".format(neigh_cxt, update_source) + ) if int(keep_alive) != 60 and int(hold_down) != 180: config_data.append( - "{} timers {} {}".format(neigh_cxt, keep_alive, - hold_down)) + "{} timers {} {}".format(neigh_cxt, keep_alive, hold_down) + ) if password: - config_data.append( - "{} password {}".format(neigh_cxt, password)) + config_data.append("{} password {}".format(neigh_cxt, password)) if max_hop_limit > 1: - config_data.append("{} ebgp-multihop {}".format(neigh_cxt, - max_hop_limit)) + config_data.append( + "{} ebgp-multihop {}".format(neigh_cxt, max_hop_limit) + ) config_data.append("{} enforce-multihop".format(neigh_cxt)) logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()") return config_data -def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, - add_neigh=True): +def __create_bgp_unicast_address_family( + topo, input_dict, router, addr_type, add_neigh=True +): """ API prints bgp global config to bgp_json file. 
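The neighbor handling reworked above turns a per-peer dictionary into plain vtysh configuration strings. A reduced sketch of that assembly step, using the dictionary keys visible in the hunk (the function name and defaults here are illustrative, not the library's API):

def build_neighbor_config(neigh_ip, peer):
    # "peer" mirrors the per-neighbor dictionaries used above, e.g.
    # {"keepalivetimer": 5, "holddowntimer": 15, "password": "frr"}
    neigh_cxt = "neighbor {}".format(neigh_ip)
    config = []
    keep_alive = peer.get("keepalivetimer", 60)
    hold_down = peer.get("holddowntimer", 180)
    if int(keep_alive) != 60 and int(hold_down) != 180:
        config.append("{} timers {} {}".format(neigh_cxt, keep_alive, hold_down))
    if peer.get("password"):
        config.append("{} password {}".format(neigh_cxt, peer["password"]))
    if peer.get("ebgp_multihop", 1) > 1:
        config.append("{} ebgp-multihop {}".format(neigh_cxt, peer["ebgp_multihop"]))
        config.append("{} enforce-multihop".format(neigh_cxt))
    return config

For example, build_neighbor_config("10.0.0.2", {"keepalivetimer": 5, "holddowntimer": 15}) returns ["neighbor 10.0.0.2 timers 5 15"].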
@@ -440,37 +446,34 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, nh_details = topo[peer_name] # Loopback interface if "source_link" in peer and peer["source_link"] == "lo": - for destRouterLink, data in sorted(nh_details["links"]. - iteritems()): + for destRouterLink, data in sorted(nh_details["links"].iteritems()): if "type" in data and data["type"] == "loopback": if dest_link == destRouterLink: - ip_addr = \ - nh_details["links"][destRouterLink][ - addr_type].split("/")[0] + ip_addr = nh_details["links"][destRouterLink][ + addr_type + ].split("/")[0] # Physical interface else: if dest_link in nh_details["links"].keys(): - ip_addr = nh_details["links"][dest_link][ - addr_type].split("/")[0] + ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] if addr_type == "ipv4" and bgp_data["ipv6"]: - deactivate = nh_details["links"][ - dest_link]["ipv6"].split("/")[0] + deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[ + 0 + ] neigh_cxt = "neighbor {}".format(ip_addr) - config_data.append("address-family {} unicast".format( - addr_type - )) + config_data.append("address-family {} unicast".format(addr_type)) if deactivate: - config_data.append( - "no neighbor {} activate".format(deactivate)) + config_data.append("no neighbor {} activate".format(deactivate)) next_hop_self = peer.setdefault("next_hop_self", None) send_community = peer.setdefault("send_community", None) prefix_lists = peer.setdefault("prefix_lists", {}) route_maps = peer.setdefault("route_maps", {}) no_send_community = peer.setdefault("no_send_community", None) + allowas_in = peer.setdefault("allowas-in", None) # next-hop-self if next_hop_self: @@ -481,21 +484,30 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, # no_send_community if no_send_community: - config_data.append("no {} send-community {}".format( - neigh_cxt, no_send_community)) + config_data.append( + "no {} send-community {}".format(neigh_cxt, no_send_community) + ) + if "allowas_in" in peer: + allow_as_in = peer["allowas_in"] + config_data.append("{} allowas-in {}".format(neigh_cxt, allow_as_in)) + + if "no_allowas_in" in peer: + allow_as_in = peer["no_allowas_in"] + config_data.append("no {} allowas-in {}".format(neigh_cxt, allow_as_in)) if prefix_lists: for prefix_list in prefix_lists: name = prefix_list.setdefault("name", {}) direction = prefix_list.setdefault("direction", "in") del_action = prefix_list.setdefault("delete", False) if not name: - logger.info("Router %s: 'name' not present in " - "input_dict for BGP neighbor prefix lists", - router) + logger.info( + "Router %s: 'name' not present in " + "input_dict for BGP neighbor prefix lists", + router, + ) else: - cmd = "{} prefix-list {} {}".format(neigh_cxt, name, - direction) + cmd = "{} prefix-list {} {}".format(neigh_cxt, name, direction) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) @@ -506,16 +518,28 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type, direction = route_map.setdefault("direction", "in") del_action = route_map.setdefault("delete", False) if not name: - logger.info("Router %s: 'name' not present in " - "input_dict for BGP neighbor route name", - router) + logger.info( + "Router %s: 'name' not present in " + "input_dict for BGP neighbor route name", + router, + ) else: - cmd = "{} route-map {} {}".format(neigh_cxt, name, - direction) + cmd = "{} route-map {} {}".format(neigh_cxt, name, direction) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) + 
if allowas_in: + number_occurences = allowas_in.setdefault("number_occurences", {}) + del_action = allowas_in.setdefault("delete", False) + + cmd = "{} allowas-in {}".format(neigh_cxt, number_occurences) + + if del_action: + cmd = "no {}".format(cmd) + + config_data.append(cmd) + return config_data @@ -564,12 +588,10 @@ def verify_router_id(tgen, topo, input_dict): rnode = tgen.routers()[router] - del_router_id = input_dict[router]["bgp"].setdefault( - "del_router_id", False) + del_router_id = input_dict[router]["bgp"].setdefault("del_router_id", False) logger.info("Checking router %s router-id", router) - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) router_id_out = show_bgp_json["ipv4Unicast"]["routerId"] router_id_out = ipaddr.IPv4Address(unicode(router_id_out)) @@ -582,12 +604,12 @@ def verify_router_id(tgen, topo, input_dict): router_id = ipaddr.IPv4Address(unicode(router_id)) if router_id == router_id_out: - logger.info("Found expected router-id %s for router %s", - router_id, router) + logger.info("Found expected router-id %s for router %s", router_id, router) else: - errormsg = "Router-id for router:{} mismatch, expected:" \ - " {} but found:{}".format(router, router_id, - router_id_out) + errormsg = ( + "Router-id for router:{} mismatch, expected:" + " {} but found:{}".format(router, router_id, router_id_out) + ) return errormsg logger.debug("Exiting lib API: verify_router_id()") @@ -617,9 +639,11 @@ def verify_bgp_convergence(tgen, topo): logger.debug("Entering lib API: verify_bgp_convergence()") for router, rnode in tgen.routers().iteritems(): + if "bgp" not in topo["routers"][router]: + continue + logger.info("Verifying BGP Convergence on router %s", router) - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -647,15 +671,12 @@ def verify_bgp_convergence(tgen, topo): for dest_link in peer_data["dest_link"].keys(): data = topo["routers"][bgp_neighbor]["links"] if dest_link in data: - neighbor_ip = \ - data[dest_link][addr_type].split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] if addr_type == "ipv4": - ipv4_data = show_bgp_json["ipv4Unicast"][ - "peers"] + ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] nh_state = ipv4_data[neighbor_ip]["state"] else: - ipv6_data = show_bgp_json["ipv6Unicast"][ - "peers"] + ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] if nh_state == "Established": @@ -663,8 +684,7 @@ def verify_bgp_convergence(tgen, topo): if no_of_peer == total_peer: logger.info("BGP is Converged for router %s", router) else: - errormsg = "BGP is not converged for router {}".format( - router) + errormsg = "BGP is not converged for router {}".format(router) return errormsg logger.debug("Exiting API: verify_bgp_convergence()") @@ -707,16 +727,9 @@ def modify_as_number(tgen, topo, input_dict): for router in input_dict.keys(): # Remove bgp configuration - router_dict.update({ - router: { - "bgp": { - "delete": True - } - } - }) + router_dict.update({router: {"bgp": {"delete": True}}}) - new_topo[router]["bgp"]["local_as"] = \ - input_dict[router]["bgp"]["local_as"] + new_topo[router]["bgp"]["local_as"] = input_dict[router]["bgp"]["local_as"] logger.info("Removing bgp configuration") 
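The allowas-in handling is the functional addition in this lib/bgp.py change; the rest of the hunk is reformatting. A hypothetical topotest input for that knob, trimmed down to the relevant nesting from the create_router_bgp docstring (router and link names are invented):

# Per-peer "allowas-in" entry under dest_link, as in the docstring example
input_dict = {
    "r2": {
        "bgp": {
            "address_family": {
                "ipv4": {
                    "unicast": {
                        "neighbor": {
                            "r1": {
                                "dest_link": {
                                    "r2": {"allowas-in": {"number_occurences": 2}}
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
# With this input, the address-family block generated above would include a
# "neighbor <peer-ip> allowas-in 2" line for the r1-r2 session.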
create_router_bgp(tgen, topo, router_dict) @@ -777,8 +790,9 @@ def verify_as_numbers(tgen, topo, input_dict): logger.info("Verifying AS numbers for dut %s:", router) - show_ip_bgp_neighbor_json = run_frr_cmd(rnode, - "show ip bgp neighbor json", isjson=True) + show_ip_bgp_neighbor_json = run_frr_cmd( + rnode, "show ip bgp neighbor json", isjson=True + ) local_as = input_dict[router]["bgp"]["local_as"] bgp_addr_type = topo["routers"][router]["bgp"]["address_family"] @@ -786,8 +800,7 @@ def verify_as_numbers(tgen, topo, input_dict): if not check_address_types(addr_type): continue - bgp_neighbors = bgp_addr_type[addr_type]["unicast"][ - "neighbor"] + bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"] for bgp_neighbor, peer_data in bgp_neighbors.iteritems(): remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"] @@ -796,32 +809,42 @@ def verify_as_numbers(tgen, topo, input_dict): data = topo["routers"][bgp_neighbor]["links"] if dest_link in data: - neighbor_ip = data[dest_link][addr_type]. \ - split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] neigh_data = show_ip_bgp_neighbor_json[neighbor_ip] # Verify Local AS for router if neigh_data["localAs"] != local_as: - errormsg = "Failed: Verify local_as for dut {}," \ - " found: {} but expected: {}".format( - router, neigh_data["localAs"], - local_as) + errormsg = ( + "Failed: Verify local_as for dut {}," + " found: {} but expected: {}".format( + router, neigh_data["localAs"], local_as + ) + ) return errormsg else: - logger.info("Verified local_as for dut %s, found" - " expected: %s", router, local_as) + logger.info( + "Verified local_as for dut %s, found" " expected: %s", + router, + local_as, + ) # Verify Remote AS for neighbor if neigh_data["remoteAs"] != remote_as: - errormsg = "Failed: Verify remote_as for dut " \ - "{}'s neighbor {}, found: {} but " \ - "expected: {}".format( - router, bgp_neighbor, - neigh_data["remoteAs"], remote_as) + errormsg = ( + "Failed: Verify remote_as for dut " + "{}'s neighbor {}, found: {} but " + "expected: {}".format( + router, bgp_neighbor, neigh_data["remoteAs"], remote_as + ) + ) return errormsg else: - logger.info("Verified remote_as for dut %s's " - "neighbor %s, found expected: %s", - router, bgp_neighbor, remote_as) + logger.info( + "Verified remote_as for dut %s's " + "neighbor %s, found expected: %s", + router, + bgp_neighbor, + remote_as, + ) logger.debug("Exiting lib API: verify_AS_numbers()") return True @@ -862,12 +885,14 @@ def clear_bgp_and_verify(tgen, topo, router): for retry in range(31): sleeptime = 3 # Waiting for BGP to converge - logger.info("Waiting for %s sec for BGP to converge on router" - " %s...", sleeptime, router) + logger.info( + "Waiting for %s sec for BGP to converge on router" " %s...", + sleeptime, + router, + ) sleep(sleeptime) - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -897,38 +922,39 @@ def clear_bgp_and_verify(tgen, topo, router): if dest_link in data: neighbor_ip = data[dest_link][addr_type].split("/")[0] if addr_type == "ipv4": - ipv4_data = show_bgp_json["ipv4Unicast"][ - "peers"] + ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] nh_state = ipv4_data[neighbor_ip]["state"] # Peer up time dictionary - peer_uptime_before_clear_bgp[bgp_neighbor] = \ - ipv4_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + 
peer_uptime_before_clear_bgp[bgp_neighbor] = ipv4_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] else: - ipv6_data = show_bgp_json["ipv6Unicast"][ - "peers"] + ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] # Peer up time dictionary - peer_uptime_before_clear_bgp[bgp_neighbor] = \ - ipv6_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_before_clear_bgp[bgp_neighbor] = ipv6_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] if nh_state == "Established": no_of_peer += 1 if no_of_peer == total_peer: - logger.info("BGP is Converged for router %s before bgp" - " clear", router) + logger.info("BGP is Converged for router %s before bgp" " clear", router) break else: - logger.info("BGP is not yet Converged for router %s " - "before bgp clear", router) + logger.info( + "BGP is not yet Converged for router %s " "before bgp clear", router + ) else: - errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \ - " router {}".format(router) + errormsg = ( + "TIMEOUT!! BGP is not converged in 30 seconds for" + " router {}".format(router) + ) return errormsg - logger.info(peer_uptime_before_clear_bgp) # Clearing BGP logger.info("Clearing BGP neighborship for router %s..", router) for addr_type in bgp_addr_type.keys(): @@ -942,13 +968,14 @@ def clear_bgp_and_verify(tgen, topo, router): for retry in range(31): sleeptime = 3 # Waiting for BGP to converge - logger.info("Waiting for %s sec for BGP to converge on router" - " %s...", sleeptime, router) + logger.info( + "Waiting for %s sec for BGP to converge on router" " %s...", + sleeptime, + router, + ) sleep(sleeptime) - - show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -975,44 +1002,46 @@ def clear_bgp_and_verify(tgen, topo, router): data = topo["routers"][bgp_neighbor]["links"] if dest_link in data: - neighbor_ip = data[dest_link][addr_type].\ - split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] if addr_type == "ipv4": - ipv4_data = show_bgp_json["ipv4Unicast"][ - "peers"] + ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] nh_state = ipv4_data[neighbor_ip]["state"] - peer_uptime_after_clear_bgp[bgp_neighbor] = \ - ipv4_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_after_clear_bgp[bgp_neighbor] = ipv4_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] else: - ipv6_data = show_bgp_json["ipv6Unicast"][ - "peers"] + ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] # Peer up time dictionary - peer_uptime_after_clear_bgp[bgp_neighbor] = \ - ipv6_data[neighbor_ip]["peerUptimeEstablishedEpoch"] + peer_uptime_after_clear_bgp[bgp_neighbor] = ipv6_data[ + neighbor_ip + ]["peerUptimeEstablishedEpoch"] if nh_state == "Established": no_of_peer += 1 if no_of_peer == total_peer: - logger.info("BGP is Converged for router %s after bgp clear", - router) + logger.info("BGP is Converged for router %s after bgp clear", router) break else: - logger.info("BGP is not yet Converged for router %s after" - " bgp clear", router) + logger.info( + "BGP is not yet Converged for router %s after" " bgp clear", router + ) else: - errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \ - " router {}".format(router) + errormsg = ( + "TIMEOUT!! 
BGP is not converged in 30 seconds for" + " router {}".format(router) + ) return errormsg - logger.info(peer_uptime_after_clear_bgp) + # Comparing peerUptimeEstablishedEpoch dictionaries if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp: - logger.info("BGP neighborship is reset after clear BGP on router %s", - router) + logger.info("BGP neighborship is reset after clear BGP on router %s", router) else: - errormsg = "BGP neighborship is not reset after clear bgp on router" \ - " {}".format(router) + errormsg = ( + "BGP neighborship is not reset after clear bgp on router" + " {}".format(router) + ) return errormsg logger.debug("Exiting lib API: clear_bgp_and_verify()") @@ -1060,11 +1089,11 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): rnode = router_list[router] - logger.info("Verifying bgp timers functionality, DUT is %s:", - router) + logger.info("Verifying bgp timers functionality, DUT is %s:", router) - show_ip_bgp_neighbor_json = \ - run_frr_cmd(rnode, "show ip bgp neighbor json", isjson=True) + show_ip_bgp_neighbor_json = run_frr_cmd( + rnode, "show ip bgp neighbor json", isjson=True + ) bgp_addr_type = input_dict[router]["bgp"]["address_family"] @@ -1072,8 +1101,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): if not check_address_types(addr_type): continue - bgp_neighbors = bgp_addr_type[addr_type]["unicast"][ - "neighbor"] + bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"] for bgp_neighbor, peer_data in bgp_neighbors.iteritems(): for dest_link, peer_dict in peer_data["dest_link"].iteritems(): data = topo["routers"][bgp_neighbor]["links"] @@ -1082,32 +1110,41 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): holddowntimer = peer_dict["holddowntimer"] if dest_link in data: - neighbor_ip = data[dest_link][addr_type]. 
\ - split("/")[0] + neighbor_ip = data[dest_link][addr_type].split("/")[0] neighbor_intf = data[dest_link]["interface"] # Verify HoldDownTimer for neighbor - bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[ - neighbor_ip]["bgpTimerHoldTimeMsecs"] + bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][ + "bgpTimerHoldTimeMsecs" + ] if bgpHoldTimeMsecs != holddowntimer * 1000: - errormsg = "Verifying holddowntimer for bgp " \ - "neighbor {} under dut {}, found: {} " \ - "but expected: {}".format( - neighbor_ip, router, - bgpHoldTimeMsecs, - holddowntimer * 1000) + errormsg = ( + "Verifying holddowntimer for bgp " + "neighbor {} under dut {}, found: {} " + "but expected: {}".format( + neighbor_ip, + router, + bgpHoldTimeMsecs, + holddowntimer * 1000, + ) + ) return errormsg # Verify KeepAliveTimer for neighbor - bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[ - neighbor_ip]["bgpTimerKeepAliveIntervalMsecs"] + bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][ + "bgpTimerKeepAliveIntervalMsecs" + ] if bgpKeepAliveTimeMsecs != keepalivetimer * 1000: - errormsg = "Verifying keepalivetimer for bgp " \ - "neighbor {} under dut {}, found: {} " \ - "but expected: {}".format( - neighbor_ip, router, - bgpKeepAliveTimeMsecs, - keepalivetimer * 1000) + errormsg = ( + "Verifying keepalivetimer for bgp " + "neighbor {} under dut {}, found: {} " + "but expected: {}".format( + neighbor_ip, + router, + bgpKeepAliveTimeMsecs, + keepalivetimer * 1000, + ) + ) return errormsg #################### @@ -1120,40 +1157,50 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): # Wait till keep alive time logger.info("=" * 20) logger.info("Scenario 1:") - logger.info("Shutdown and bring up peer interface: %s " - "in keep alive time : %s sec and verify " - " BGP neighborship is intact in %s sec ", - neighbor_intf, keepalivetimer, - (holddowntimer - keepalivetimer)) + logger.info( + "Shutdown and bring up peer interface: %s " + "in keep alive time : %s sec and verify " + " BGP neighborship is intact in %s sec ", + neighbor_intf, + keepalivetimer, + (holddowntimer - keepalivetimer), + ) logger.info("=" * 20) logger.info("Waiting for %s sec..", keepalivetimer) sleep(keepalivetimer) # Shutting down peer ineterface - logger.info("Shutting down interface %s on router %s", - neighbor_intf, bgp_neighbor) + logger.info( + "Shutting down interface %s on router %s", + neighbor_intf, + bgp_neighbor, + ) topotest.interface_set_status( - router_list[bgp_neighbor], neighbor_intf, - ifaceaction=False) + router_list[bgp_neighbor], neighbor_intf, ifaceaction=False + ) # Bringing up peer interface sleep(5) - logger.info("Bringing up interface %s on router %s..", - neighbor_intf, bgp_neighbor) + logger.info( + "Bringing up interface %s on router %s..", + neighbor_intf, + bgp_neighbor, + ) topotest.interface_set_status( - router_list[bgp_neighbor], neighbor_intf, - ifaceaction=True) + router_list[bgp_neighbor], neighbor_intf, ifaceaction=True + ) # Verifying BGP neighborship is intact in # (holddown - keepalive) time - for timer in range(keepalivetimer, holddowntimer, - int(holddowntimer / 3)): + for timer in range( + keepalivetimer, holddowntimer, int(holddowntimer / 3) + ): logger.info("Waiting for %s sec..", keepalivetimer) sleep(keepalivetimer) sleep(2) - show_bgp_json = \ - run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd( + rnode, "show bgp summary json", isjson=True + ) if addr_type == "ipv4": ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] @@ -1162,17 +1209,22 
@@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): ipv6_data = show_bgp_json["ipv6Unicast"]["peers"] nh_state = ipv6_data[neighbor_ip]["state"] - if timer == \ - (holddowntimer - keepalivetimer): + if timer == (holddowntimer - keepalivetimer): if nh_state != "Established": - errormsg = "BGP neighborship has not gone " \ - "down in {} sec for neighbor {}" \ - .format(timer, bgp_neighbor) + errormsg = ( + "BGP neighborship has not gone " + "down in {} sec for neighbor {}".format( + timer, bgp_neighbor + ) + ) return errormsg else: - logger.info("BGP neighborship is intact in %s" - " sec for neighbor %s", - timer, bgp_neighbor) + logger.info( + "BGP neighborship is intact in %s" + " sec for neighbor %s", + timer, + bgp_neighbor, + ) #################### # Shutting down peer interface and verifying that BGP @@ -1180,27 +1232,36 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): #################### logger.info("=" * 20) logger.info("Scenario 2:") - logger.info("Shutdown peer interface: %s and verify BGP" - " neighborship has gone down in hold down " - "time %s sec", neighbor_intf, holddowntimer) + logger.info( + "Shutdown peer interface: %s and verify BGP" + " neighborship has gone down in hold down " + "time %s sec", + neighbor_intf, + holddowntimer, + ) logger.info("=" * 20) - logger.info("Shutting down interface %s on router %s..", - neighbor_intf, bgp_neighbor) - topotest.interface_set_status(router_list[bgp_neighbor], - neighbor_intf, - ifaceaction=False) + logger.info( + "Shutting down interface %s on router %s..", + neighbor_intf, + bgp_neighbor, + ) + topotest.interface_set_status( + router_list[bgp_neighbor], neighbor_intf, ifaceaction=False + ) # Verifying BGP neighborship is going down in holddown time - for timer in range(keepalivetimer, - (holddowntimer + keepalivetimer), - int(holddowntimer / 3)): + for timer in range( + keepalivetimer, + (holddowntimer + keepalivetimer), + int(holddowntimer / 3), + ): logger.info("Waiting for %s sec..", keepalivetimer) sleep(keepalivetimer) sleep(2) - show_bgp_json = \ - run_frr_cmd(rnode, "show bgp summary json", - isjson=True) + show_bgp_json = run_frr_cmd( + rnode, "show bgp summary json", isjson=True + ) if addr_type == "ipv4": ipv4_data = show_bgp_json["ipv4Unicast"]["peers"] @@ -1211,22 +1272,29 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict): if timer == holddowntimer: if nh_state == "Established": - errormsg = "BGP neighborship has not gone " \ - "down in {} sec for neighbor {}" \ - .format(timer, bgp_neighbor) + errormsg = ( + "BGP neighborship has not gone " + "down in {} sec for neighbor {}".format( + timer, bgp_neighbor + ) + ) return errormsg else: - logger.info("BGP neighborship has gone down in" - " %s sec for neighbor %s", - timer, bgp_neighbor) + logger.info( + "BGP neighborship has gone down in" + " %s sec for neighbor %s", + timer, + bgp_neighbor, + ) logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()") return True @retry(attempts=3, wait=4, return_is_str=True) -def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, - input_dict, seq_id=None): +def verify_bgp_attributes( + tgen, addr_type, dut, static_routes, rmap_name, input_dict, seq_id=None +): """ API will verify BGP attributes set by Route-map for given prefix and DUT. 
it will run "show bgp ipv4/ipv6 {prefix_address} json" command @@ -1256,7 +1324,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, } }, "set": { - "localpref": 150, + "locPrf": 150, "weight": 100 } }], @@ -1269,7 +1337,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, } }, "set": { - "med": 50 + "metric": 50 } }] } @@ -1288,7 +1356,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, if router != dut: continue - logger.info('Verifying BGP set attributes for dut {}:'.format(router)) + logger.info("Verifying BGP set attributes for dut {}:".format(router)) for static_route in static_routes: cmd = "show bgp {} {} json".format(addr_type, static_route) @@ -1297,8 +1365,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, dict_to_test = [] tmp_list = [] for rmap_router in input_dict.keys(): - for rmap, values in input_dict[rmap_router][ - "route_maps"].items(): + for rmap, values in input_dict[rmap_router]["route_maps"].items(): if rmap == rmap_name: dict_to_test = values for rmap_dict in values: @@ -1307,8 +1374,7 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, seq_id = [seq_id] if "seq_id" in rmap_dict: - rmap_seq_id = \ - rmap_dict["seq_id"] + rmap_seq_id = rmap_dict["seq_id"] for _seq_id in seq_id: if _seq_id == rmap_seq_id: tmp_list.append(rmap_dict) @@ -1318,55 +1384,56 @@ def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name, for rmap_dict in dict_to_test: if "set" in rmap_dict: for criteria in rmap_dict["set"].keys(): - if criteria not in show_bgp_json[ - "paths"][0]: - errormsg = ("BGP attribute: {}" - " is not found in" - " cli: {} output " - "in router {}". - format(criteria, - cmd, - router)) + if criteria not in show_bgp_json["paths"][0]: + errormsg = ( + "BGP attribute: {}" + " is not found in" + " cli: {} output " + "in router {}".format(criteria, cmd, router) + ) return errormsg - if rmap_dict["set"][criteria] == \ - show_bgp_json["paths"][0][ - criteria]: - logger.info("Verifying BGP " - "attribute {} for" - " route: {} in " - "router: {}, found" - " expected value:" - " {}". - format(criteria, - static_route, - dut, - rmap_dict[ - "set"][ - criteria])) + if ( + rmap_dict["set"][criteria] + == show_bgp_json["paths"][0][criteria] + ): + logger.info( + "Verifying BGP " + "attribute {} for" + " route: {} in " + "router: {}, found" + " expected value:" + " {}".format( + criteria, + static_route, + dut, + rmap_dict["set"][criteria], + ) + ) else: - errormsg = \ - ("Failed: Verifying BGP " - "attribute {} for route:" - " {} in router: {}, " - " expected value: {} but" - " found: {}". - format(criteria, - static_route, - dut, - rmap_dict["set"] - [criteria], - show_bgp_json[ - 'paths'][ - 0][criteria])) + errormsg = ( + "Failed: Verifying BGP " + "attribute {} for route:" + " {} in router: {}, " + " expected value: {} but" + " found: {}".format( + criteria, + static_route, + dut, + rmap_dict["set"][criteria], + show_bgp_json["paths"][0][criteria], + ) + ) return errormsg logger.debug("Exiting lib API: verify_bgp_attributes()") return True + @retry(attempts=4, wait=2, return_is_str=True, initial_wait=2) -def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, - attribute): +def verify_best_path_as_per_bgp_attribute( + tgen, addr_type, router, input_dict, attribute +): """ API is to verify best path according to BGP attributes for given routes. 
"show bgp ipv4/6 json" command will be run and verify best path according @@ -1406,7 +1473,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, } } } - attribute = "localpref" + attribute = "locPrf" result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut, \ input_dict, attribute) Returns @@ -1443,40 +1510,38 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, attribute_dict[next_hop_ip] = route_attribute[attribute] # AS_PATH attribute - if attribute == "aspath": + if attribute == "path": # Find next_hop for the route have minimum as_path - _next_hop = min(attribute_dict, key=lambda x: len(set( - attribute_dict[x]))) + _next_hop = min( + attribute_dict, key=lambda x: len(set(attribute_dict[x])) + ) compare = "SHORTEST" # LOCAL_PREF attribute - elif attribute == "localpref": + elif attribute == "locPrf": # Find next_hop for the route have highest local preference - _next_hop = max(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = max(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "HIGHEST" # WEIGHT attribute elif attribute == "weight": # Find next_hop for the route have highest weight - _next_hop = max(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = max(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "HIGHEST" # ORIGIN attribute elif attribute == "origin": # Find next_hop for the route have IGP as origin, - # - rule is IGP>EGP>INCOMPLETE - _next_hop = [key for (key, value) in - attribute_dict.iteritems() - if value == "IGP"][0] + _next_hop = [ + key for (key, value) in attribute_dict.iteritems() if value == "IGP" + ][0] compare = "" # MED attribute - elif attribute == "med": + elif attribute == "metric": # Find next_hop for the route have LOWEST MED - _next_hop = min(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "LOWEST" # Show ip route @@ -1489,8 +1554,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, # Verifying output dictionary rib_routes_json is not empty if not bool(rib_routes_json): - errormsg = "No route found in RIB of router {}..". 
\ - format(router) + errormsg = "No route found in RIB of router {}..".format(router) return errormsg st_found = False @@ -1499,31 +1563,41 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict, if route in rib_routes_json: st_found = True # Verify next_hop in rib_routes_json - if rib_routes_json[route][0]["nexthops"][0]["ip"] in \ - attribute_dict: + if rib_routes_json[route][0]["nexthops"][0]["ip"] in attribute_dict: nh_found = True else: - errormsg = "Incorrect Nexthop for BGP route {} in " \ - "RIB of router {}, Expected: {}, Found:" \ - " {}\n".format(route, router, - rib_routes_json[route][0][ - "nexthops"][0]["ip"], - _next_hop) + errormsg = ( + "Incorrect Nexthop for BGP route {} in " + "RIB of router {}, Expected: {}, Found:" + " {}\n".format( + route, + router, + rib_routes_json[route][0]["nexthops"][0]["ip"], + _next_hop, + ) + ) return errormsg if st_found and nh_found: logger.info( "Best path for prefix: %s with next_hop: %s is " "installed according to %s %s: (%s) in RIB of " - "router %s", route, _next_hop, compare, - attribute, attribute_dict[_next_hop], router) + "router %s", + route, + _next_hop, + compare, + attribute, + attribute_dict[_next_hop], + router, + ) logger.debug("Exiting lib API: verify_best_path_as_per_bgp_attribute()") return True -def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, - attribute): +def verify_best_path_as_per_admin_distance( + tgen, addr_type, router, input_dict, attribute +): """ API is to verify best path according to admin distance for given route. "show ip/ipv6 route json" command will be run and verify @@ -1548,7 +1622,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, {"network": "200.50.2.0/32", \ "admin_distance": 60, "next_hop": "10.0.0.18"}] }} - attribute = "localpref" + attribute = "locPrf" result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut, \ input_dict, attribute): Returns @@ -1574,7 +1648,8 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, for routes_from_router in input_dict.keys(): sh_ip_route_json = router_list[routes_from_router].vtysh_cmd( - command, isjson=True) + command, isjson=True + ) networks = input_dict[routes_from_router]["static_routes"] for network in networks: route = network["network"] @@ -1590,8 +1665,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, attribute_dict[next_hop_ip] = route_attribute["distance"] # Find next_hop for the route have LOWEST Admin Distance - _next_hop = min(attribute_dict, key=(lambda k: - attribute_dict[k])) + _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k])) compare = "LOWEST" # Show ip route @@ -1608,21 +1682,523 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict, if route in rib_routes_json: st_found = True # Verify next_hop in rib_routes_json - if rib_routes_json[route][0]["nexthops"][0]["ip"] == \ - _next_hop: + if rib_routes_json[route][0]["nexthops"][0]["ip"] == _next_hop: nh_found = True else: - errormsg = ("Nexthop {} is Missing for BGP route {}" - " in RIB of router {}\n".format(_next_hop, - route, router)) + errormsg = ( + "Nexthop {} is Missing for BGP route {}" + " in RIB of router {}\n".format(_next_hop, route, router) + ) return errormsg if st_found and nh_found: - logger.info("Best path for prefix: %s is installed according" - " to %s %s: (%s) in RIB of router %s", route, - compare, attribute, - attribute_dict[_next_hop], router) + logger.info( + "Best 
path for prefix: %s is installed according" + " to %s %s: (%s) in RIB of router %s", + route, + compare, + attribute, + attribute_dict[_next_hop], + router, + ) + + logger.info("Exiting lib API: verify_best_path_as_per_admin_distance()") + return True + + +@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) +def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None): + """ + This API is to verify whether bgp rib has any + matching route for a nexthop. + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: input dut router name + * `addr_type` : ip type ipv4/ipv6 + * `input_dict` : input dict, has details of static routes + * `next_hop`[optional]: next_hop which needs to be verified, + default = static + * 'aspath'[optional]: aspath which needs to be verified + + Usage + ----- + dut = 'r1' + next_hop = "192.168.1.10" + input_dict = topo['routers'] + aspath = "100 200 300" + result = verify_bgp_rib(tgen, addr_type, dut, tgen, input_dict, + next_hop, aspath) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: verify_bgp_rib()") + + router_list = tgen.routers() + additional_nexthops_in_required_nhs = [] + list1 = [] + list2 = [] + for routerInput in input_dict.keys(): + for router, rnode in router_list.iteritems(): + if router != dut: + continue + + # Verifying RIB routes + command = "show bgp" + + # Static routes + sleep(2) + logger.info("Checking router {} BGP RIB:".format(dut)) + + if "static_routes" in input_dict[routerInput]: + static_routes = input_dict[routerInput]["static_routes"] + + for static_route in static_routes: + found_routes = [] + missing_routes = [] + st_found = False + nh_found = False + vrf = static_route.setdefault("vrf", None) + if vrf: + cmd = "{} vrf {} {}".format(command, vrf, addr_type) + + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + network = static_route["network"] + + if "no_of_ip" in static_route: + no_of_ip = static_route["no_of_ip"] + else: + no_of_ip = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_ip) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + st_found = True + found_routes.append(st_rt) + + if next_hop: + if not isinstance(next_hop, list): + next_hop = [next_hop] + list1 = next_hop + found_hops = [ + rib_r["ip"] + for rib_r in rib_routes_json["routes"][st_rt][0][ + "nexthops" + ] + ] + list2 = found_hops + missing_list_of_nexthops = set(list2).difference(list1) + additional_nexthops_in_required_nhs = set( + list1 + ).difference(list2) + + if list2: + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + errormsg = ( + "Nexthop {} is Missing for " + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) + return errormsg + else: + nh_found = True + if aspath: + found_paths = rib_routes_json["routes"][st_rt][0][ + "path" + ] + if aspath == found_paths: + aspath_found = True + logger.info( + "Found AS path {} for route" + " {} in RIB of 
router " + "{}\n".format(aspath, st_rt, dut) + ) + else: + errormsg = ( + "AS Path {} is missing for route" + "for route {} in RIB of router {}\n".format( + aspath, st_rt, dut + ) + ) + return errormsg + + else: + missing_routes.append(st_rt) + + if nh_found: + logger.info( + "Found next_hop {} for all bgp" + " routes in RIB of" + " router {}\n".format(next_hop, router) + ) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in RIB of router {}, " + "routes: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, " + "found routes are: {} \n".format(dut, found_routes) + ) + continue + + if "bgp" not in input_dict[routerInput]: + continue + + # Advertise networks + bgp_data_list = input_dict[routerInput]["bgp"] + + if type(bgp_data_list) is not list: + bgp_data_list = [bgp_data_list] + + for bgp_data in bgp_data_list: + vrf_id = bgp_data.setdefault("vrf", None) + if vrf_id: + cmd = "{} vrf {} {}".format(command, vrf_id, addr_type) + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + bgp_net_advertise = bgp_data["address_family"][addr_type]["unicast"] + advertise_network = bgp_net_advertise.setdefault( + "advertise_networks", [] + ) + + for advertise_network_dict in advertise_network: + found_routes = [] + missing_routes = [] + found = False + + network = advertise_network_dict["network"] + + if "no_of_network" in advertise_network_dict: + no_of_network = advertise_network_dict["no_of_network"] + else: + no_of_network = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_network) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + found = True + found_routes.append(st_rt) + else: + found = False + missing_routes.append(st_rt) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in BGP RIB of router {}," + " are: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, found " + "routes are: {}\n".format(dut, found_routes) + ) + + logger.debug("Exiting lib API: verify_bgp_rib()") + return True + + +@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) +def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None): + """ + This API is to verify whether bgp rib has any + matching route for a nexthop. 
+ + Parameters + ---------- + * `tgen`: topogen object + * `dut`: input dut router name + * `addr_type` : ip type ipv4/ipv6 + * `input_dict` : input dict, has details of static routes + * `next_hop`[optional]: next_hop which needs to be verified, + default = static + * 'aspath'[optional]: aspath which needs to be verified + + Usage + ----- + dut = 'r1' + next_hop = "192.168.1.10" + input_dict = topo['routers'] + aspath = "100 200 300" + result = verify_bgp_rib(tgen, addr_type, dut, tgen, input_dict, + next_hop, aspath) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: verify_bgp_rib()") + + router_list = tgen.routers() + additional_nexthops_in_required_nhs = [] + list1 = [] + list2 = [] + for routerInput in input_dict.keys(): + for router, rnode in router_list.iteritems(): + if router != dut: + continue + + # Verifying RIB routes + command = "show bgp" + + # Static routes + sleep(2) + logger.info("Checking router {} BGP RIB:".format(dut)) + + if "static_routes" in input_dict[routerInput]: + static_routes = input_dict[routerInput]["static_routes"] + + for static_route in static_routes: + found_routes = [] + missing_routes = [] + st_found = False + nh_found = False + vrf = static_route.setdefault("vrf", None) + if vrf: + cmd = "{} vrf {} {}".format(command, vrf, addr_type) + + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + network = static_route["network"] + + if "no_of_ip" in static_route: + no_of_ip = static_route["no_of_ip"] + else: + no_of_ip = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_ip) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + st_found = True + found_routes.append(st_rt) + + if next_hop: + if not isinstance(next_hop, list): + next_hop = [next_hop] + list1 = next_hop + found_hops = [ + rib_r["ip"] + for rib_r in rib_routes_json["routes"][st_rt][0][ + "nexthops" + ] + ] + list2 = found_hops + missing_list_of_nexthops = set(list2).difference(list1) + additional_nexthops_in_required_nhs = set( + list1 + ).difference(list2) + + if list2: + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + errormsg = ( + "Nexthop {} is Missing for " + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) + return errormsg + else: + nh_found = True + if aspath: + found_paths = rib_routes_json["routes"][st_rt][0][ + "path" + ] + if aspath == found_paths: + aspath_found = True + logger.info( + "Found AS path {} for route" + " {} in RIB of router " + "{}\n".format(aspath, st_rt, dut) + ) + else: + errormsg = ( + "AS Path {} is missing for route" + "for route {} in RIB of router {}\n".format( + aspath, st_rt, dut + ) + ) + return errormsg + + else: + missing_routes.append(st_rt) + + if nh_found: + logger.info( + "Found next_hop {} for all bgp" + " routes in RIB of" + " router {}\n".format(next_hop, router) + ) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in RIB of router {}, " + "routes: {}\n".format(dut, 
missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, " + "found routes are: {} \n".format(dut, found_routes) + ) + continue + + if "bgp" not in input_dict[routerInput]: + continue + + # Advertise networks + bgp_data_list = input_dict[routerInput]["bgp"] + + if type(bgp_data_list) is not list: + bgp_data_list = [bgp_data_list] + + for bgp_data in bgp_data_list: + vrf_id = bgp_data.setdefault("vrf", None) + if vrf_id: + cmd = "{} vrf {} {}".format(command, vrf_id, addr_type) + else: + cmd = "{} {}".format(command, addr_type) + + cmd = "{} json".format(cmd) + + rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Verifying output dictionary rib_routes_json is not empty + if bool(rib_routes_json) == False: + errormsg = "No route found in rib of router {}..".format(router) + return errormsg + + bgp_net_advertise = bgp_data["address_family"][addr_type]["unicast"] + advertise_network = bgp_net_advertise.setdefault( + "advertise_networks", [] + ) + + for advertise_network_dict in advertise_network: + found_routes = [] + missing_routes = [] + found = False + + network = advertise_network_dict["network"] + + if "no_of_network" in advertise_network_dict: + no_of_network = advertise_network_dict["no_of_network"] + else: + no_of_network = 1 + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_network) + + for st_rt in ip_list: + st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != addr_type: + continue + + if st_rt in rib_routes_json["routes"]: + found = True + found_routes.append(st_rt) + else: + found = False + missing_routes.append(st_rt) + + if len(missing_routes) > 0: + errormsg = ( + "Missing route in BGP RIB of router {}," + " are: {}\n".format(dut, missing_routes) + ) + return errormsg + + if found_routes: + logger.info( + "Verified routes in router {} BGP RIB, found " + "routes are: {}\n".format(dut, found_routes) + ) - logger.info( - "Exiting lib API: verify_best_path_as_per_admin_distance()") + logger.debug("Exiting lib API: verify_bgp_rib()") return True diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py index 5a81036643..3d92718c78 100644 --- a/tests/topotests/lib/bgprib.py +++ b/tests/topotests/lib/bgprib.py @@ -16,14 +16,14 @@ # with this program; see the file COPYING; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# +# # want_rd_routes = [ # {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, # {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'}, -# +# # {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'}, # ] -# +# # ribRequireVpnRoutes('r2','Customer routes',want_rd_routes) # # want_unicast_routes = [ @@ -34,116 +34,133 @@ # ribRequireUnicastRoutes('r1','ipv4','','Customer routes in default',want_unicast_routes) # -from lutil import luCommand,luResult +from lutil import luCommand, luResult import json import re # gpz: get rib in json form and compare against desired routes class BgpRib: - def routes_include_wanted(self,pfxtbl,want,debug): - # helper function to RequireVpnRoutes - for pfx in pfxtbl.iterkeys(): - if debug: - print 'trying pfx ' + pfx - if pfx != want['p']: - if debug: - print 'want pfx=' + want['p'] + ', not ' + pfx - continue - if debug: - print 'have pfx=' + pfx - for r in pfxtbl[pfx]: - if debug: - print 'trying route' - nexthops = r['nexthops'] - for nh in nexthops: - if debug: - print 'trying nh ' + nh['ip'] - if nh['ip'] == want['n']: - if 
debug: - print 'found ' + want['n'] - return 1 - else: - if debug: - print 'want nh=' + want['n'] + ', not ' + nh['ip'] - if debug: - print 'missing route: pfx=' + want['p'] + ', nh=' + want['n'] - return 0 + def routes_include_wanted(self, pfxtbl, want, debug): + # helper function to RequireVpnRoutes + for pfx in pfxtbl.iterkeys(): + if debug: + print "trying pfx " + pfx + if pfx != want["p"]: + if debug: + print "want pfx=" + want["p"] + ", not " + pfx + continue + if debug: + print "have pfx=" + pfx + for r in pfxtbl[pfx]: + if debug: + print "trying route" + nexthops = r["nexthops"] + for nh in nexthops: + if debug: + print "trying nh " + nh["ip"] + if nh["ip"] == want["n"]: + if debug: + print "found " + want["n"] + return 1 + else: + if debug: + print "want nh=" + want["n"] + ", not " + nh["ip"] + if debug: + print "missing route: pfx=" + want["p"] + ", nh=" + want["n"] + return 0 def RequireVpnRoutes(self, target, title, wantroutes, debug=0): - import json + import json + logstr = "RequireVpnRoutes " + str(wantroutes) - #non json form for humans - luCommand(target,'vtysh -c "show bgp ipv4 vpn"','.','None','Get VPN RIB (non-json)') - ret = luCommand(target,'vtysh -c "show bgp ipv4 vpn json"','.*','None','Get VPN RIB (json)') - if re.search(r'^\s*$', ret): + # non json form for humans + luCommand( + target, + 'vtysh -c "show bgp ipv4 vpn"', + ".", + "None", + "Get VPN RIB (non-json)", + ) + ret = luCommand( + target, + 'vtysh -c "show bgp ipv4 vpn json"', + ".*", + "None", + "Get VPN RIB (json)", + ) + if re.search(r"^\s*$", ret): # degenerate case: empty json means no routes if len(wantroutes) > 0: luResult(target, False, title, logstr) return luResult(target, True, title, logstr) - rib = json.loads(ret) - rds = rib['routes']['routeDistinguishers'] - for want in wantroutes: - found = 0 - if debug: - print "want rd " + want['rd'] - for rd in rds.iterkeys(): - if rd != want['rd']: - continue - if debug: - print "found rd " + rd - table = rds[rd] - if self.routes_include_wanted(table,want,debug): - found = 1 - break - if not found: - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) + rib = json.loads(ret) + rds = rib["routes"]["routeDistinguishers"] + for want in wantroutes: + found = 0 + if debug: + print "want rd " + want["rd"] + for rd in rds.iterkeys(): + if rd != want["rd"]: + continue + if debug: + print "found rd " + rd + table = rds[rd] + if self.routes_include_wanted(table, want, debug): + found = 1 + break + if not found: + luResult(target, False, title, logstr) + return + luResult(target, True, title, logstr) - def RequireUnicastRoutes(self,target,afi,vrf,title,wantroutes,debug=0): + def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0): logstr = "RequireVpnRoutes " + str(wantroutes) - vrfstr = '' - if vrf != '': - vrfstr = 'vrf %s' % (vrf) + vrfstr = "" + if vrf != "": + vrfstr = "vrf %s" % (vrf) - if (afi != 'ipv4') and (afi != 'ipv6'): - print "ERROR invalid afi"; + if (afi != "ipv4") and (afi != "ipv6"): + print "ERROR invalid afi" - cmdstr = 'show bgp %s %s unicast' % (vrfstr, afi) - #non json form for humans - cmd = 'vtysh -c "%s"' % cmdstr - luCommand(target,cmd,'.','None','Get %s %s RIB (non-json)' % (vrfstr, afi)) + cmdstr = "show bgp %s %s unicast" % (vrfstr, afi) + # non json form for humans + cmd = 'vtysh -c "%s"' % cmdstr + luCommand(target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi)) cmd = 'vtysh -c "%s json"' % cmdstr - ret = luCommand(target,cmd,'.*','None','Get %s %s RIB (json)' % 
(vrfstr, afi)) - if re.search(r'^\s*$', ret): + ret = luCommand( + target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi) + ) + if re.search(r"^\s*$", ret): # degenerate case: empty json means no routes if len(wantroutes) > 0: luResult(target, False, title, logstr) return luResult(target, True, title, logstr) - rib = json.loads(ret) + rib = json.loads(ret) try: - table = rib['routes'] - # KeyError: 'routes' probably means missing/bad VRF + table = rib["routes"] + # KeyError: 'routes' probably means missing/bad VRF except KeyError as err: - if vrf != '': - errstr = '-script ERROR: check if wrong vrf (%s)' % (vrf) + if vrf != "": + errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf) else: - errstr = '-script ERROR: check if vrf missing' - luResult(target, False, title + errstr, logstr) - return - for want in wantroutes: - if not self.routes_include_wanted(table,want,debug): - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) + errstr = "-script ERROR: check if vrf missing" + luResult(target, False, title + errstr, logstr) + return + for want in wantroutes: + if not self.routes_include_wanted(table, want, debug): + luResult(target, False, title, logstr) + return + luResult(target, True, title, logstr) -BgpRib=BgpRib() +BgpRib = BgpRib() + def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0): BgpRib.RequireVpnRoutes(target, title, wantroutes, debug) + def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0): BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug) diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index fc7581b1f2..5ee59070cc 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -79,9 +79,9 @@ if config.has_option("topogen", "frrtest_log_dir"): frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp) print("frrtest_log_file..", frrtest_log_file) - logger = logger_config.get_logger(name="test_execution_logs", - log_level=loglevel, - target=frrtest_log_file) + logger = logger_config.get_logger( + name="test_execution_logs", log_level=loglevel, target=frrtest_log_file + ) print("Logs will be sent to logfile: {}".format(frrtest_log_file)) if config.has_option("topogen", "show_router_config"): @@ -94,10 +94,7 @@ ADDRESS_TYPES = os.environ.get("ADDRESS_TYPES") # Saves sequence id numbers -SEQ_ID = { - "prefix_lists": {}, - "route_maps": {} -} +SEQ_ID = {"prefix_lists": {}, "route_maps": {}} def get_seq_id(obj_type, router, obj_name): @@ -145,6 +142,7 @@ def set_seq_id(obj_type, router, id, obj_name): class InvalidCLIError(Exception): """Raise when the CLI command is wrong""" + pass @@ -169,16 +167,19 @@ def run_frr_cmd(rnode, cmd, isjson=False): else: print_data = ret_data - logger.info('Output for command [ %s] on router %s:\n%s', - cmd.rstrip("json"), rnode.name, print_data) + logger.info( + "Output for command [ %s] on router %s:\n%s", + cmd.rstrip("json"), + rnode.name, + print_data, + ) return ret_data else: - raise InvalidCLIError('No actual cmd passed') + raise InvalidCLIError("No actual cmd passed") -def create_common_configuration(tgen, router, data, config_type=None, - build=False): +def create_common_configuration(tgen, router, data, config_type=None, build=False): """ API to create object of class FRRConfig and also create frr_json.conf file. 
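The bgprib.py helpers reformatted above keep their original call surface; a minimal usage sketch based on the examples in that file's header comment (the import path, router names, prefixes and next hops are illustrative assumptions):

from bgprib import bgpribRequireVpnRoutes, bgpribRequireUnicastRoutes

# Wanted routes are plain dicts: 'rd' (VPN routes only), 'p' prefix, 'n' next hop.
want_rd_routes = [
    {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"},
    {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"},
]
want_unicast_routes = [{"p": "5.1.0.0/24", "n": "1.1.1.1"}]

bgpribRequireVpnRoutes("r2", "Customer routes", want_rd_routes)
bgpribRequireUnicastRoutes("r1", "ipv4", "", "Customer routes in default", want_unicast_routes)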
It will create interface and common configurations and save it to @@ -201,15 +202,17 @@ def create_common_configuration(tgen, router, data, config_type=None, fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE) - config_map = OrderedDict({ - "general_config": "! FRR General Config\n", - "interface_config": "! Interfaces Config\n", - "static_route": "! Static Route Config\n", - "prefix_list": "! Prefix List Config\n", - "bgp_community_list": "! Community List Config\n", - "route_maps": "! Route Maps Config\n", - "bgp": "! BGP Config\n" - }) + config_map = OrderedDict( + { + "general_config": "! FRR General Config\n", + "interface_config": "! Interfaces Config\n", + "static_route": "! Static Route Config\n", + "prefix_list": "! Prefix List Config\n", + "bgp_community_list": "! Community List Config\n", + "route_maps": "! Route Maps Config\n", + "bgp": "! BGP Config\n", + } + ) if build: mode = "a" @@ -225,8 +228,9 @@ def create_common_configuration(tgen, router, data, config_type=None, frr_cfg_fd.write("\n") except IOError as err: - logger.error("Unable to open FRR Config File. error(%s): %s" % - (err.errno, err.strerror)) + logger.error( + "Unable to open FRR Config File. error(%s): %s" % (err.errno, err.strerror) + ) return False finally: frr_cfg_fd.close() @@ -257,8 +261,7 @@ def reset_config_on_routers(tgen, routerName=None): continue router = router_list[rname] - logger.info("Configuring router %s to initial test configuration", - rname) + logger.info("Configuring router %s to initial test configuration", rname) cfg = router.run("vtysh -c 'show running'") fname = "{}/{}/frr.sav".format(TMPDIR, rname) dname = "{}/{}/delta.conf".format(TMPDIR, rname) @@ -266,9 +269,11 @@ def reset_config_on_routers(tgen, routerName=None): for line in cfg.split("\n"): line = line.strip() - if (line == "Building configuration..." or - line == "Current configuration:" or - not line): + if ( + line == "Building configuration..." + or line == "Current configuration:" + or not line + ): continue f.write(line) f.write("\n") @@ -279,37 +284,39 @@ def reset_config_on_routers(tgen, routerName=None): init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname) tempdir = mkdtemp() - with open(os.path.join(tempdir, 'vtysh.conf'), 'w') as fd: + with open(os.path.join(tempdir, "vtysh.conf"), "w") as fd: pass - command = "/usr/lib/frr/frr-reload.py --confdir {} --input {} --test {} > {}". \ - format(tempdir, run_cfg_file, init_cfg_file, dname) - result = call(command, shell=True, stderr=SUB_STDOUT, - stdout=SUB_PIPE) + command = "/usr/lib/frr/frr-reload.py --confdir {} --input {} --test {} > {}".format( + tempdir, run_cfg_file, init_cfg_file, dname + ) + result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE) - os.unlink(os.path.join(tempdir, 'vtysh.conf')) + os.unlink(os.path.join(tempdir, "vtysh.conf")) os.rmdir(tempdir) # Assert if command fail if result > 0: - logger.error("Delta file creation failed. Command executed %s", - command) - with open(run_cfg_file, 'r') as fd: - logger.info('Running configuration saved in %s is:\n%s', - run_cfg_file, fd.read()) - with open(init_cfg_file, 'r') as fd: - logger.info('Test configuration saved in %s is:\n%s', - init_cfg_file, fd.read()) - - err_cmd = ['/usr/bin/vtysh', '-m', '-f', run_cfg_file] + logger.error("Delta file creation failed. 
Command executed %s", command) + with open(run_cfg_file, "r") as fd: + logger.info( + "Running configuration saved in %s is:\n%s", run_cfg_file, fd.read() + ) + with open(init_cfg_file, "r") as fd: + logger.info( + "Test configuration saved in %s is:\n%s", init_cfg_file, fd.read() + ) + + err_cmd = ["/usr/bin/vtysh", "-m", "-f", run_cfg_file] result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE) output = result.communicate() for out_data in output: - temp_data = out_data.decode('utf-8').lower() + temp_data = out_data.decode("utf-8").lower() for out_err in ERROR_LIST: if out_err.lower() in temp_data: - logger.error("Found errors while validating data in" - " %s", run_cfg_file) + logger.error( + "Found errors while validating data in" " %s", run_cfg_file + ) raise InvalidCLIError(out_data) raise InvalidCLIError("Unknown error in %s", output) @@ -319,18 +326,19 @@ def reset_config_on_routers(tgen, routerName=None): t_delta = f.read() for line in t_delta.split("\n"): line = line.strip() - if (line == "Lines To Delete" or - line == "===============" or - line == "Lines To Add" or - line == "============" or - not line): + if ( + line == "Lines To Delete" + or line == "===============" + or line == "Lines To Add" + or line == "============" + or not line + ): continue delta.write(line) delta.write("\n") delta.write("end\n") - output = router.vtysh_multicmd(delta.getvalue(), - pretty_output=False) + output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False) delta.close() delta = StringIO.StringIO() @@ -343,8 +351,7 @@ def reset_config_on_routers(tgen, routerName=None): # Router current configuration to log file or console if # "show_router_config" is defined in "pytest.ini" if show_router_config: - logger.info("Configuration on router {} after config reset:". - format(rname)) + logger.info("Configuration on router {} after config reset:".format(rname)) logger.info(delta.getvalue()) delta.close() @@ -373,12 +380,13 @@ def load_config_to_router(tgen, routerName, save_bkup=False): router = router_list[rname] try: frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE) - frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, - FRRCFG_BKUP_FILE) + frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_BKUP_FILE) with open(frr_cfg_file, "r+") as cfg: data = cfg.read() - logger.info("Applying following configuration on router" - " {}:\n{}".format(rname, data)) + logger.info( + "Applying following configuration on router" + " {}:\n{}".format(rname, data) + ) if save_bkup: with open(frr_cfg_bkup, "w") as bkup: bkup.write(data) @@ -390,8 +398,10 @@ def load_config_to_router(tgen, routerName, save_bkup=False): cfg.truncate(0) except IOError as err: - errormsg = ("Unable to open config File. error(%s):" - " %s", (err.errno, err.strerror)) + errormsg = ( + "Unable to open config File. 
error(%s):" " %s", + (err.errno, err.strerror), + ) return errormsg # Router current configuration to log file or console if @@ -418,8 +428,9 @@ def start_topology(tgen): # Starting deamons router_list = tgen.routers() - ROUTER_LIST = sorted(router_list.keys(), - key=lambda x: int(re_search('\d+', x).group(0))) + ROUTER_LIST = sorted( + router_list.keys(), key=lambda x: int(re_search("\d+", x).group(0)) + ) TMPDIR = os.path.join(LOGDIR, tgen.modname) router_list = tgen.routers() @@ -430,31 +441,27 @@ def start_topology(tgen): # Creating router named dir and empty zebra.conf bgpd.conf files # inside the current directory - if os.path.isdir('{}'.format(rname)): + if os.path.isdir("{}".format(rname)): os.system("rm -rf {}".format(rname)) - os.mkdir('{}'.format(rname)) - os.system('chmod -R go+rw {}'.format(rname)) - os.chdir('{}/{}'.format(TMPDIR, rname)) - os.system('touch zebra.conf bgpd.conf') + os.mkdir("{}".format(rname)) + os.system("chmod -R go+rw {}".format(rname)) + os.chdir("{}/{}".format(TMPDIR, rname)) + os.system("touch zebra.conf bgpd.conf") else: - os.mkdir('{}'.format(rname)) - os.system('chmod -R go+rw {}'.format(rname)) - os.chdir('{}/{}'.format(TMPDIR, rname)) - os.system('touch zebra.conf bgpd.conf') + os.mkdir("{}".format(rname)) + os.system("chmod -R go+rw {}".format(rname)) + os.chdir("{}/{}".format(TMPDIR, rname)) + os.system("touch zebra.conf bgpd.conf") except IOError as (errno, strerror): logger.error("I/O error({0}): {1}".format(errno, strerror)) # Loading empty zebra.conf file to router, to start the zebra deamon router.load_config( - TopoRouter.RD_ZEBRA, - '{}/{}/zebra.conf'.format(TMPDIR, rname) + TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname) ) # Loading empty bgpd.conf file to router, to start the bgp deamon - router.load_config( - TopoRouter.RD_BGP, - '{}/{}/bgpd.conf'.format(TMPDIR, rname) - ) + router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname)) # Starting routers logger.info("Starting all routers once topology is created") @@ -483,6 +490,7 @@ def number_to_column(routerName): # Common APIs, will be used by all protocols ############################################# + def validate_ip_address(ip_address): """ Validates the type of ip address @@ -518,8 +526,9 @@ def validate_ip_address(ip_address): return "ipv6" if not v4 and not v6: - raise Exception("InvalidIpAddr", "%s is neither valid IPv4 or IPv6" - " address" % ip_address) + raise Exception( + "InvalidIpAddr", "%s is neither valid IPv4 or IPv6" " address" % ip_address + ) def check_address_types(addr_type=None): @@ -542,8 +551,11 @@ def check_address_types(addr_type=None): return addr_types if addr_type not in addr_types: - logger.error("{} not in supported/configured address types {}". - format(addr_type, addr_types)) + logger.error( + "{} not in supported/configured address types {}".format( + addr_type, addr_types + ) + ) return False return True @@ -589,8 +601,7 @@ def generate_ips(network, no_of_ips): return ipaddress_list -def find_interface_with_greater_ip(topo, router, loopback=True, - interface=True): +def find_interface_with_greater_ip(topo, router, loopback=True, interface=True): """ Returns highest interface ip for ipv4/ipv6. 
If loopback is there then it will return highest IP from loopback IPs otherwise from physical @@ -608,12 +619,14 @@ def find_interface_with_greater_ip(topo, router, loopback=True, if loopback: if "type" in data and data["type"] == "loopback": lo_exists = True - ip_address = topo["routers"][router]["links"][ - destRouterLink]["ipv4"].split("/")[0] + ip_address = topo["routers"][router]["links"][destRouterLink][ + "ipv4" + ].split("/")[0] lo_list.append(ip_address) if interface: - ip_address = topo["routers"][router]["links"][ - destRouterLink]["ipv4"].split("/")[0] + ip_address = topo["routers"][router]["links"][destRouterLink]["ipv4"].split( + "/" + )[0] interfaces_list.append(ip_address) if lo_exists: @@ -625,17 +638,17 @@ def find_interface_with_greater_ip(topo, router, loopback=True, def write_test_header(tc_name): """ Display message at beginning of test case""" count = 20 - logger.info("*"*(len(tc_name)+count)) + logger.info("*" * (len(tc_name) + count)) step("START -> Testcase : %s" % tc_name, reset=True) - logger.info("*"*(len(tc_name)+count)) + logger.info("*" * (len(tc_name) + count)) def write_test_footer(tc_name): """ Display message at end of test case""" count = 21 - logger.info("="*(len(tc_name)+count)) + logger.info("=" * (len(tc_name) + count)) logger.info("Testcase : %s -> PASSED", tc_name) - logger.info("="*(len(tc_name)+count)) + logger.info("=" * (len(tc_name) + count)) def interface_status(tgen, topo, input_dict): @@ -664,8 +677,8 @@ def interface_status(tgen, topo, input_dict): global frr_cfg for router in input_dict.keys(): - interface_list = input_dict[router]['interface_list'] - status = input_dict[router].setdefault('status', 'up') + interface_list = input_dict[router]["interface_list"] + status = input_dict[router].setdefault("status", "up") for intf in interface_list: rnode = tgen.routers()[router] interface_set_status(rnode, intf, status) @@ -698,11 +711,10 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0): """ def _retry(func): - @wraps(func) def func_retry(*args, **kwargs): - _wait = kwargs.pop('wait', wait) - _attempts = kwargs.pop('attempts', attempts) + _wait = kwargs.pop("wait", wait) + _attempts = kwargs.pop("attempts", attempts) _attempts = int(_attempts) if _attempts < 0: raise ValueError("attempts must be 0 or greater") @@ -711,11 +723,11 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0): logger.info("Waiting for [%s]s as initial delay", initial_wait) sleep(initial_wait) - _return_is_str = kwargs.pop('return_is_str', return_is_str) + _return_is_str = kwargs.pop("return_is_str", return_is_str) for i in range(1, _attempts + 1): try: - _expected = kwargs.setdefault('expected', True) - kwargs.pop('expected') + _expected = kwargs.setdefault("expected", True) + kwargs.pop("expected") ret = func(*args, **kwargs) logger.debug("Function returned %s" % ret) if return_is_str and isinstance(ret, bool) and _expected: @@ -727,17 +739,17 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0): return ret except Exception as err: if _attempts == i: - logger.info("Max number of attempts (%r) reached", - _attempts) + logger.info("Max number of attempts (%r) reached", _attempts) raise else: logger.info("Function returned %s", err) if i < _attempts: - logger.info("Retry [#%r] after sleeping for %ss" - % (i, _wait)) + logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait)) sleep(_wait) + func_retry._original = func return func_retry + return _retry @@ -745,6 +757,7 @@ class Stepper: """ Prints step number for the 
test case step being executed """ + count = 1 def __call__(self, msg, reset): @@ -795,24 +808,17 @@ def create_interfaces_cfg(tgen, topo, build=False): interface_name = destRouterLink else: interface_name = data["interface"] - interface_data.append("interface {}".format( - str(interface_name) - )) + interface_data.append("interface {}".format(str(interface_name))) if "ipv4" in data: intf_addr = c_data["links"][destRouterLink]["ipv4"] - interface_data.append("ip address {}".format( - intf_addr - )) + interface_data.append("ip address {}".format(intf_addr)) if "ipv6" in data: intf_addr = c_data["links"][destRouterLink]["ipv6"] - interface_data.append("ipv6 address {}".format( - intf_addr - )) - - result = create_common_configuration(tgen, c_router, - interface_data, - "interface_config", - build=build) + interface_data.append("ipv6 address {}".format(intf_addr)) + + result = create_common_configuration( + tgen, c_router, interface_data, "interface_config", build=build + ) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -880,13 +886,10 @@ def create_static_routes(tgen, input_dict, build=False): del_action = static_route.setdefault("delete", False) # No of IPs no_of_ip = static_route.setdefault("no_of_ip", 1) - admin_distance = static_route.setdefault("admin_distance", - None) + admin_distance = static_route.setdefault("admin_distance", None) tag = static_route.setdefault("tag", None) - if "next_hop" not in static_route or \ - "network" not in static_route: - errormsg = "'next_hop' or 'network' missing in" \ - " input_dict" + if "next_hop" not in static_route or "network" not in static_route: + errormsg = "'next_hop' or 'network' missing in" " input_dict" return errormsg next_hop = static_route["next_hop"] @@ -914,10 +917,9 @@ def create_static_routes(tgen, input_dict, build=False): static_routes_list.append(cmd) - result = create_common_configuration(tgen, router, - static_routes_list, - "static_route", - build=build) + result = create_common_configuration( + tgen, router, static_routes_list, "static_route", build=build + ) except InvalidCLIError: # Traceback @@ -992,10 +994,8 @@ def create_prefix_lists(tgen, input_dict, build=False): for prefix_name, prefix_list in prefix_data.iteritems(): for prefix_dict in prefix_list: - if "action" not in prefix_dict or \ - "network" not in prefix_dict: - errormsg = "'action' or network' missing in" \ - " input_dict" + if "action" not in prefix_dict or "network" not in prefix_dict: + errormsg = "'action' or network' missing in" " input_dict" return errormsg network_addr = prefix_dict["network"] @@ -1005,11 +1005,9 @@ def create_prefix_lists(tgen, input_dict, build=False): seqid = prefix_dict.setdefault("seqid", None) del_action = prefix_dict.setdefault("delete", False) if seqid is None: - seqid = get_seq_id("prefix_lists", router, - prefix_name) + seqid = get_seq_id("prefix_lists", router, prefix_name) else: - set_seq_id("prefix_lists", router, seqid, - prefix_name) + set_seq_id("prefix_lists", router, seqid, prefix_name) if addr_type == "ipv4": protocol = "ip" @@ -1028,10 +1026,9 @@ def create_prefix_lists(tgen, input_dict, build=False): cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration(tgen, router, - config_data, - "prefix_list", - build=build) + result = create_common_configuration( + tgen, router, config_data, "prefix_list", build=build + ) except InvalidCLIError: # Traceback @@ -1101,9 +1098,9 @@ def create_route_maps(tgen, input_dict, build=False): "tag": "tag_id" }, "set": { - 
"localpref": 150, - "med": 30, - "aspath": { + "locPrf": 150, + "metric": 30, + "path": { "num": 20000, "action": "prepend", }, @@ -1137,8 +1134,7 @@ def create_route_maps(tgen, input_dict, build=False): logger.debug("route_maps not present in input_dict") continue rmap_data = [] - for rmap_name, rmap_value in \ - input_dict[router]["route_maps"].iteritems(): + for rmap_name, rmap_value in input_dict[router]["route_maps"].iteritems(): for rmap_dict in rmap_value: del_action = rmap_dict.setdefault("delete", False) @@ -1160,38 +1156,39 @@ def create_route_maps(tgen, input_dict, build=False): else: set_seq_id("route_maps", router, seq_id, rmap_name) - rmap_data.append("route-map {} {} {}".format( - rmap_name, rmap_action, seq_id - )) + rmap_data.append( + "route-map {} {} {}".format(rmap_name, rmap_action, seq_id) + ) if "continue" in rmap_dict: continue_to = rmap_dict["continue"] if continue_to: - rmap_data.append("on-match goto {}". - format(continue_to)) + rmap_data.append("on-match goto {}".format(continue_to)) else: - logger.error("In continue, 'route-map entry " - "sequence number' is not provided") + logger.error( + "In continue, 'route-map entry " + "sequence number' is not provided" + ) return False if "goto" in rmap_dict: go_to = rmap_dict["goto"] if go_to: - rmap_data.append("on-match goto {}". - format(go_to)) + rmap_data.append("on-match goto {}".format(go_to)) else: - logger.error("In goto, 'Goto Clause number' is not" - " provided") + logger.error( + "In goto, 'Goto Clause number' is not" " provided" + ) return False if "call" in rmap_dict: call_rmap = rmap_dict["call"] if call_rmap: - rmap_data.append("call {}". - format(call_rmap)) + rmap_data.append("call {}".format(call_rmap)) else: - logger.error("In call, 'destination Route-Map' is" - " not provided") + logger.error( + "In call, 'destination Route-Map' is" " not provided" + ) return False # Verifying if SET criteria is defined @@ -1199,24 +1196,22 @@ def create_route_maps(tgen, input_dict, build=False): set_data = rmap_dict["set"] ipv4_data = set_data.setdefault("ipv4", {}) ipv6_data = set_data.setdefault("ipv6", {}) - local_preference = set_data.setdefault("localpref", - None) - metric = set_data.setdefault("med", None) - as_path = set_data.setdefault("aspath", {}) + local_preference = set_data.setdefault("locPrf", None) + metric = set_data.setdefault("metric", None) + as_path = set_data.setdefault("path", {}) weight = set_data.setdefault("weight", None) community = set_data.setdefault("community", {}) - large_community = set_data.setdefault( - "large_community", {}) - large_comm_list = set_data.setdefault( - "large_comm_list", {}) + large_community = set_data.setdefault("large_community", {}) + large_comm_list = set_data.setdefault("large_comm_list", {}) set_action = set_data.setdefault("set_action", None) nexthop = set_data.setdefault("nexthop", None) origin = set_data.setdefault("origin", None) # Local Preference if local_preference: - rmap_data.append("set local-preference {}". - format(local_preference)) + rmap_data.append( + "set local-preference {}".format(local_preference) + ) # Metric if metric: @@ -1231,8 +1226,9 @@ def create_route_maps(tgen, input_dict, build=False): as_num = as_path.setdefault("as_num", None) as_action = as_path.setdefault("as_action", None) if as_action and as_num: - rmap_data.append("set as-path {} {}". 
- format(as_action, as_num)) + rmap_data.append( + "set as-path {} {}".format(as_action, as_num) + ) # Community if community: @@ -1244,14 +1240,12 @@ def create_route_maps(tgen, input_dict, build=False): cmd = "{} {}".format(cmd, comm_action) rmap_data.append(cmd) else: - logger.error("In community, AS Num not" - " provided") + logger.error("In community, AS Num not" " provided") return False if large_community: num = large_community.setdefault("num", None) - comm_action = large_community.setdefault("action", - None) + comm_action = large_community.setdefault("action", None) if num: cmd = "set large-community {}".format(num) if comm_action: @@ -1259,13 +1253,13 @@ def create_route_maps(tgen, input_dict, build=False): rmap_data.append(cmd) else: - logger.error("In large_community, AS Num not" - " provided") + logger.error( + "In large_community, AS Num not" " provided" + ) return False if large_comm_list: id = large_comm_list.setdefault("id", None) - del_comm = large_comm_list.setdefault("delete", - None) + del_comm = large_comm_list.setdefault("delete", None) if id: cmd = "set large-comm-list {}".format(id) if del_comm: @@ -1273,43 +1267,36 @@ def create_route_maps(tgen, input_dict, build=False): rmap_data.append(cmd) else: - logger.error("In large_comm_list 'id' not" - " provided") + logger.error("In large_comm_list 'id' not" " provided") return False # Weight if weight: - rmap_data.append("set weight {}".format( - weight)) + rmap_data.append("set weight {}".format(weight)) if ipv6_data: nexthop = ipv6_data.setdefault("nexthop", None) if nexthop: - rmap_data.append("set ipv6 next-hop {}".format( - nexthop - )) + rmap_data.append("set ipv6 next-hop {}".format(nexthop)) # Adding MATCH and SET sequence to RMAP if defined if "match" in rmap_dict: match_data = rmap_dict["match"] ipv4_data = match_data.setdefault("ipv4", {}) ipv6_data = match_data.setdefault("ipv6", {}) - community = match_data.setdefault( - "community_list",{}) - large_community = match_data.setdefault( - "large_community", {} - ) + community = match_data.setdefault("community_list", {}) + large_community = match_data.setdefault("large_community", {}) large_community_list = match_data.setdefault( "large_community_list", {} ) if ipv4_data: # fetch prefix list data from rmap - prefix_name = \ - ipv4_data.setdefault("prefix_lists", - None) + prefix_name = ipv4_data.setdefault("prefix_lists", None) if prefix_name: - rmap_data.append("match ip address" - " prefix-list {}".format(prefix_name)) + rmap_data.append( + "match ip address" + " prefix-list {}".format(prefix_name) + ) # fetch tag data from rmap tag = ipv4_data.setdefault("tag", None) @@ -1318,16 +1305,19 @@ def create_route_maps(tgen, input_dict, build=False): # fetch large community data from rmap large_community_list = ipv4_data.setdefault( - "large_community_list",{}) + "large_community_list", {} + ) large_community = match_data.setdefault( - "large_community", {}) + "large_community", {} + ) if ipv6_data: - prefix_name = ipv6_data.setdefault("prefix_lists", - None) + prefix_name = ipv6_data.setdefault("prefix_lists", None) if prefix_name: - rmap_data.append("match ipv6 address" - " prefix-list {}".format(prefix_name)) + rmap_data.append( + "match ipv6 address" + " prefix-list {}".format(prefix_name) + ) # fetch tag data from rmap tag = ipv6_data.setdefault("tag", None) @@ -1336,54 +1326,64 @@ def create_route_maps(tgen, input_dict, build=False): # fetch large community data from rmap large_community_list = ipv6_data.setdefault( - "large_community_list",{}) + 
"large_community_list", {} + ) large_community = match_data.setdefault( - "large_community", {}) + "large_community", {} + ) if community: if "id" not in community: - logger.error("'id' is mandatory for " - "community-list in match" - " criteria") + logger.error( + "'id' is mandatory for " + "community-list in match" + " criteria" + ) return False cmd = "match community {}".format(community["id"]) - exact_match = community.setdefault("exact_match", - False) + exact_match = community.setdefault("exact_match", False) if exact_match: cmd = "{} exact-match".format(cmd) rmap_data.append(cmd) if large_community: if "id" not in large_community: - logger.error("'id' is mandatory for " - "large-community-list in match " - "criteria") + logger.error( + "'id' is mandatory for " + "large-community-list in match " + "criteria" + ) return False cmd = "match large-community {}".format( - large_community["id"]) + large_community["id"] + ) exact_match = large_community.setdefault( - "exact_match", False) + "exact_match", False + ) if exact_match: cmd = "{} exact-match".format(cmd) rmap_data.append(cmd) if large_community_list: if "id" not in large_community_list: - logger.error("'id' is mandatory for " - "large-community-list in match " - "criteria") + logger.error( + "'id' is mandatory for " + "large-community-list in match " + "criteria" + ) return False cmd = "match large-community {}".format( - large_community_list["id"]) + large_community_list["id"] + ) exact_match = large_community_list.setdefault( - "exact_match", False) + "exact_match", False + ) if exact_match: cmd = "{} exact-match".format(cmd) rmap_data.append(cmd) - result = create_common_configuration(tgen, router, - rmap_data, - "route_maps", - build=build) + result = create_common_configuration( + tgen, router, rmap_data, "route_maps", build=build + ) except InvalidCLIError: # Traceback @@ -1424,12 +1424,7 @@ def delete_route_maps(tgen, input_dict): rmap_data = input_dict[router] rmap_data["route_maps"] = {} for route_map_name in route_maps: - rmap_data["route_maps"].update({ - route_map_name: - [{ - "delete": True - }] - }) + rmap_data["route_maps"].update({route_map_name: [{"delete": True}]}) return create_route_maps(tgen, input_dict) @@ -1478,10 +1473,9 @@ def create_bgp_community_lists(tgen, input_dict, build=False): community_list = input_dict[router]["bgp_community_lists"] for community_dict in community_list: del_action = community_dict.setdefault("delete", False) - community_type = community_dict.setdefault("community_type", - None) + community_type = community_dict.setdefault("community_type", None) action = community_dict.setdefault("action", None) - value = community_dict.setdefault("value", '') + value = community_dict.setdefault("value", "") large = community_dict.setdefault("large", None) name = community_dict.setdefault("name", None) if large: @@ -1490,28 +1484,30 @@ def create_bgp_community_lists(tgen, input_dict, build=False): cmd = "bgp community-list" if not large and not (community_type and action and value): - errormsg = "community_type, action and value are " \ - "required in bgp_community_list" + errormsg = ( + "community_type, action and value are " + "required in bgp_community_list" + ) logger.error(errormsg) return False try: community_type = int(community_type) - cmd = "{} {} {} {}".format(cmd, community_type, action, - value) + cmd = "{} {} {} {}".format(cmd, community_type, action, value) except ValueError: cmd = "{} {} {} {} {}".format( - cmd, community_type, name, action, value) + cmd, community_type, name, 
action, value + ) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration(tgen, router, config_data, - "bgp_community_list", - build=build) + result = create_common_configuration( + tgen, router, config_data, "bgp_community_list", build=build + ) except InvalidCLIError: # Traceback @@ -1634,8 +1630,9 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None): # Verifying output dictionary rib_routes_json is not empty if bool(rib_routes_json) is False: - errormsg = "No {} route found in rib of router {}..". \ - format(protocol, router) + errormsg = "No {} route found in rib of router {}..".format( + protocol, router + ) return errormsg if "static_routes" in input_dict[routerInput]: @@ -1665,47 +1662,62 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None): if type(next_hop) is not list: next_hop = [next_hop] - found_hops = [rib_r["ip"] for rib_r in - rib_routes_json[st_rt][0][ - "nexthops"]] + found_hops = [ + rib_r["ip"] + for rib_r in rib_routes_json[st_rt][0]["nexthops"] + ] for nh in found_hops: nh_found = False if nh and nh in next_hop: nh_found = True else: - errormsg = ("Nexthop {} is Missing for {}" - " route {} in RIB of router" - " {}\n".format(next_hop, - protocol, - st_rt, dut)) + errormsg = ( + "Nexthop {} is Missing for {}" + " route {} in RIB of router" + " {}\n".format( + next_hop, protocol, st_rt, dut + ) + ) return errormsg else: missing_routes.append(st_rt) if nh_found: - logger.info("Found next_hop %s for all routes in RIB of" - " router %s\n", next_hop, dut) + logger.info( + "Found next_hop %s for all routes in RIB of" " router %s\n", + next_hop, + dut, + ) if not st_found and len(missing_routes) > 0: - errormsg = "Missing route in RIB of router {}, routes: " \ - "{}\n".format(dut, missing_routes) + errormsg = ( + "Missing route in RIB of router {}, routes: " + "{}\n".format(dut, missing_routes) + ) return errormsg - logger.info("Verified routes in router %s RIB, found routes" - " are: %s\n", dut, found_routes) + logger.info( + "Verified routes in router %s RIB, found routes" " are: %s\n", + dut, + found_routes, + ) continue if "bgp" in input_dict[routerInput]: - if 'advertise_networks' in input_dict[routerInput]["bgp"]\ - ["address_family"][addr_type]["unicast"]: + if ( + "advertise_networks" + in input_dict[routerInput]["bgp"]["address_family"][addr_type][ + "unicast" + ] + ): found_routes = [] missing_routes = [] - advertise_network = input_dict[routerInput]["bgp"]\ - ["address_family"][addr_type]["unicast"]\ - ["advertise_networks"] + advertise_network = input_dict[routerInput]["bgp"][ + "address_family" + ][addr_type]["unicast"]["advertise_networks"] for advertise_network_dict in advertise_network: start_ip = advertise_network_dict["network"] @@ -1730,34 +1742,43 @@ def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None): next_hop = [next_hop] for index, nh in enumerate(next_hop): - if rib_routes_json[st_rt][0]\ - ['nexthops'][index]['ip'] == nh: + if ( + rib_routes_json[st_rt][0]["nexthops"][ + index + ]["ip"] + == nh + ): nh_found = True else: - errormsg=("Nexthop {} is Missing" - " for {} route {} in " - "RIB of router {}\n".\ - format(next_hop, - protocol, - st_rt, dut)) + errormsg = ( + "Nexthop {} is Missing" + " for {} route {} in " + "RIB of router {}\n".format( + next_hop, protocol, st_rt, dut + ) + ) return errormsg else: missing_routes.append(st_rt) if nh_found: - logger.info("Found next_hop {} for all routes in RIB" - " of router 
{}\n".format(next_hop, dut)) + logger.info( + "Found next_hop {} for all routes in RIB" + " of router {}\n".format(next_hop, dut) + ) if not found and len(missing_routes) > 0: - errormsg = ("Missing {} route in RIB of router {}, " - "routes: {} \n".\ - format(addr_type, dut, missing_routes)) + errormsg = ( + "Missing {} route in RIB of router {}, " + "routes: {} \n".format(addr_type, dut, missing_routes) + ) return errormsg - logger.info("Verified {} routes in router {} RIB, found" - " routes are: {}\n".\ - format(addr_type, dut, found_routes)) + logger.info( + "Verified {} routes in router {} RIB, found" + " routes are: {}\n".format(addr_type, dut, found_routes) + ) logger.debug("Exiting lib API: verify_rib()") return True @@ -1810,8 +1831,11 @@ def verify_admin_distance_for_static_routes(tgen, input_dict): command = "show ipv6 route json" show_ip_route_json = run_frr_cmd(rnode, command, isjson=True) - logger.info("Verifying admin distance for static route %s" - " under dut %s:", static_route, router) + logger.info( + "Verifying admin distance for static route %s" " under dut %s:", + static_route, + router, + ) network = static_route["network"] next_hop = static_route["next_hop"] admin_distance = static_route["admin_distance"] @@ -1819,23 +1843,32 @@ def verify_admin_distance_for_static_routes(tgen, input_dict): if network in show_ip_route_json: if route_data["nexthops"][0]["ip"] == next_hop: if route_data["distance"] != admin_distance: - errormsg = ("Verification failed: admin distance" - " for static route {} under dut {}," - " found:{} but expected:{}". - format(static_route, router, - route_data["distance"], - admin_distance)) + errormsg = ( + "Verification failed: admin distance" + " for static route {} under dut {}," + " found:{} but expected:{}".format( + static_route, + router, + route_data["distance"], + admin_distance, + ) + ) return errormsg else: - logger.info("Verification successful: admin" - " distance for static route %s under" - " dut %s, found:%s", static_route, - router, route_data["distance"]) + logger.info( + "Verification successful: admin" + " distance for static route %s under" + " dut %s, found:%s", + static_route, + router, + route_data["distance"], + ) else: - errormsg = ("Static route {} not found in " - "show_ip_route_json for dut {}". 
- format(network, router)) + errormsg = ( + "Static route {} not found in " + "show_ip_route_json for dut {}".format(network, router) + ) return errormsg logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()") @@ -1885,12 +1918,17 @@ def verify_prefix_lists(tgen, input_dict): for prefix_list in prefix_lists_addr[addr_type].keys(): if prefix_list in show_prefix_list: - errormsg = ("Prefix list {} is/are present in the router" - " {}".format(prefix_list, router)) + errormsg = ( + "Prefix list {} is/are present in the router" + " {}".format(prefix_list, router) + ) return errormsg - logger.info("Prefix list %s is/are not present in the router" - " from router %s", prefix_list, router) + logger.info( + "Prefix list %s is/are not present in the router" " from router %s", + prefix_list, + router, + ) logger.debug("Exiting lib API: verify_prefix_lists()") return True @@ -1933,12 +1971,16 @@ def verify_route_maps(tgen, input_dict): route_maps = input_dict[router]["route_maps"] for route_map in route_maps: if route_map in show_route_maps: - errormsg = ("Route map {} is not deleted from router" - " {}".format(route_map, router)) + errormsg = "Route map {} is not deleted from router" " {}".format( + route_map, router + ) return errormsg - logger.info("Route map %s is/are deleted successfully from" - " router %s", route_maps, router) + logger.info( + "Route map %s is/are deleted successfully from" " router %s", + route_maps, + router, + ) logger.debug("Exiting lib API: verify_route_maps()") return True @@ -1977,47 +2019,60 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None): rnode = tgen.routers()[router] - logger.debug("Verifying BGP community attributes on dut %s: for %s " - "network %s", router, addr_type, network) + logger.debug( + "Verifying BGP community attributes on dut %s: for %s " "network %s", + router, + addr_type, + network, + ) for net in network: cmd = "show bgp {} {} json".format(addr_type, net) show_bgp_json = rnode.vtysh_cmd(cmd, isjson=True) logger.info(show_bgp_json) if "paths" not in show_bgp_json: - return "Prefix {} not found in BGP table of router: {}". 
\ - format(net, router) + return "Prefix {} not found in BGP table of router: {}".format(net, router) as_paths = show_bgp_json["paths"] found = False for i in range(len(as_paths)): - if "largeCommunity" in show_bgp_json["paths"][i] or \ - "community" in show_bgp_json["paths"][i]: + if ( + "largeCommunity" in show_bgp_json["paths"][i] + or "community" in show_bgp_json["paths"][i] + ): found = True - logger.info("Large Community attribute is found for route:" - " %s in router: %s", net, router) + logger.info( + "Large Community attribute is found for route:" " %s in router: %s", + net, + router, + ) if input_dict is not None: for criteria, comm_val in input_dict.items(): - show_val = show_bgp_json["paths"][i][criteria][ - "string"] + show_val = show_bgp_json["paths"][i][criteria]["string"] if comm_val == show_val: - logger.info("Verifying BGP %s for prefix: %s" - " in router: %s, found expected" - " value: %s", criteria, net, router, - comm_val) + logger.info( + "Verifying BGP %s for prefix: %s" + " in router: %s, found expected" + " value: %s", + criteria, + net, + router, + comm_val, + ) else: - errormsg = "Failed: Verifying BGP attribute" \ - " {} for route: {} in router: {}" \ - ", expected value: {} but found" \ - ": {}".format( - criteria, net, router, comm_val, - show_val) + errormsg = ( + "Failed: Verifying BGP attribute" + " {} for route: {} in router: {}" + ", expected value: {} but found" + ": {}".format(criteria, net, router, comm_val, show_val) + ) return errormsg if not found: errormsg = ( "Large Community attribute is not found for route: " - "{} in router: {} ".format(net, router)) + "{} in router: {} ".format(net, router) + ) return errormsg logger.debug("Exiting lib API: verify_bgp_community()") @@ -2057,25 +2112,24 @@ def verify_create_community_list(tgen, input_dict): rnode = tgen.routers()[router] - logger.info("Verifying large-community is created for dut %s:", - router) + logger.info("Verifying large-community is created for dut %s:", router) for comm_data in input_dict[router]["bgp_community_lists"]: comm_name = comm_data["name"] comm_type = comm_data["community_type"] - show_bgp_community = \ - run_frr_cmd(rnode, - "show bgp large-community-list {} detail". - format(comm_name)) + show_bgp_community = run_frr_cmd( + rnode, "show bgp large-community-list {} detail".format(comm_name) + ) # Verify community list and type - if comm_name in show_bgp_community and comm_type in \ - show_bgp_community: - logger.info("BGP %s large-community-list %s is" - " created", comm_type, comm_name) + if comm_name in show_bgp_community and comm_type in show_bgp_community: + logger.info( + "BGP %s large-community-list %s is" " created", comm_type, comm_name + ) else: - errormsg = "BGP {} large-community-list {} is not" \ - " created".format(comm_type, comm_name) + errormsg = "BGP {} large-community-list {} is not" " created".format( + comm_type, comm_name + ) return errormsg logger.debug("Exiting lib API: verify_create_community_list()") diff --git a/tests/topotests/lib/test/test_json.py b/tests/topotests/lib/test/test_json.py index 3927ba095d..ba118d607a 100755 --- a/tests/topotests/lib/test/test_json.py +++ b/tests/topotests/lib/test/test_json.py @@ -32,36 +32,37 @@ import pytest # Save the Current Working Directory to find lib files. 
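# A minimal sketch of the json_cmp() contract these tests exercise (the route
# dictionaries below are illustrative, not taken from this file): None means
# the second argument is contained in the first, a non-None error string means
# it is not, and a value of None in the expected data asserts that the key is
# absent.  This is how topotests compare vtysh JSON output against a template:
#
#     from lib.topotest import json_cmp
#     got = {"10.0.1.0/24": [{"protocol": "bgp"}], "10.0.2.0/24": [{"protocol": "static"}]}
#     assert json_cmp(got, {"10.0.1.0/24": [{"protocol": "bgp"}]}) is None
#     assert json_cmp(got, {"192.0.2.0/24": None}) is None
#     assert json_cmp(got, {"10.0.1.0/24": [{"protocol": "static"}]}) is not None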
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topotest import json_cmp + def test_json_intersect_true(): "Test simple correct JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': 'item3', - 'i100': 'item4', + "i1": "item1", + "i2": "item2", + "i3": "item3", + "i100": "item4", } dsub1 = { - 'i1': 'item1', - 'i3': 'item3', + "i1": "item1", + "i3": "item3", } dsub2 = { - 'i1': 'item1', - 'i2': 'item2', + "i1": "item1", + "i2": "item2", } dsub3 = { - 'i100': 'item4', - 'i2': 'item2', + "i100": "item4", + "i2": "item2", } dsub4 = { - 'i50': None, - 'i100': 'item4', + "i50": None, + "i100": "item4", } assert json_cmp(dcomplete, dsub1) is None @@ -69,99 +70,66 @@ def test_json_intersect_true(): assert json_cmp(dcomplete, dsub3) is None assert json_cmp(dcomplete, dsub4) is None + def test_json_intersect_false(): "Test simple incorrect JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': 'item3', - 'i100': 'item4', + "i1": "item1", + "i2": "item2", + "i3": "item3", + "i100": "item4", } # Incorrect value for 'i1' dsub1 = { - 'i1': 'item3', - 'i3': 'item3', + "i1": "item3", + "i3": "item3", } # Non-existing key 'i5' dsub2 = { - 'i1': 'item1', - 'i5': 'item2', + "i1": "item1", + "i5": "item2", } # Key should not exist dsub3 = { - 'i100': None, + "i100": None, } assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None assert json_cmp(dcomplete, dsub3) is not None + def test_json_intersect_multilevel_true(): "Test multi level correct JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': { - 'i100': 'item100', + "i1": "item1", + "i2": "item2", + "i3": {"i100": "item100",}, + "i4": { + "i41": {"i411": "item411",}, + "i42": {"i421": "item421", "i422": "item422",}, }, - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i421': 'item421', - 'i422': 'item422', - } - } } dsub1 = { - 'i1': 'item1', - 'i3': { - 'i100': 'item100', - }, - 'i10': None, + "i1": "item1", + "i3": {"i100": "item100",}, + "i10": None, } dsub2 = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': {}, + "i1": "item1", + "i2": "item2", + "i3": {}, } dsub3 = { - 'i2': 'item2', - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i422': 'item422', - 'i450': None, - } - } - } - dsub4 = { - 'i2': 'item2', - 'i4': { - 'i41': {}, - 'i42': { - 'i450': None, - } - } - } - dsub5 = { - 'i2': 'item2', - 'i3': { - 'i100': 'item100', - }, - 'i4': { - 'i42': { - 'i450': None, - } - } + "i2": "item2", + "i4": {"i41": {"i411": "item411",}, "i42": {"i422": "item422", "i450": None,}}, } + dsub4 = {"i2": "item2", "i4": {"i41": {}, "i42": {"i450": None,}}} + dsub5 = {"i2": "item2", "i3": {"i100": "item100",}, "i4": {"i42": {"i450": None,}}} assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -169,78 +137,43 @@ def test_json_intersect_multilevel_true(): assert json_cmp(dcomplete, dsub4) is None assert json_cmp(dcomplete, dsub5) is None + def test_json_intersect_multilevel_false(): "Test multi level incorrect JSON intersections" dcomplete = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': { - 'i100': 'item100', + "i1": "item1", + "i2": "item2", + "i3": {"i100": "item100",}, + "i4": { + "i41": {"i411": "item411",}, + "i42": {"i421": "item421", "i422": "item422",}, }, - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i421': 'item421', - 'i422': 'item422', - } - } } # Incorrect sub-level 
value dsub1 = { - 'i1': 'item1', - 'i3': { - 'i100': 'item00', - }, - 'i10': None, + "i1": "item1", + "i3": {"i100": "item00",}, + "i10": None, } # Inexistent sub-level dsub2 = { - 'i1': 'item1', - 'i2': 'item2', - 'i3': None, + "i1": "item1", + "i2": "item2", + "i3": None, } # Inexistent sub-level value dsub3 = { - 'i1': 'item1', - 'i3': { - 'i100': None, - }, + "i1": "item1", + "i3": {"i100": None,}, } # Inexistent sub-sub-level value - dsub4 = { - 'i4': { - 'i41': { - 'i412': 'item412', - }, - 'i42': { - 'i421': 'item421', - } - } - } + dsub4 = {"i4": {"i41": {"i412": "item412",}, "i42": {"i421": "item421",}}} # Invalid sub-sub-level value - dsub5 = { - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': { - 'i421': 'item420000', - } - } - } + dsub5 = {"i4": {"i41": {"i411": "item411",}, "i42": {"i421": "item420000",}}} # sub-sub-level should be value - dsub6 = { - 'i4': { - 'i41': { - 'i411': 'item411', - }, - 'i42': 'foobar', - } - } + dsub6 = {"i4": {"i41": {"i411": "item411",}, "i42": "foobar",}} assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None @@ -249,80 +182,54 @@ def test_json_intersect_multilevel_false(): assert json_cmp(dcomplete, dsub5) is not None assert json_cmp(dcomplete, dsub6) is not None + def test_json_with_list_sucess(): "Test successful json comparisons that have lists." dcomplete = { - 'list': [ - { - 'i1': 'item 1', - 'i2': 'item 2', - }, - { - 'i10': 'item 10', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "i100": "item 100", } # Test list type dsub1 = { - 'list': [], + "list": [], } # Test list correct list items dsub2 = { - 'list': [ - { - 'i1': 'item 1', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 1",},], + "i100": "item 100", } # Test list correct list size dsub3 = { - 'list': [ - {}, {}, - ], + "list": [{}, {},], } assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None assert json_cmp(dcomplete, dsub3) is None + def test_json_with_list_failure(): "Test failed json comparisons that have lists." dcomplete = { - 'list': [ - { - 'i1': 'item 1', - 'i2': 'item 2', - }, - { - 'i10': 'item 10', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "i100": "item 100", } # Test list type dsub1 = { - 'list': {}, + "list": {}, } # Test list incorrect list items dsub2 = { - 'list': [ - { - 'i1': 'item 2', - }, - ], - 'i100': 'item 100', + "list": [{"i1": "item 2",},], + "i100": "item 100", } # Test list correct list size dsub3 = { - 'list': [ - {}, {}, {}, - ], + "list": [{}, {}, {},], } assert json_cmp(dcomplete, dsub1) is not None @@ -334,53 +241,20 @@ def test_json_list_start_success(): "Test JSON encoded data that starts with a list that should succeed." 
dcomplete = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 200, - "value": "abcd", - }, - { - "id": 300, - "value": "abcde", - }, + {"id": 100, "value": "abc",}, + {"id": 200, "value": "abcd",}, + {"id": 300, "value": "abcde",}, ] - dsub1 = [ - { - "id": 100, - "value": "abc", - } - ] + dsub1 = [{"id": 100, "value": "abc",}] - dsub2 = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 200, - "value": "abcd", - } - ] + dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abcd",}] - dsub3 = [ - { - "id": 300, - "value": "abcde", - } - ] + dsub3 = [{"id": 300, "value": "abcde",}] - dsub4 = [ - ] + dsub4 = [] - dsub5 = [ - { - "id": 100, - } - ] + dsub5 = [{"id": 100,}] assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -393,58 +267,18 @@ def test_json_list_start_failure(): "Test JSON encoded data that starts with a list that should fail." dcomplete = [ - { - "id": 100, - "value": "abc" - }, - { - "id": 200, - "value": "abcd" - }, - { - "id": 300, - "value": "abcde" - }, + {"id": 100, "value": "abc"}, + {"id": 200, "value": "abcd"}, + {"id": 300, "value": "abcde"}, ] - dsub1 = [ - { - "id": 100, - "value": "abcd", - } - ] + dsub1 = [{"id": 100, "value": "abcd",}] - dsub2 = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 200, - "value": "abc", - } - ] + dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abc",}] - dsub3 = [ - { - "id": 100, - "value": "abc", - }, - { - "id": 350, - "value": "abcde", - } - ] + dsub3 = [{"id": 100, "value": "abc",}, {"id": 350, "value": "abcde",}] - dsub4 = [ - { - "value": "abcx", - }, - { - "id": 300, - "value": "abcde", - } - ] + dsub4 = [{"value": "abcx",}, {"id": 300, "value": "abcde",}] assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None @@ -452,5 +286,5 @@ def test_json_list_start_failure(): assert json_cmp(dcomplete, dsub4) is not None -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(pytest.main()) diff --git a/tests/topotests/lib/test/test_run_and_expect.py b/tests/topotests/lib/test/test_run_and_expect.py index 3c22c20e7b..d65d5baf37 100755 --- a/tests/topotests/lib/test/test_run_and_expect.py +++ b/tests/topotests/lib/test/test_run_and_expect.py @@ -32,11 +32,12 @@ import pytest # Save the Current Working Directory to find lib files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topotest import run_and_expect_type + def test_run_and_expect_type(): "Test basic `run_and_expect_type` functionality." @@ -45,12 +46,16 @@ def test_run_and_expect_type(): return True # Test value success. - success, value = run_and_expect_type(return_true, bool, count=1, wait=0, avalue=True) + success, value = run_and_expect_type( + return_true, bool, count=1, wait=0, avalue=True + ) assert success is True assert value is True # Test value failure. 
- success, value = run_and_expect_type(return_true, bool, count=1, wait=0, avalue=False) + success, value = run_and_expect_type( + return_true, bool, count=1, wait=0, avalue=False + ) assert success is False assert value is True @@ -70,5 +75,5 @@ def test_run_and_expect_type(): assert value is True -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(pytest.main()) diff --git a/tests/topotests/lib/test/test_version.py b/tests/topotests/lib/test/test_version.py index 9204ac2084..7c2df00337 100755 --- a/tests/topotests/lib/test/test_version.py +++ b/tests/topotests/lib/test/test_version.py @@ -32,21 +32,22 @@ import pytest # Save the Current Working Directory to find lib files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../../')) +sys.path.append(os.path.join(CWD, "../../")) # pylint: disable=C0413 from lib.topotest import version_cmp + def test_valid_versions(): "Test valid version compare results" - curver = '3.0' - samever = '3' - oldver = '2.0' - newver = '3.0.1' - newerver = '3.0.11' - vercustom = '3.0-dev' - verysmallinc = '3.0.0.0.0.0.0.1' + curver = "3.0" + samever = "3" + oldver = "2.0" + newver = "3.0.1" + newerver = "3.0.11" + vercustom = "3.0-dev" + verysmallinc = "3.0.0.0.0.0.0.1" assert version_cmp(curver, oldver) == 1 assert version_cmp(curver, newver) == -1 @@ -64,14 +65,15 @@ def test_valid_versions(): assert version_cmp(verysmallinc, verysmallinc) == 0 assert version_cmp(vercustom, verysmallinc) == -1 + def test_invalid_versions(): "Test invalid version strings" - curver = '3.0' - badver1 = '.1' - badver2 = '-1.0' - badver3 = '.' - badver4 = '3.-0.3' + curver = "3.0" + badver1 = ".1" + badver2 = "-1.0" + badver3 = "." + badver4 = "3.-0.3" with pytest.raises(ValueError): assert version_cmp(curver, badver1) @@ -79,9 +81,10 @@ def test_invalid_versions(): assert version_cmp(curver, badver3) assert version_cmp(curver, badver4) + def test_regression_1(): """ Test regression on the following type of comparison: '3.0.2' > '3' Expected result is 1. """ - assert version_cmp('3.0.2', '3') == 1 + assert version_cmp("3.0.2", "3") == 1 diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 6859f5a076..6a6bbc7c78 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -70,6 +70,7 @@ CWD = os.path.dirname(os.path.realpath(__file__)) # all test functions without declaring a test local variable. global_tgen = None + def get_topogen(topo=None): """ Helper function to retrieve Topogen. Must be called with `topo` when called @@ -79,31 +80,34 @@ def get_topogen(topo=None): global_tgen.topo = topo return global_tgen + def set_topogen(tgen): "Helper function to set Topogen" # pylint: disable=W0603 global global_tgen global_tgen = tgen + # # Main class: topology builder # # Topogen configuration defaults tgen_defaults = { - 'verbosity': 'info', - 'frrdir': '/usr/lib/frr', - 'quaggadir': '/usr/lib/quagga', - 'routertype': 'frr', - 'memleak_path': None, + "verbosity": "info", + "frrdir": "/usr/lib/frr", + "quaggadir": "/usr/lib/quagga", + "routertype": "frr", + "memleak_path": None, } + class Topogen(object): "A topology test builder helper." 
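# Usage sketch for the module-level helpers above (a hedged example: the
# "TemplateTopo" class and the log level are illustrative, not defined in this
# patch).  A test module typically stores its Topogen instance with
# set_topogen() from a pytest setup hook and fetches it back with get_topogen()
# inside each test function:
#
#     def setup_module(module):
#         tgen = Topogen(TemplateTopo, module.__name__)
#         set_topogen(tgen)
#         tgen.start_topology("info")
#
#     def teardown_module(_module):
#         get_topogen().stop_topology()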
- CONFIG_SECTION = 'topogen' + CONFIG_SECTION = "topogen" - def __init__(self, cls, modname='unnamed'): + def __init__(self, cls, modname="unnamed"): """ Topogen initialization function, takes the following arguments: * `cls`: the topology class that is child of mininet.topo @@ -117,16 +121,16 @@ class Topogen(object): self.switchn = 1 self.modname = modname self.errorsd = {} - self.errors = '' + self.errors = "" self.peern = 1 self._init_topo(cls) - logger.info('loading topology: {}'.format(self.modname)) + logger.info("loading topology: {}".format(self.modname)) @staticmethod def _mininet_reset(): "Reset the mininet environment" # Clean up the mininet environment - os.system('mn -c > /dev/null 2>&1') + os.system("mn -c > /dev/null 2>&1") def _init_topo(self, cls): """ @@ -138,10 +142,10 @@ class Topogen(object): # Test for MPLS Kernel modules available self.hasmpls = False - if not topotest.module_present('mpls-router'): - logger.info('MPLS tests will not run (missing mpls-router kernel module)') - elif not topotest.module_present('mpls-iptunnel'): - logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)') + if not topotest.module_present("mpls-router"): + logger.info("MPLS tests will not run (missing mpls-router kernel module)") + elif not topotest.module_present("mpls-iptunnel"): + logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)") else: self.hasmpls = True # Load the default topology configurations @@ -160,7 +164,7 @@ class Topogen(object): topotests. """ self.config = configparser.ConfigParser(tgen_defaults) - pytestini_path = os.path.join(CWD, '../pytest.ini') + pytestini_path = os.path.join(CWD, "../pytest.ini") self.config.read(pytestini_path) def add_router(self, name=None, cls=topotest.Router, **params): @@ -173,15 +177,15 @@ class Topogen(object): Returns a TopoRouter. """ if name is None: - name = 'r{}'.format(self.routern) + name = "r{}".format(self.routern) if name in self.gears: - raise KeyError('router already exists') + raise KeyError("router already exists") - params['frrdir'] = self.config.get(self.CONFIG_SECTION, 'frrdir') - params['quaggadir'] = self.config.get(self.CONFIG_SECTION, 'quaggadir') - params['memleak_path'] = self.config.get(self.CONFIG_SECTION, 'memleak_path') - if not params.has_key('routertype'): - params['routertype'] = self.config.get(self.CONFIG_SECTION, 'routertype') + params["frrdir"] = self.config.get(self.CONFIG_SECTION, "frrdir") + params["quaggadir"] = self.config.get(self.CONFIG_SECTION, "quaggadir") + params["memleak_path"] = self.config.get(self.CONFIG_SECTION, "memleak_path") + if not params.has_key("routertype"): + params["routertype"] = self.config.get(self.CONFIG_SECTION, "routertype") self.gears[name] = TopoRouter(self, cls, name, **params) self.routern += 1 @@ -195,9 +199,9 @@ class Topogen(object): Returns the switch name and number. """ if name is None: - name = 's{}'.format(self.switchn) + name = "s{}".format(self.switchn) if name in self.gears: - raise KeyError('switch already exists') + raise KeyError("switch already exists") self.gears[name] = TopoSwitch(self, cls, name) self.switchn += 1 @@ -211,9 +215,9 @@ class Topogen(object): * `defaultRoute`: the peer default route (e.g. 
'via 1.2.3.1') """ if name is None: - name = 'peer{}'.format(self.peern) + name = "peer{}".format(self.peern) if name in self.gears: - raise KeyError('exabgp peer already exists') + raise KeyError("exabgp peer already exists") self.gears[name] = TopoExaBGP(self, name, ip=ip, defaultRoute=defaultRoute) self.peern += 1 @@ -228,9 +232,9 @@ class Topogen(object): * TopoSwitch """ if not isinstance(node1, TopoGear): - raise ValueError('invalid node1 type') + raise ValueError("invalid node1 type") if not isinstance(node2, TopoGear): - raise ValueError('invalid node2 type') + raise ValueError("invalid node2 type") if ifname1 is None: ifname1 = node1.new_link() @@ -239,8 +243,7 @@ class Topogen(object): node1.register_link(ifname1, node2, ifname2) node2.register_link(ifname2, node1, ifname1) - self.topo.addLink(node1.name, node2.name, - intfName1=ifname1, intfName2=ifname2) + self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2) def get_gears(self, geartype): """ @@ -262,8 +265,11 @@ class Topogen(object): # Do stuff ``` """ - return dict((name, gear) for name, gear in self.gears.iteritems() - if isinstance(gear, geartype)) + return dict( + (name, gear) + for name, gear in self.gears.iteritems() + if isinstance(gear, geartype) + ) def routers(self): """ @@ -291,16 +297,16 @@ class Topogen(object): """ # If log_level is not specified use the configuration. if log_level is None: - log_level = self.config.get(self.CONFIG_SECTION, 'verbosity') + log_level = self.config.get(self.CONFIG_SECTION, "verbosity") # Set python logger level logger_config.set_log_level(log_level) # Run mininet - if log_level == 'debug': + if log_level == "debug": setLogLevel(log_level) - logger.info('starting topology: {}'.format(self.modname)) + logger.info("starting topology: {}".format(self.modname)) self.net.start() def start_router(self, router=None): @@ -326,7 +332,7 @@ class Topogen(object): first is a simple kill with no sleep, the second will sleep if not killed and try with a different signal. """ - logger.info('stopping topology: {}'.format(self.modname)) + logger.info("stopping topology: {}".format(self.modname)) errors = "" for gear in self.gears.values(): gear.stop(False, False) @@ -344,7 +350,8 @@ class Topogen(object): """ if not sys.stdin.isatty(): raise EnvironmentError( - 'you must run pytest with \'-s\' in order to use mininet CLI') + "you must run pytest with '-s' in order to use mininet CLI" + ) CLI(self.net) @@ -354,8 +361,9 @@ class Topogen(object): if self.routers_have_failure(): return False - memleak_file = (os.environ.get('TOPOTESTS_CHECK_MEMLEAK') or - self.config.get(self.CONFIG_SECTION, 'memleak_path')) + memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get( + self.CONFIG_SECTION, "memleak_path" + ) if memleak_file is None: return False return True @@ -382,7 +390,7 @@ class Topogen(object): code = len(self.errorsd) self.errorsd[code] = message - self.errors += '\n{}: {}'.format(code, message) + self.errors += "\n{}: {}".format(code, message) def has_errors(self): "Returns whether errors exist or not." 
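# Sketch of the error-accounting flow provided by set_error() and has_errors()
# (the message and code strings are made up for illustration, and pytest is
# assumed to be imported): errors are recorded per code in errorsd and
# concatenated into errors, which a test can surface with a skip or an assert:
#
#     tgen = get_topogen()
#     tgen.set_error("r1: bgpd did not converge", "bgp_convergence")
#     if tgen.has_errors():
#         pytest.skip(tgen.errors)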
@@ -393,23 +401,25 @@ class Topogen(object): if self.has_errors(): return True - errors = '' + errors = "" router_list = self.routers().values() for router in router_list: result = router.check_router_running() - if result != '': - errors += result + '\n' + if result != "": + errors += result + "\n" - if errors != '': - self.set_error(errors, 'router_error') + if errors != "": + self.set_error(errors, "router_error") assert False, errors return True return False + # # Topology gears (equipment) # + class TopoGear(object): "Abstract class for type checking" @@ -421,11 +431,11 @@ class TopoGear(object): self.linkn = 0 def __str__(self): - links = '' + links = "" for myif, dest in self.links.iteritems(): _, destif = dest - if links != '': - links += ',' + if links != "": + links += "," links += '"{}"<->"{}"'.format(myif, destif) return 'TopoGear<name="{}",links=[{}]>'.format(self.name, links) @@ -462,20 +472,22 @@ class TopoGear(object): enabled: whether we should enable or disable the interface """ if myif not in self.links.keys(): - raise KeyError('interface doesn\'t exists') + raise KeyError("interface doesn't exists") if enabled is True: - operation = 'up' + operation = "up" else: - operation = 'down' + operation = "down" - logger.info('setting node "{}" link "{}" to state "{}"'.format( - self.name, myif, operation - )) - extract='' + logger.info( + 'setting node "{}" link "{}" to state "{}"'.format( + self.name, myif, operation + ) + ) + extract = "" if netns is not None: - extract = 'ip netns exec {} '.format(netns) - return self.run('{}ip link set dev {} {}'.format(extract, myif, operation)) + extract = "ip netns exec {} ".format(netns) + return self.run("{}ip link set dev {} {}".format(extract, myif, operation)) def peer_link_enable(self, myif, enabled=True, netns=None): """ @@ -487,7 +499,7 @@ class TopoGear(object): peer disables their interface our interface status changes to no link. """ if myif not in self.links.keys(): - raise KeyError('interface doesn\'t exists') + raise KeyError("interface doesn't exists") node, nodeif = self.links[myif] node.link_enable(nodeif, enabled, netns) @@ -498,7 +510,7 @@ class TopoGear(object): NOTE: This function should only be called by Topogen. """ - ifname = '{}-eth{}'.format(self.name, self.linkn) + ifname = "{}-eth{}".format(self.name, self.linkn) self.linkn += 1 return ifname @@ -509,10 +521,11 @@ class TopoGear(object): NOTE: This function should only be called by Topogen. """ if myif in self.links.keys(): - raise KeyError('interface already exists') + raise KeyError("interface already exists") self.links[myif] = (node, nodeif) + class TopoRouter(TopoGear): """ Router abstraction. @@ -520,11 +533,11 @@ class TopoRouter(TopoGear): # The default required directories by Quagga/FRR PRIVATE_DIRS = [ - '/etc/frr', - '/etc/quagga', - '/var/run/frr', - '/var/run/quagga', - '/var/log' + "/etc/frr", + "/etc/quagga", + "/var/run/frr", + "/var/run/quagga", + "/var/log", ] # Router Daemon enumeration definition. 
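# Sketch of driving the link helpers above from a test (the router and
# interface names are illustrative; they follow the "<node>-eth<n>" scheme
# produced by new_link()):
#
#     r1 = tgen.gears["r1"]
#     r1.link_enable("r1-eth0", enabled=False)        # shut our side of the link
#     r1.peer_link_enable("r1-eth0", enabled=False)   # shut the peer's side
#     r1.link_enable("r1-eth0", enabled=True)         # bring our side back up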
@@ -543,20 +556,20 @@ class TopoRouter(TopoGear): RD_BFD = 13 RD_SHARP = 14 RD = { - RD_ZEBRA: 'zebra', - RD_RIP: 'ripd', - RD_RIPNG: 'ripngd', - RD_OSPF: 'ospfd', - RD_OSPF6: 'ospf6d', - RD_ISIS: 'isisd', - RD_BGP: 'bgpd', - RD_PIM: 'pimd', - RD_LDP: 'ldpd', - RD_EIGRP: 'eigrpd', - RD_NHRP: 'nhrpd', - RD_STATIC: 'staticd', - RD_BFD: 'bfdd', - RD_SHARP: 'sharpd', + RD_ZEBRA: "zebra", + RD_RIP: "ripd", + RD_RIPNG: "ripngd", + RD_OSPF: "ospfd", + RD_OSPF6: "ospf6d", + RD_ISIS: "isisd", + RD_BGP: "bgpd", + RD_PIM: "pimd", + RD_LDP: "ldpd", + RD_EIGRP: "eigrpd", + RD_NHRP: "nhrpd", + RD_STATIC: "staticd", + RD_BFD: "bfdd", + RD_SHARP: "sharpd", } def __init__(self, tgen, cls, name, **params): @@ -574,34 +587,34 @@ class TopoRouter(TopoGear): self.name = name self.cls = cls self.options = {} - self.routertype = params.get('routertype', 'frr') - if not params.has_key('privateDirs'): - params['privateDirs'] = self.PRIVATE_DIRS + self.routertype = params.get("routertype", "frr") + if not params.has_key("privateDirs"): + params["privateDirs"] = self.PRIVATE_DIRS - self.options['memleak_path'] = params.get('memleak_path', None) + self.options["memleak_path"] = params.get("memleak_path", None) # Create new log directory - self.logdir = '/tmp/topotests/{}'.format(self.tgen.modname) + self.logdir = "/tmp/topotests/{}".format(self.tgen.modname) # Clean up before starting new log files: avoids removing just created # log files. self._prepare_tmpfiles() # Propagate the router log directory - params['logdir'] = self.logdir + params["logdir"] = self.logdir - #setup the per node directory - dir = '{}/{}'.format(self.logdir, self.name) - os.system('mkdir -p ' + dir) - os.system('chmod -R go+rw /tmp/topotests') + # setup the per node directory + dir = "{}/{}".format(self.logdir, self.name) + os.system("mkdir -p " + dir) + os.system("chmod -R go+rw /tmp/topotests") # Open router log file - logfile = '{0}/{1}.log'.format(self.logdir, name) + logfile = "{0}/{1}.log".format(self.logdir, name) self.logger = logger_config.get_logger(name=name, target=logfile) self.tgen.topo.addNode(self.name, cls=self.cls, **params) def __str__(self): gear = super(TopoRouter, self).__str__() - gear += ' TopoRouter<>' + gear += " TopoRouter<>" return gear def _prepare_tmpfiles(self): @@ -622,9 +635,9 @@ class TopoRouter(TopoGear): os.chmod(self.logdir, 0o1777) # Try to find relevant old logfiles in /tmp and delete them - map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) # Remove old core files - map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) def check_capability(self, daemon, param): """ @@ -651,7 +664,7 @@ class TopoRouter(TopoGear): """ Run a series of checks and returns a status string. """ - self.logger.info('checking if daemons are running') + self.logger.info("checking if daemons are running") return self.tgen.net[self.name].checkRouterRunning() def start(self): @@ -663,7 +676,7 @@ class TopoRouter(TopoGear): * Start daemons (e.g. 
FRR/Quagga) * Configure daemon logging files """ - self.logger.debug('starting') + self.logger.debug("starting") nrouter = self.tgen.net[self.name] result = nrouter.startRouter(self.tgen) @@ -672,15 +685,17 @@ class TopoRouter(TopoGear): for daemon, enabled in nrouter.daemons.iteritems(): if enabled == 0: continue - self.vtysh_cmd('configure terminal\nlog commands\nlog file {}.log'.format( - daemon), daemon=daemon) + self.vtysh_cmd( + "configure terminal\nlog commands\nlog file {}.log".format(daemon), + daemon=daemon, + ) - if result != '': + if result != "": self.tgen.set_error(result) else: # Enable MPLS processing on all interfaces. for interface in self.links.keys(): - set_sysctl(nrouter, 'net.mpls.conf.{}.input'.format(interface), 1) + set_sysctl(nrouter, "net.mpls.conf.{}.input".format(interface), 1) return result @@ -689,7 +704,7 @@ class TopoRouter(TopoGear): Stop router: * Kill daemons """ - self.logger.debug('stopping') + self.logger.debug("stopping") return self.tgen.net[self.name].stopRouter(wait, assertOnError) def vtysh_cmd(self, command, isjson=False, daemon=None): @@ -701,25 +716,26 @@ class TopoRouter(TopoGear): return output for each command. See vtysh_multicmd() for more details. """ # Detect multi line commands - if command.find('\n') != -1: + if command.find("\n") != -1: return self.vtysh_multicmd(command, daemon=daemon) - dparam = '' + dparam = "" if daemon is not None: - dparam += '-d {}'.format(daemon) + dparam += "-d {}".format(daemon) vtysh_command = 'vtysh {} -c "{}" 2>/dev/null'.format(dparam, command) output = self.run(vtysh_command) - self.logger.info('\nvtysh command => {}\nvtysh output <= {}'.format( - command, output)) + self.logger.info( + "\nvtysh command => {}\nvtysh output <= {}".format(command, output) + ) if isjson is False: return output try: return json.loads(output) except ValueError: - logger.warning('vtysh_cmd: failed to convert json output') + logger.warning("vtysh_cmd: failed to convert json output") return {} def vtysh_multicmd(self, commands, pretty_output=True, daemon=None): @@ -734,21 +750,22 @@ class TopoRouter(TopoGear): # Prepare the temporary file that will hold the commands fname = topotest.get_file(commands) - dparam = '' + dparam = "" if daemon is not None: - dparam += '-d {}'.format(daemon) + dparam += "-d {}".format(daemon) # Run the commands and delete the temporary file if pretty_output: - vtysh_command = 'vtysh {} < {}'.format(dparam, fname) + vtysh_command = "vtysh {} < {}".format(dparam, fname) else: - vtysh_command = 'vtysh {} -f {}'.format(dparam, fname) + vtysh_command = "vtysh {} -f {}".format(dparam, fname) res = self.run(vtysh_command) os.unlink(fname) - self.logger.info('\nvtysh command => "{}"\nvtysh output <= "{}"'.format( - vtysh_command, res)) + self.logger.info( + '\nvtysh command => "{}"\nvtysh output <= "{}"'.format(vtysh_command, res) + ) return res @@ -760,27 +777,29 @@ class TopoRouter(TopoGear): NOTE: to run this you must have the environment variable TOPOTESTS_CHECK_MEMLEAK set or memleak_path configured in `pytest.ini`. """ - memleak_file = os.environ.get('TOPOTESTS_CHECK_MEMLEAK') or self.options['memleak_path'] + memleak_file = ( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.options["memleak_path"] + ) if memleak_file is None: return self.stop() - self.logger.info('running memory leak report') + self.logger.info("running memory leak report") self.tgen.net[self.name].report_memory_leaks(memleak_file, testname) def version_info(self): "Get equipment information from 'show version'." 
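# Sketch of the two invocation styles handled by vtysh_cmd() above (the
# commands are illustrative): a single command, optionally decoded as JSON,
# and a newline-separated batch that is dispatched to vtysh_multicmd():
#
#     rib = router.vtysh_cmd("show ip route json", isjson=True)
#     router.vtysh_cmd("configure terminal\nlog commands\nend")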
- output = self.vtysh_cmd('show version').split('\n')[0] - columns = topotest.normalize_text(output).split(' ') + output = self.vtysh_cmd("show version").split("\n")[0] + columns = topotest.normalize_text(output).split(" ") try: return { - 'type': columns[0], - 'version': columns[1], + "type": columns[0], + "version": columns[1], } except IndexError: return { - 'type': None, - 'version': None, + "type": None, + "version": None, } def has_version(self, cmpop, version): @@ -802,19 +821,21 @@ class TopoRouter(TopoGear): Compares router type with `rtype`. Returns `True` if the type matches, otherwise `false`. """ - curtype = self.version_info()['type'] + curtype = self.version_info()["type"] return rtype == curtype def has_mpls(self): nrouter = self.tgen.net[self.name] return nrouter.hasmpls + class TopoSwitch(TopoGear): """ Switch abstraction. Has the following properties: * cls: switch class that will be used to instantiate * name: switch name """ + # pylint: disable=too-few-public-methods def __init__(self, tgen, cls, name): @@ -827,9 +848,10 @@ class TopoSwitch(TopoGear): def __str__(self): gear = super(TopoSwitch, self).__str__() - gear += ' TopoSwitch<>' + gear += " TopoSwitch<>" return gear + class TopoHost(TopoGear): "Host abstraction." # pylint: disable=too-few-public-methods @@ -853,18 +875,21 @@ class TopoHost(TopoGear): def __str__(self): gear = super(TopoHost, self).__str__() gear += ' TopoHost<ip="{}",defaultRoute="{}",privateDirs="{}">'.format( - self.options['ip'], self.options['defaultRoute'], - str(self.options['privateDirs'])) + self.options["ip"], + self.options["defaultRoute"], + str(self.options["privateDirs"]), + ) return gear + class TopoExaBGP(TopoHost): "ExaBGP peer abstraction." # pylint: disable=too-few-public-methods PRIVATE_DIRS = [ - '/etc/exabgp', - '/var/run/exabgp', - '/var/log', + "/etc/exabgp", + "/var/run/exabgp", + "/var/log", ] def __init__(self, tgen, name, **params): @@ -878,13 +903,13 @@ class TopoExaBGP(TopoHost): has a privateDirs already defined and contains functions to handle ExaBGP things. 
""" - params['privateDirs'] = self.PRIVATE_DIRS + params["privateDirs"] = self.PRIVATE_DIRS super(TopoExaBGP, self).__init__(tgen, name, **params) self.tgen.topo.addHost(name, **params) def __str__(self): gear = super(TopoExaBGP, self).__str__() - gear += ' TopoExaBGP<>'.format() + gear += " TopoExaBGP<>".format() return gear def start(self, peer_dir, env_file=None): @@ -895,22 +920,22 @@ class TopoExaBGP(TopoHost): * Make all python files runnable * Run ExaBGP with env file `env_file` and configuration peer*/exabgp.cfg """ - self.run('mkdir /etc/exabgp') - self.run('chmod 755 /etc/exabgp') - self.run('cp {}/* /etc/exabgp/'.format(peer_dir)) + self.run("mkdir /etc/exabgp") + self.run("chmod 755 /etc/exabgp") + self.run("cp {}/* /etc/exabgp/".format(peer_dir)) if env_file is not None: - self.run('cp {} /etc/exabgp/exabgp.env'.format(env_file)) - self.run('chmod 644 /etc/exabgp/*') - self.run('chmod a+x /etc/exabgp/*.py') - self.run('chown -R exabgp:exabgp /etc/exabgp') - output = self.run('exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg') + self.run("cp {} /etc/exabgp/exabgp.env".format(env_file)) + self.run("chmod 644 /etc/exabgp/*") + self.run("chmod a+x /etc/exabgp/*.py") + self.run("chown -R exabgp:exabgp /etc/exabgp") + output = self.run("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg") if output == None or len(output) == 0: - output = '<none>' - logger.info('{} exabgp started, output={}'.format(self.name, output)) + output = "<none>" + logger.info("{} exabgp started, output={}".format(self.name, output)) def stop(self, wait=True, assertOnError=True): "Stop ExaBGP peer and kill the daemon" - self.run('kill `cat /var/run/exabgp/exabgp.pid`') + self.run("kill `cat /var/run/exabgp/exabgp.pid`") return "" @@ -928,160 +953,189 @@ def diagnose_env_linux(): ret = True # Test log path exists before installing handler. - if not os.path.isdir('/tmp'): - logger.warning('could not find /tmp for logs') + if not os.path.isdir("/tmp"): + logger.warning("could not find /tmp for logs") else: - os.system('mkdir /tmp/topotests') + os.system("mkdir /tmp/topotests") # Log diagnostics to file so it can be examined later. 
- fhandler = logging.FileHandler(filename='/tmp/topotests/diagnostics.txt') + fhandler = logging.FileHandler(filename="/tmp/topotests/diagnostics.txt") fhandler.setLevel(logging.DEBUG) fhandler.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) logger.addHandler(fhandler) - logger.info('Running environment diagnostics') + logger.info("Running environment diagnostics") # Load configuration config = configparser.ConfigParser(tgen_defaults) - pytestini_path = os.path.join(CWD, '../pytest.ini') + pytestini_path = os.path.join(CWD, "../pytest.ini") config.read(pytestini_path) # Assert that we are running as root if os.getuid() != 0: - logger.error('you must run topotest as root') + logger.error("you must run topotest as root") ret = False # Assert that we have mininet - if os.system('which mn >/dev/null 2>/dev/null') != 0: - logger.error('could not find mininet binary (mininet is not installed)') + if os.system("which mn >/dev/null 2>/dev/null") != 0: + logger.error("could not find mininet binary (mininet is not installed)") ret = False # Assert that we have iproute installed - if os.system('which ip >/dev/null 2>/dev/null') != 0: - logger.error('could not find ip binary (iproute is not installed)') + if os.system("which ip >/dev/null 2>/dev/null") != 0: + logger.error("could not find ip binary (iproute is not installed)") ret = False # Assert that we have gdb installed - if os.system('which gdb >/dev/null 2>/dev/null') != 0: - logger.error('could not find gdb binary (gdb is not installed)') + if os.system("which gdb >/dev/null 2>/dev/null") != 0: + logger.error("could not find gdb binary (gdb is not installed)") ret = False # Assert that FRR utilities exist - frrdir = config.get('topogen', 'frrdir') + frrdir = config.get("topogen", "frrdir") hasfrr = False if not os.path.isdir(frrdir): - logger.error('could not find {} directory'.format(frrdir)) + logger.error("could not find {} directory".format(frrdir)) ret = False else: hasfrr = True try: - pwd.getpwnam('frr')[2] + pwd.getpwnam("frr")[2] except KeyError: logger.warning('could not find "frr" user') try: - grp.getgrnam('frr')[2] + grp.getgrnam("frr")[2] except KeyError: logger.warning('could not find "frr" group') try: - if 'frr' not in grp.getgrnam('frrvty').gr_mem: - logger.error('"frr" user and group exist, but user is not under "frrvty"') + if "frr" not in grp.getgrnam("frrvty").gr_mem: + logger.error( + '"frr" user and group exist, but user is not under "frrvty"' + ) except KeyError: logger.warning('could not find "frrvty" group') - for fname in ['zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', - 'isisd', 'pimd', 'ldpd']: + for fname in [ + "zebra", + "ospfd", + "ospf6d", + "bgpd", + "ripd", + "ripngd", + "isisd", + "pimd", + "ldpd", + ]: path = os.path.join(frrdir, fname) if not os.path.isfile(path): # LDPd is an exception - if fname == 'ldpd': - logger.info('could not find {} in {}'.format(fname, frrdir) + - '(LDPd tests will not run)') + if fname == "ldpd": + logger.info( + "could not find {} in {}".format(fname, frrdir) + + "(LDPd tests will not run)" + ) continue - logger.warning('could not find {} in {}'.format(fname, frrdir)) + logger.warning("could not find {} in {}".format(fname, frrdir)) ret = False else: - if fname != 'zebra': + if fname != "zebra": continue - os.system( - '{} -v 2>&1 >/tmp/topotests/frr_zebra.txt'.format(path) - ) + os.system("{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path)) # Assert that Quagga 
utilities exist - quaggadir = config.get('topogen', 'quaggadir') + quaggadir = config.get("topogen", "quaggadir") if hasfrr: # if we have frr, don't check for quagga pass elif not os.path.isdir(quaggadir): - logger.info('could not find {} directory (quagga tests will not run)'.format(quaggadir)) + logger.info( + "could not find {} directory (quagga tests will not run)".format(quaggadir) + ) else: ret = True try: - pwd.getpwnam('quagga')[2] + pwd.getpwnam("quagga")[2] except KeyError: logger.info('could not find "quagga" user') try: - grp.getgrnam('quagga')[2] + grp.getgrnam("quagga")[2] except KeyError: logger.info('could not find "quagga" group') try: - if 'quagga' not in grp.getgrnam('quaggavty').gr_mem: - logger.error('"quagga" user and group exist, but user is not under "quaggavty"') + if "quagga" not in grp.getgrnam("quaggavty").gr_mem: + logger.error( + '"quagga" user and group exist, but user is not under "quaggavty"' + ) except KeyError: logger.warning('could not find "quaggavty" group') - for fname in ['zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', - 'isisd', 'pimd']: + for fname in [ + "zebra", + "ospfd", + "ospf6d", + "bgpd", + "ripd", + "ripngd", + "isisd", + "pimd", + ]: path = os.path.join(quaggadir, fname) if not os.path.isfile(path): - logger.warning('could not find {} in {}'.format(fname, quaggadir)) + logger.warning("could not find {} in {}".format(fname, quaggadir)) ret = False else: - if fname != 'zebra': + if fname != "zebra": continue - os.system( - '{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt'.format(path) - ) + os.system("{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt".format(path)) # Test MPLS availability krel = platform.release() - if topotest.version_cmp(krel, '4.5') < 0: - logger.info('LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format(krel)) + if topotest.version_cmp(krel, "4.5") < 0: + logger.info( + 'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format( + krel + ) + ) # Test for MPLS Kernel modules available - if not topotest.module_present('mpls-router', load=False) != 0: - logger.info('LDPd tests will not run (missing mpls-router kernel module)') - if not topotest.module_present('mpls-iptunnel', load=False) != 0: - logger.info('LDPd tests will not run (missing mpls-iptunnel kernel module)') + if not topotest.module_present("mpls-router", load=False) != 0: + logger.info("LDPd tests will not run (missing mpls-router kernel module)") + if not topotest.module_present("mpls-iptunnel", load=False) != 0: + logger.info("LDPd tests will not run (missing mpls-iptunnel kernel module)") # TODO remove me when we start supporting exabgp >= 4 try: - output = subprocess.check_output(['exabgp', '-v']) - line = output.split('\n')[0] - version = line.split(' ')[2] - if topotest.version_cmp(version, '4') >= 0: - logger.warning('BGP topologies are still using exabgp version 3, expect failures') + output = subprocess.check_output(["exabgp", "-v"]) + line = output.split("\n")[0] + version = line.split(" ")[2] + if topotest.version_cmp(version, "4") >= 0: + logger.warning( + "BGP topologies are still using exabgp version 3, expect failures" + ) # We want to catch all exceptions # pylint: disable=W0702 except: - logger.warning('failed to find exabgp or returned error') + logger.warning("failed to find exabgp or returned error") # After we logged the output to file, remove the handler. 
logger.removeHandler(fhandler) return ret + def diagnose_env_freebsd(): return True + def diagnose_env(): if sys.platform.startswith("linux"): return diagnose_env_linux() diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index fff5a1e82f..b25317ba7f 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -29,13 +29,14 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from lib.common_config import ( - number_to_row, number_to_column, + number_to_row, + number_to_column, load_config_to_router, create_interfaces_cfg, create_static_routes, create_prefix_lists, create_route_maps, - create_bgp_community_lists + create_bgp_community_lists, ) from lib.bgp import create_router_bgp @@ -53,56 +54,69 @@ def build_topo_from_json(tgen, topo): * `topo`: json file data """ - ROUTER_LIST = sorted(topo['routers'].keys(), - key=lambda x: int(re_search('\d+', x).group(0))) + ROUTER_LIST = sorted( + topo["routers"].keys(), key=lambda x: int(re_search("\d+", x).group(0)) + ) listRouters = ROUTER_LIST[:] for routerN in ROUTER_LIST: - logger.info('Topo: Add router {}'.format(routerN)) + logger.info("Topo: Add router {}".format(routerN)) tgen.add_router(routerN) listRouters.append(routerN) - if 'ipv4base' in topo: - ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4']) - ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask']) - if topo['link_ip_start']['v4mask'] < 32: + if "ipv4base" in topo: + ipv4Next = ipaddr.IPv4Address(topo["link_ip_start"]["ipv4"]) + ipv4Step = 2 ** (32 - topo["link_ip_start"]["v4mask"]) + if topo["link_ip_start"]["v4mask"] < 32: ipv4Next += 1 - if 'ipv6base' in topo: - ipv6Next = ipaddr.IPv6Address(topo['link_ip_start']['ipv6']) - ipv6Step = 2 ** (128 - topo['link_ip_start']['v6mask']) - if topo['link_ip_start']['v6mask'] < 127: + if "ipv6base" in topo: + ipv6Next = ipaddr.IPv6Address(topo["link_ip_start"]["ipv6"]) + ipv6Step = 2 ** (128 - topo["link_ip_start"]["v6mask"]) + if topo["link_ip_start"]["v6mask"] < 127: ipv6Next += 1 for router in listRouters: - topo['routers'][router]['nextIfname'] = 0 + topo["routers"][router]["nextIfname"] = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces - if 'links' in topo['routers'][curRouter]: + if "links" in topo["routers"][curRouter]: + def link_sort(x): - if x == 'lo': + if x == "lo": return 0 - elif 'link' in x: - return int(x.split('-link')[1]) + elif "link" in x: + return int(x.split("-link")[1]) else: - return int(re_search('\d+', x).group(0)) - for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \ - iteritems(), - key=lambda x: link_sort(x[0])): - currRouter_lo_json = \ - topo['routers'][curRouter]['links'][destRouterLink] + return int(re_search("\d+", x).group(0)) + + for destRouterLink, data in sorted( + topo["routers"][curRouter]["links"].iteritems(), + key=lambda x: link_sort(x[0]), + ): + currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink] # Loopback interfaces - if 'type' in data and data['type'] == 'loopback': - if 'ipv4' in currRouter_lo_json and \ - currRouter_lo_json['ipv4'] == 'auto': - currRouter_lo_json['ipv4'] = '{}{}.{}/{}'. \ - format(topo['lo_prefix']['ipv4'], number_to_row(curRouter), \ - number_to_column(curRouter), topo['lo_prefix']['v4mask']) - if 'ipv6' in currRouter_lo_json and \ - currRouter_lo_json['ipv6'] == 'auto': - currRouter_lo_json['ipv6'] = '{}{}:{}/{}'. 
\ - format(topo['lo_prefix']['ipv6'], number_to_row(curRouter), \ - number_to_column(curRouter), topo['lo_prefix']['v6mask']) + if "type" in data and data["type"] == "loopback": + if ( + "ipv4" in currRouter_lo_json + and currRouter_lo_json["ipv4"] == "auto" + ): + currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format( + topo["lo_prefix"]["ipv4"], + number_to_row(curRouter), + number_to_column(curRouter), + topo["lo_prefix"]["v4mask"], + ) + if ( + "ipv6" in currRouter_lo_json + and currRouter_lo_json["ipv6"] == "auto" + ): + currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format( + topo["lo_prefix"]["ipv6"], + number_to_row(curRouter), + number_to_column(curRouter), + topo["lo_prefix"]["v6mask"], + ) if "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList @@ -119,52 +133,63 @@ def build_topo_from_json(tgen, topo): curRouterLink = curRouter if destRouter in listRouters: - currRouter_link_json = \ - topo['routers'][curRouter]['links'][destRouterLink] - destRouter_link_json = \ - topo['routers'][destRouter]['links'][curRouterLink] + currRouter_link_json = topo["routers"][curRouter]["links"][ + destRouterLink + ] + destRouter_link_json = topo["routers"][destRouter]["links"][ + curRouterLink + ] # Assigning name to interfaces - currRouter_link_json['interface'] = \ - '{}-{}-eth{}'.format(curRouter, destRouter, topo['routers'] \ - [curRouter]['nextIfname']) - destRouter_link_json['interface'] = \ - '{}-{}-eth{}'.format(destRouter, curRouter, topo['routers'] \ - [destRouter]['nextIfname']) + currRouter_link_json["interface"] = "{}-{}-eth{}".format( + curRouter, destRouter, topo["routers"][curRouter]["nextIfname"] + ) + destRouter_link_json["interface"] = "{}-{}-eth{}".format( + destRouter, curRouter, topo["routers"][destRouter]["nextIfname"] + ) - topo['routers'][curRouter]['nextIfname'] += 1 - topo['routers'][destRouter]['nextIfname'] += 1 + topo["routers"][curRouter]["nextIfname"] += 1 + topo["routers"][destRouter]["nextIfname"] += 1 # Linking routers to each other as defined in JSON file - tgen.gears[curRouter].add_link(tgen.gears[destRouter], - topo['routers'][curRouter]['links'][destRouterLink] \ - ['interface'], topo['routers'][destRouter]['links'] \ - [curRouterLink]['interface']) + tgen.gears[curRouter].add_link( + tgen.gears[destRouter], + topo["routers"][curRouter]["links"][destRouterLink][ + "interface" + ], + topo["routers"][destRouter]["links"][curRouterLink][ + "interface" + ], + ) # IPv4 - if 'ipv4' in currRouter_link_json: - if currRouter_link_json['ipv4'] == 'auto': - currRouter_link_json['ipv4'] = \ - '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \ - 'v4mask']) - destRouter_link_json['ipv4'] = \ - '{}/{}'.format(ipv4Next + 1, topo['link_ip_start'][ \ - 'v4mask']) + if "ipv4" in currRouter_link_json: + if currRouter_link_json["ipv4"] == "auto": + currRouter_link_json["ipv4"] = "{}/{}".format( + ipv4Next, topo["link_ip_start"]["v4mask"] + ) + destRouter_link_json["ipv4"] = "{}/{}".format( + ipv4Next + 1, topo["link_ip_start"]["v4mask"] + ) ipv4Next += ipv4Step # IPv6 - if 'ipv6' in currRouter_link_json: - if currRouter_link_json['ipv6'] == 'auto': - currRouter_link_json['ipv6'] = \ - '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \ - 'v6mask']) - destRouter_link_json['ipv6'] = \ - '{}/{}'.format(ipv6Next + 1, topo['link_ip_start'][ \ - 'v6mask']) + if "ipv6" in currRouter_link_json: + if currRouter_link_json["ipv6"] == "auto": + currRouter_link_json["ipv6"] = "{}/{}".format( + ipv6Next, topo["link_ip_start"]["v6mask"] + ) + destRouter_link_json["ipv6"] = 
"{}/{}".format( + ipv6Next + 1, topo["link_ip_start"]["v6mask"] + ) ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step) - logger.debug("Generated link data for router: %s\n%s", curRouter, - json_dumps(topo["routers"][curRouter]["links"], - indent=4, sort_keys=True)) + logger.debug( + "Generated link data for router: %s\n%s", + curRouter, + json_dumps( + topo["routers"][curRouter]["links"], indent=4, sort_keys=True + ), + ) def build_config_from_json(tgen, topo, save_bkup=True): @@ -176,27 +201,27 @@ def build_config_from_json(tgen, topo, save_bkup=True): * `topo`: json file data """ - func_dict = OrderedDict([ - ("links", create_interfaces_cfg), - ("static_routes", create_static_routes), - ("prefix_lists", create_prefix_lists), - ("bgp_community_list", create_bgp_community_lists), - ("route_maps", create_route_maps), - ("bgp", create_router_bgp) - ]) + func_dict = OrderedDict( + [ + ("links", create_interfaces_cfg), + ("static_routes", create_static_routes), + ("prefix_lists", create_prefix_lists), + ("bgp_community_list", create_bgp_community_lists), + ("route_maps", create_route_maps), + ("bgp", create_router_bgp), + ] + ) data = topo["routers"] for func_type in func_dict.keys(): - logger.info('Checking for {} configuration in input data'.format( - func_type)) + logger.info("Checking for {} configuration in input data".format(func_type)) func_dict.get(func_type)(tgen, data, build=True) - for router in sorted(topo['routers'].keys()): - logger.debug('Configuring router {}...'.format(router)) + for router in sorted(topo["routers"].keys()): + logger.debug("Configuring router {}...".format(router)) result = load_config_to_router(tgen, router, save_bkup) if not result: logger.info("Failed while configuring {}".format(router)) pytest.exit(1) - diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py index f149f34eb3..0dfa870930 100644 --- a/tests/topotests/lib/topolog.py +++ b/tests/topotests/lib/topolog.py @@ -31,22 +31,25 @@ import logging # Helper dictionary to convert Topogen logging levels to Python's logging. 
DEBUG_TOPO2LOGGING = { - 'debug': logging.DEBUG, - 'info': logging.INFO, - 'output': logging.INFO, - 'warning': logging.WARNING, - 'error': logging.ERROR, - 'critical': logging.CRITICAL, + "debug": logging.DEBUG, + "info": logging.INFO, + "output": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, } + class InfoFilter(logging.Filter): def filter(self, rec): return rec.levelno in (logging.DEBUG, logging.INFO) + # # Logger class definition # + class Logger(object): """ Logger class that encapsulates logging functions, internaly it uses Python @@ -58,32 +61,32 @@ class Logger(object): def __init__(self): # Create default global logger self.log_level = logging.INFO - self.logger = logging.Logger('topolog', level=self.log_level) + self.logger = logging.Logger("topolog", level=self.log_level) handler_stdout = logging.StreamHandler(sys.stdout) handler_stdout.setLevel(logging.DEBUG) handler_stdout.addFilter(InfoFilter()) handler_stdout.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) handler_stderr = logging.StreamHandler() handler_stderr.setLevel(logging.WARNING) handler_stderr.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) self.logger.addHandler(handler_stdout) self.logger.addHandler(handler_stderr) # Handle more loggers - self.loggers = {'topolog': self.logger} + self.loggers = {"topolog": self.logger} def set_log_level(self, level): "Set the logging level" self.log_level = DEBUG_TOPO2LOGGING.get(level) self.logger.setLevel(self.log_level) - def get_logger(self, name='topolog', log_level=None, target=sys.stdout): + def get_logger(self, name="topolog", log_level=None, target=sys.stdout): """ Get a new logger entry. Allows creating different loggers for formating, filtering or handling (file, stream or stdout/stderr). @@ -100,12 +103,13 @@ class Logger(object): handler = logging.StreamHandler(stream=target) handler.setFormatter( - logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s') + logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s") ) nlogger.addHandler(handler) self.loggers[name] = nlogger return nlogger + # # Global variables # diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 9e1d344687..fab101cb25 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -50,6 +50,7 @@ from mininet.log import setLogLevel, info from mininet.cli import CLI from mininet.link import Intf + class json_cmp_result(object): "json_cmp result class for better assertion messages" @@ -66,7 +67,7 @@ class json_cmp_result(object): return len(self.errors) > 0 def __str__(self): - return '\n'.join(self.errors) + return "\n".join(self.errors) def json_diff(d1, d2): @@ -74,12 +75,12 @@ def json_diff(d1, d2): Returns a string with the difference between JSON data. 
""" json_format_opts = { - 'indent': 4, - 'sort_keys': True, + "indent": 4, + "sort_keys": True, } dstr1 = json.dumps(d1, **json_format_opts) dstr2 = json.dumps(d2, **json_format_opts) - return difflines(dstr2, dstr1, title1='Expected value', title2='Current value', n=0) + return difflines(dstr2, dstr1, title1="Expected value", title2="Current value", n=0) def _json_list_cmp(list1, list2, parent, result): @@ -87,18 +88,21 @@ def _json_list_cmp(list1, list2, parent, result): # Check second list2 type if not isinstance(list1, type([])) or not isinstance(list2, type([])): result.add_error( - '{} has different type than expected '.format(parent) + - '(have {}, expected {}):\n{}'.format( - type(list1), type(list2), json_diff(list1, list2))) + "{} has different type than expected ".format(parent) + + "(have {}, expected {}):\n{}".format( + type(list1), type(list2), json_diff(list1, list2) + ) + ) return # Check list size if len(list2) > len(list1): result.add_error( - '{} too few items '.format(parent) + - '(have {}, expected {}:\n {})'.format( - len(list1), len(list2), - json_diff(list1, list2))) + "{} too few items ".format(parent) + + "(have {}, expected {}:\n {})".format( + len(list1), len(list2), json_diff(list1, list2) + ) + ) return # List all unmatched items errors @@ -106,7 +110,7 @@ def _json_list_cmp(list1, list2, parent, result): for expected in list2: matched = False for value in list1: - if json_cmp({'json': value}, {'json': expected}) is None: + if json_cmp({"json": value}, {"json": expected}) is None: matched = True break @@ -116,8 +120,8 @@ def _json_list_cmp(list1, list2, parent, result): # If there are unmatched items, error out. if unmatched: result.add_error( - '{} value is different (\n{})'.format( - parent, json_diff(list1, list2))) + "{} value is different (\n{})".format(parent, json_diff(list1, list2)) + ) def json_cmp(d1, d2): @@ -131,7 +135,7 @@ def json_cmp(d1, d2): Note: key absence can be tested by adding a key with value `None`. """ - squeue = [(d1, d2, 'json')] + squeue = [(d1, d2, "json")] result = json_cmp_result() for s in squeue: @@ -150,23 +154,33 @@ def json_cmp(d1, d2): s2_req = set([key for key in nd2 if nd2[key] is not None]) diff = s2_req - s1 if diff != set({}): - result.add_error('expected key(s) {} in {} (have {}):\n{}'.format( - str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2))) + result.add_error( + "expected key(s) {} in {} (have {}):\n{}".format( + str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2) + ) + ) for key in s2.intersection(s1): # Test for non existence of key in d2 if nd2[key] is None: - result.add_error('"{}" should not exist in {} (have {}):\n{}'.format( - key, parent, str(s1), json_diff(nd1[key], nd2[key]))) + result.add_error( + '"{}" should not exist in {} (have {}):\n{}'.format( + key, parent, str(s1), json_diff(nd1[key], nd2[key]) + ) + ) continue # If nd1 key is a dict, we have to recurse in it later. 
if isinstance(nd2[key], type({})): if not isinstance(nd1[key], type({})): result.add_error( - '{}["{}"] has different type than expected '.format(parent, key) + - '(have {}, expected {}):\n{}'.format( - type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key]))) + '{}["{}"] has different type than expected '.format(parent, key) + + "(have {}, expected {}):\n{}".format( + type(nd1[key]), + type(nd2[key]), + json_diff(nd1[key], nd2[key]), + ) + ) continue nparent = '{}["{}"]'.format(parent, key) squeue.append((nd1[key], nd2[key], nparent)) @@ -181,7 +195,9 @@ def json_cmp(d1, d2): if nd1[key] != nd2[key]: result.add_error( '{}["{}"] value is different (\n{})'.format( - parent, key, json_diff(nd1[key], nd2[key]))) + parent, key, json_diff(nd1[key], nd2[key]) + ) + ) continue if result.has_errors(): @@ -194,10 +210,12 @@ def router_output_cmp(router, cmd, expected): """ Runs `cmd` in router and compares the output with `expected`. """ - return difflines(normalize_text(router.vtysh_cmd(cmd)), - normalize_text(expected), - title1="Current output", - title2="Expected output") + return difflines( + normalize_text(router.vtysh_cmd(cmd)), + normalize_text(expected), + title1="Current output", + title2="Expected output", + ) def router_json_cmp(router, cmd, data): @@ -232,7 +250,9 @@ def run_and_expect(func, what, count=20, wait=3): logger.info( "'{}' polling started (interval {} secs, maximum wait {} secs)".format( - func_name, wait, int(wait * count))) + func_name, wait, int(wait * count) + ) + ) while count > 0: result = func() @@ -242,13 +262,17 @@ def run_and_expect(func, what, count=20, wait=3): continue end_time = time.time() - logger.info("'{}' succeeded after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.info( + "'{}' succeeded after {:.2f} seconds".format( + func_name, end_time - start_time + ) + ) return (True, result) end_time = time.time() - logger.error("'{}' failed after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.error( + "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time) + ) return (False, result) @@ -273,12 +297,16 @@ def run_and_expect_type(func, etype, count=20, wait=3, avalue=None): logger.info( "'{}' polling started (interval {} secs, maximum wait {} secs)".format( - func_name, wait, int(wait * count))) + func_name, wait, int(wait * count) + ) + ) while count > 0: result = func() if not isinstance(result, etype): - logger.debug("Expected result type '{}' got '{}' instead".format(etype, type(result))) + logger.debug( + "Expected result type '{}' got '{}' instead".format(etype, type(result)) + ) time.sleep(wait) count -= 1 continue @@ -290,13 +318,17 @@ def run_and_expect_type(func, etype, count=20, wait=3, avalue=None): continue end_time = time.time() - logger.info("'{}' succeeded after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.info( + "'{}' succeeded after {:.2f} seconds".format( + func_name, end_time - start_time + ) + ) return (True, result) end_time = time.time() - logger.error("'{}' failed after {:.2f} seconds".format( - func_name, end_time - start_time)) + logger.error( + "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time) + ) return (False, result) @@ -305,12 +337,15 @@ def int2dpid(dpid): try: dpid = hex(dpid)[2:] - dpid = '0'*(16-len(dpid))+dpid + dpid = "0" * (16 - len(dpid)) + dpid return dpid except IndexError: - raise Exception('Unable to derive default datapath ID - ' - 'please either specify a dpid or use a ' - 'canonical switch name 
such as s23.') + raise Exception( + "Unable to derive default datapath ID - " + "please either specify a dpid or use a " + "canonical switch name such as s23." + ) + def pid_exists(pid): "Check whether pid exists in the current process table." @@ -333,70 +368,78 @@ def pid_exists(pid): else: return True + def get_textdiff(text1, text2, title1="", title2="", **opts): "Returns empty string if same or formatted diff" - diff = '\n'.join(difflib.unified_diff(text1, text2, - fromfile=title1, tofile=title2, **opts)) + diff = "\n".join( + difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts) + ) # Clean up line endings diff = os.linesep.join([s for s in diff.splitlines() if s]) return diff -def difflines(text1, text2, title1='', title2='', **opts): + +def difflines(text1, text2, title1="", title2="", **opts): "Wrapper for get_textdiff to avoid string transformations." - text1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1) - text2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1) + text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(1) + text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(1) return get_textdiff(text1, text2, title1, title2, **opts) + def get_file(content): """ Generates a temporary file in '/tmp' with `content` and returns the file name. """ - fde = tempfile.NamedTemporaryFile(mode='w', delete=False) + fde = tempfile.NamedTemporaryFile(mode="w", delete=False) fname = fde.name fde.write(content) fde.close() return fname + def normalize_text(text): """ Strips formating spaces/tabs, carriage returns and trailing whitespace. """ - text = re.sub(r'[ \t]+', ' ', text) - text = re.sub(r'\r', '', text) + text = re.sub(r"[ \t]+", " ", text) + text = re.sub(r"\r", "", text) # Remove whitespace in the middle of text. - text = re.sub(r'[ \t]+\n', '\n', text) + text = re.sub(r"[ \t]+\n", "\n", text) # Remove whitespace at the end of the text. text = text.rstrip() return text + def module_present_linux(module, load): """ Returns whether `module` is present. If `load` is true, it will try to load it via modprobe. """ - with open('/proc/modules', 'r') as modules_file: - if module.replace('-','_') in modules_file.read(): + with open("/proc/modules", "r") as modules_file: + if module.replace("-", "_") in modules_file.read(): return True - cmd = '/sbin/modprobe {}{}'.format('' if load else '-n ', - module) + cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module) if os.system(cmd) != 0: return False else: return True + def module_present_freebsd(module, load): return True + def module_present(module, load=True): if sys.platform.startswith("linux"): return module_present_linux(module, load) elif sys.platform.startswith("freebsd"): return module_present_freebsd(module, load) + def version_cmp(v1, v2): """ Compare two version strings and returns: @@ -407,15 +450,15 @@ def version_cmp(v1, v2): Raises `ValueError` if versions are not well formated. 
""" - vregex = r'(?P<whole>\d+(\.(\d+))*)' + vregex = r"(?P<whole>\d+(\.(\d+))*)" v1m = re.match(vregex, v1) v2m = re.match(vregex, v2) if v1m is None or v2m is None: raise ValueError("got a invalid version string") # Split values - v1g = v1m.group('whole').split('.') - v2g = v2m.group('whole').split('.') + v1g = v1m.group("whole").split(".") + v2g = v2m.group("whole").split(".") # Get the longest version string vnum = len(v1g) @@ -452,35 +495,42 @@ def version_cmp(v1, v2): return -1 return 0 + def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None): if ifaceaction: - str_ifaceaction = 'no shutdown' + str_ifaceaction = "no shutdown" else: - str_ifaceaction = 'shutdown' + str_ifaceaction = "shutdown" if vrf_name == None: - cmd = 'vtysh -c \"configure terminal\" -c \"interface {0}\" -c \"{1}\"'.format(ifacename, str_ifaceaction) + cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format( + ifacename, str_ifaceaction + ) else: - cmd = 'vtysh -c \"configure terminal\" -c \"interface {0} vrf {1}\" -c \"{2}\"'.format(ifacename, vrf_name, str_ifaceaction) + cmd = 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( + ifacename, vrf_name, str_ifaceaction + ) node.run(cmd) + def ip4_route_zebra(node, vrf_name=None): """ Gets an output of 'show ip route' command. It can be used with comparing the output to a reference """ if vrf_name == None: - tmp = node.vtysh_cmd('show ip route') + tmp = node.vtysh_cmd("show ip route") else: - tmp = node.vtysh_cmd('show ip route vrf {0}'.format(vrf_name)) + tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name)) output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp) lines = output.splitlines() header_found = False while lines and (not lines[0].strip() or not header_found): - if '> - selected route' in lines[0]: + if "> - selected route" in lines[0]: header_found = True lines = lines[1:] - return '\n'.join(lines) + return "\n".join(lines) + def ip6_route_zebra(node, vrf_name=None): """ @@ -489,40 +539,42 @@ def ip6_route_zebra(node, vrf_name=None): """ if vrf_name == None: - tmp = node.vtysh_cmd('show ipv6 route') + tmp = node.vtysh_cmd("show ipv6 route") else: - tmp = node.vtysh_cmd('show ipv6 route vrf {0}'.format(vrf_name)) + tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name)) # Mask out timestamp output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp) # Mask out the link-local addresses - output = re.sub(r'fe80::[^ ]+,', 'fe80::XXXX:XXXX:XXXX:XXXX,', output) + output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output) lines = output.splitlines() header_found = False while lines and (not lines[0].strip() or not header_found): - if '> - selected route' in lines[0]: + if "> - selected route" in lines[0]: header_found = True lines = lines[1:] - return '\n'.join(lines) + return "\n".join(lines) def proto_name_to_number(protocol): return { - 'bgp': '186', - 'isis': '187', - 'ospf': '188', - 'rip': '189', - 'ripng': '190', - 'nhrp': '191', - 'eigrp': '192', - 'ldp': '193', - 'sharp': '194', - 'pbr': '195', - 'static': '196' - }.get(protocol, protocol) # default return same as input + "bgp": "186", + "isis": "187", + "ospf": "188", + "rip": "189", + "ripng": "190", + "nhrp": "191", + "eigrp": "192", + "ldp": "193", + "sharp": "194", + "pbr": "195", + "static": "196", + }.get( + protocol, protocol + ) # default return same as input def ip4_route(node): @@ -543,28 +595,29 @@ def ip4_route(node): } } """ - output = normalize_text(node.run('ip 
route')).splitlines() + output = normalize_text(node.run("ip route")).splitlines() result = {} for line in output: - columns = line.split(' ') + columns = line.split(" ") route = result[columns[0]] = {} prev = None for column in columns: - if prev == 'dev': - route['dev'] = column - if prev == 'via': - route['via'] = column - if prev == 'proto': + if prev == "dev": + route["dev"] = column + if prev == "via": + route["via"] = column + if prev == "proto": # translate protocol names back to numbers - route['proto'] = proto_name_to_number(column) - if prev == 'metric': - route['metric'] = column - if prev == 'scope': - route['scope'] = column + route["proto"] = proto_name_to_number(column) + if prev == "metric": + route["metric"] = column + if prev == "scope": + route["scope"] = column prev = column return result + def ip6_route(node): """ Gets a structured return of the command 'ip -6 route'. It can be used in @@ -582,80 +635,103 @@ def ip6_route(node): } } """ - output = normalize_text(node.run('ip -6 route')).splitlines() + output = normalize_text(node.run("ip -6 route")).splitlines() result = {} for line in output: - columns = line.split(' ') + columns = line.split(" ") route = result[columns[0]] = {} prev = None for column in columns: - if prev == 'dev': - route['dev'] = column - if prev == 'via': - route['via'] = column - if prev == 'proto': + if prev == "dev": + route["dev"] = column + if prev == "via": + route["via"] = column + if prev == "proto": # translate protocol names back to numbers - route['proto'] = proto_name_to_number(column) - if prev == 'metric': - route['metric'] = column - if prev == 'pref': - route['pref'] = column + route["proto"] = proto_name_to_number(column) + if prev == "metric": + route["metric"] = column + if prev == "pref": + route["pref"] = column prev = column return result + def sleep(amount, reason=None): """ Sleep wrapper that registers in the log the amount of sleep """ if reason is None: - logger.info('Sleeping for {} seconds'.format(amount)) + logger.info("Sleeping for {} seconds".format(amount)) else: - logger.info(reason + ' ({} seconds)'.format(amount)) + logger.info(reason + " ({} seconds)".format(amount)) time.sleep(amount) + def checkAddressSanitizerError(output, router, component): "Checks for AddressSanitizer in output. 
If found, then logs it and returns true, false otherwise" - addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ', output) + addressSantizerError = re.search( + "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output + ) if addressSantizerError: - sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component)) + sys.stderr.write( + "%s: %s triggered an exception by AddressSanitizer\n" % (router, component) + ) # Sanitizer Error found in log pidMark = addressSantizerError.group(1) - addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL) + addressSantizerLog = re.search( + "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL + ) if addressSantizerLog: - callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__']) + callingTest = os.path.basename( + sys._current_frames().values()[0].f_back.f_back.f_globals["__file__"] + ) callingProc = sys._getframe(2).f_code.co_name with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile: - sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n') + sys.stderr.write( + "\n".join(addressSantizerLog.group(1).splitlines()) + "\n" + ) addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2)) - addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router)) - addrSanFile.write(' '+ '\n '.join(addressSantizerLog.group(1).splitlines()) + '\n') + addrSanFile.write( + "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" + % (callingTest, callingProc, router) + ) + addrSanFile.write( + " " + + "\n ".join(addressSantizerLog.group(1).splitlines()) + + "\n" + ) addrSanFile.write("\n---------------\n") return True return False + def addRouter(topo, name): "Adding a FRRouter (or Quagga) to Topology" - MyPrivateDirs = ['/etc/frr', - '/etc/quagga', - '/var/run/frr', - '/var/run/quagga', - '/var/log'] + MyPrivateDirs = [ + "/etc/frr", + "/etc/quagga", + "/var/run/frr", + "/var/run/quagga", + "/var/log", + ] if sys.platform.startswith("linux"): return topo.addNode(name, cls=LinuxRouter, privateDirs=MyPrivateDirs) elif sys.platform.startswith("freebsd"): return topo.addNode(name, cls=FreeBSDRouter, privateDirs=MyPrivateDirs) + def set_sysctl(node, sysctl, value): "Set a sysctl value and return None on success or an error string" - valuestr = '{}'.format(value) + valuestr = "{}".format(value) command = "sysctl {0}={1}".format(sysctl, valuestr) cmdret = node.cmd(command) - matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret) + matches = re.search(r"([^ ]+) = ([^\s]+)", cmdret) if matches is None: return cmdret if matches.group(1) != sysctl: @@ -665,6 +741,7 @@ def set_sysctl(node, sysctl, value): return None + def assert_sysctl(node, sysctl, value): "Set and assert that the sysctl is set with the specified value." assert set_sysctl(node, sysctl, value) is None @@ -675,65 +752,81 @@ class Router(Node): def __init__(self, name, **params): super(Router, self).__init__(name, **params) - self.logdir = params.get('logdir') + self.logdir = params.get("logdir") # Backward compatibility: # Load configuration defaults like topogen. 
- self.config_defaults = configparser.ConfigParser({ - 'verbosity': 'info', - 'frrdir': '/usr/lib/frr', - 'quaggadir': '/usr/lib/quagga', - 'routertype': 'frr', - 'memleak_path': None, - }) + self.config_defaults = configparser.ConfigParser( + { + "verbosity": "info", + "frrdir": "/usr/lib/frr", + "quaggadir": "/usr/lib/quagga", + "routertype": "frr", + "memleak_path": None, + } + ) self.config_defaults.read( - os.path.join(os.path.dirname(os.path.realpath(__file__)), - '../pytest.ini') + os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini") ) # If this topology is using old API and doesn't have logdir # specified, then attempt to generate an unique logdir. if self.logdir is None: - cur_test = os.environ['PYTEST_CURRENT_TEST'] - self.logdir = ('/tmp/topotests/' + - cur_test[0:cur_test.find(".py")].replace('/', '.')) + cur_test = os.environ["PYTEST_CURRENT_TEST"] + self.logdir = "/tmp/topotests/" + cur_test[ + 0 : cur_test.find(".py") + ].replace("/", ".") # If the logdir is not created, then create it and set the # appropriated permissions. if not os.path.isdir(self.logdir): - os.system('mkdir -p ' + self.logdir + '/' + name) - os.system('chmod -R go+rw /tmp/topotests') + os.system("mkdir -p " + self.logdir + "/" + name) + os.system("chmod -R go+rw /tmp/topotests") self.daemondir = None self.hasmpls = False - self.routertype = 'frr' - self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0, - 'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0, - 'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0, 'staticd': 0, - 'bfdd': 0, 'sharpd': 0} - self.daemons_options = {'zebra': ''} + self.routertype = "frr" + self.daemons = { + "zebra": 0, + "ripd": 0, + "ripngd": 0, + "ospfd": 0, + "ospf6d": 0, + "isisd": 0, + "bgpd": 0, + "pimd": 0, + "ldpd": 0, + "eigrpd": 0, + "nhrpd": 0, + "staticd": 0, + "bfdd": 0, + "sharpd": 0, + } + self.daemons_options = {"zebra": ""} self.reportCores = True self.version = None def _config_frr(self, **params): "Configure FRR binaries" - self.daemondir = params.get('frrdir') + self.daemondir = params.get("frrdir") if self.daemondir is None: - self.daemondir = self.config_defaults.get('topogen', 'frrdir') + self.daemondir = self.config_defaults.get("topogen", "frrdir") - zebra_path = os.path.join(self.daemondir, 'zebra') + zebra_path = os.path.join(self.daemondir, "zebra") if not os.path.isfile(zebra_path): raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path)) def _config_quagga(self, **params): "Configure Quagga binaries" - self.daemondir = params.get('quaggadir') + self.daemondir = params.get("quaggadir") if self.daemondir is None: - self.daemondir = self.config_defaults.get('topogen', 'quaggadir') + self.daemondir = self.config_defaults.get("topogen", "quaggadir") - zebra_path = os.path.join(self.daemondir, 'zebra') + zebra_path = os.path.join(self.daemondir, "zebra") if not os.path.isfile(zebra_path): - raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path)) + raise Exception( + "Quagga zebra binary doesn't exist at {}".format(zebra_path) + ) # pylint: disable=W0221 # Some params are only meaningful for the parent class. @@ -741,28 +834,27 @@ class Router(Node): super(Router, self).config(**params) # User did not specify the daemons directory, try to autodetect it. 
- self.daemondir = params.get('daemondir') + self.daemondir = params.get("daemondir") if self.daemondir is None: - self.routertype = params.get('routertype', - self.config_defaults.get( - 'topogen', - 'routertype')) - if self.routertype == 'quagga': + self.routertype = params.get( + "routertype", self.config_defaults.get("topogen", "routertype") + ) + if self.routertype == "quagga": self._config_quagga(**params) else: self._config_frr(**params) else: # Test the provided path - zpath = os.path.join(self.daemondir, 'zebra') + zpath = os.path.join(self.daemondir, "zebra") if not os.path.isfile(zpath): - raise Exception('No zebra binary found in {}'.format(zpath)) + raise Exception("No zebra binary found in {}".format(zpath)) # Allow user to specify routertype when the path was specified. - if params.get('routertype') is not None: - self.routertype = params.get('routertype') + if params.get("routertype") is not None: + self.routertype = params.get("routertype") - self.cmd('ulimit -c unlimited') + self.cmd("ulimit -c unlimited") # Set ownership of config files - self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype)) + self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype)) def terminate(self): # Delete Running Quagga or FRR Daemons @@ -772,62 +864,66 @@ class Router(Node): # self.cmd('kill -7 `cat %s`' % d.rstrip()) # self.waitOutput() # Disable forwarding - set_sysctl(self, 'net.ipv4.ip_forward', 0) - set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0) + set_sysctl(self, "net.ipv4.ip_forward", 0) + set_sysctl(self, "net.ipv6.conf.all.forwarding", 0) super(Router, self).terminate() - os.system('chmod -R go+rw /tmp/topotests') + os.system("chmod -R go+rw /tmp/topotests") - def stopRouter(self, wait=True, assertOnError=True, minErrorVersion='5.1'): + def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"): # Stop Running Quagga or FRR Daemons - rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype) + rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype) errors = "" if re.search(r"No such file or directory", rundaemons): return errors if rundaemons is not None: numRunning = 0 for d in StringIO.StringIO(rundaemons): - daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip() - if (daemonpid.isdigit() and pid_exists(int(daemonpid))): - logger.info('{}: stopping {}'.format( - self.name, - os.path.basename(d.rstrip().rsplit(".", 1)[0]) - )) - self.cmd('kill -TERM %s' % daemonpid) + daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() + if daemonpid.isdigit() and pid_exists(int(daemonpid)): + logger.info( + "{}: stopping {}".format( + self.name, os.path.basename(d.rstrip().rsplit(".", 1)[0]) + ) + ) + self.cmd("kill -TERM %s" % daemonpid) self.waitOutput() if pid_exists(int(daemonpid)): numRunning += 1 if wait and numRunning > 0: - sleep(2, '{}: waiting for daemons stopping'.format(self.name)) + sleep(2, "{}: waiting for daemons stopping".format(self.name)) # 2nd round of kill if daemons didn't exit for d in StringIO.StringIO(rundaemons): - daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip() - if (daemonpid.isdigit() and pid_exists(int(daemonpid))): - logger.info('{}: killing {}'.format( - self.name, - os.path.basename(d.rstrip().rsplit(".", 1)[0]) - )) - self.cmd('kill -7 %s' % daemonpid) + daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() + if daemonpid.isdigit() and pid_exists(int(daemonpid)): + logger.info( + "{}: killing {}".format( + self.name, + os.path.basename(d.rstrip().rsplit(".", 1)[0]), + ) + ) + self.cmd("kill -7 %s" % daemonpid) 
self.waitOutput() - self.cmd('rm -- {}'.format(d.rstrip())) + self.cmd("rm -- {}".format(d.rstrip())) if wait: - errors = self.checkRouterCores(reportOnce=True) - if self.checkRouterVersion('<', minErrorVersion): - #ignore errors in old versions - errors = "" - if assertOnError and len(errors) > 0: - assert "Errors found - details follow:" == 0, errors + errors = self.checkRouterCores(reportOnce=True) + if self.checkRouterVersion("<", minErrorVersion): + # ignore errors in old versions + errors = "" + if assertOnError and len(errors) > 0: + assert "Errors found - details follow:" == 0, errors return errors def removeIPs(self): for interface in self.intfNames(): - self.cmd('ip address flush', interface) + self.cmd("ip address flush", interface) def checkCapability(self, daemon, param): if param is not None: daemon_path = os.path.join(self.daemondir, daemon) - daemon_search_option = param.replace('-','') - output = self.cmd('{0} -h | grep {1}'.format( - daemon_path, daemon_search_option)) + daemon_search_option = param.replace("-", "") + output = self.cmd( + "{0} -h | grep {1}".format(daemon_path, daemon_search_option) + ) if daemon_search_option not in output: return False return True @@ -839,74 +935,89 @@ class Router(Node): if param is not None: self.daemons_options[daemon] = param if source is None: - self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon)) + self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon)) self.waitOutput() else: - self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon)) + self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon)) self.waitOutput() - self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon)) + self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon)) self.waitOutput() - self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon)) + self.cmd( + "chown %s:%s /etc/%s/%s.conf" + % (self.routertype, self.routertype, self.routertype, daemon) + ) self.waitOutput() - if (daemon == 'zebra') and (self.daemons['staticd'] == 0): + if (daemon == "zebra") and (self.daemons["staticd"] == 0): # Add staticd with zebra - if it exists - staticd_path = os.path.join(self.daemondir, 'staticd') + staticd_path = os.path.join(self.daemondir, "staticd") if os.path.isfile(staticd_path): - self.daemons['staticd'] = 1 - self.daemons_options['staticd'] = '' + self.daemons["staticd"] = 1 + self.daemons_options["staticd"] = "" # Auto-Started staticd has no config, so it will read from zebra config else: - logger.info('No daemon {} known'.format(daemon)) + logger.info("No daemon {} known".format(daemon)) # print "Daemons after:", self.daemons def startRouter(self, tgen=None): # Disable integrated-vtysh-config - self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype) - self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype)) + self.cmd( + 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' + % self.routertype + ) + self.cmd( + "chown %s:%svty /etc/%s/vtysh.conf" + % (self.routertype, self.routertype, self.routertype) + ) # TODO remove the following lines after all tests are migrated to Topogen. 
# Try to find relevant old logfiles in /tmp and delete them - map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) # Remove old core files - map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name))) + map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) # Remove IP addresses from OS first - we have them in zebra.conf self.removeIPs() # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher # No error - but return message and skip all the tests - if self.daemons['ldpd'] == 1: - ldpd_path = os.path.join(self.daemondir, 'ldpd') + if self.daemons["ldpd"] == 1: + ldpd_path = os.path.join(self.daemondir, "ldpd") if not os.path.isfile(ldpd_path): logger.info("LDP Test, but no ldpd compiled or installed") return "LDP Test, but no ldpd compiled or installed" - if version_cmp(platform.release(), '4.5') < 0: + if version_cmp(platform.release(), "4.5") < 0: logger.info("LDP Test need Linux Kernel 4.5 minimum") return "LDP Test need Linux Kernel 4.5 minimum" # Check if have mpls if tgen != None: self.hasmpls = tgen.hasmpls if self.hasmpls != True: - logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)") + logger.info( + "LDP/MPLS Tests will be skipped, platform missing module(s)" + ) else: # Test for MPLS Kernel modules available self.hasmpls = False - if not module_present('mpls-router'): - logger.info('MPLS tests will not run (missing mpls-router kernel module)') - elif not module_present('mpls-iptunnel'): - logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)') + if not module_present("mpls-router"): + logger.info( + "MPLS tests will not run (missing mpls-router kernel module)" + ) + elif not module_present("mpls-iptunnel"): + logger.info( + "MPLS tests will not run (missing mpls-iptunnel kernel module)" + ) else: self.hasmpls = True if self.hasmpls != True: return "LDP/MPLS Tests need mpls kernel modules" - self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels') + self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels") - if self.daemons['eigrpd'] == 1: - eigrpd_path = os.path.join(self.daemondir, 'eigrpd') + if self.daemons["eigrpd"] == 1: + eigrpd_path = os.path.join(self.daemondir, "eigrpd") if not os.path.isfile(eigrpd_path): logger.info("EIGRP Test, but no eigrpd compiled or installed") return "EIGRP Test, but no eigrpd compiled or installed" - if self.daemons['bfdd'] == 1: - bfdd_path = os.path.join(self.daemondir, 'bfdd') + if self.daemons["bfdd"] == 1: + bfdd_path = os.path.join(self.daemondir, "bfdd") if not os.path.isfile(bfdd_path): logger.info("BFD Test, but no bfdd compiled or installed") return "BFD Test, but no bfdd compiled or installed" @@ -917,52 +1028,65 @@ class Router(Node): def restartRouter(self): # Starts actual daemons without init (ie restart) # cd to per node directory - self.cmd('cd {}/{}'.format(self.logdir, self.name)) - self.cmd('umask 000') - #Re-enable to allow for report per run + self.cmd("cd {}/{}".format(self.logdir, self.name)) + self.cmd("umask 000") + # Re-enable to allow for report per run self.reportCores = True if self.version == None: - self.version = self.cmd(os.path.join(self.daemondir, 'bgpd')+' -v').split()[2] - logger.info('{}: running version: {}'.format(self.name,self.version)) + self.version = self.cmd( + os.path.join(self.daemondir, "bgpd") + " -v" + ).split()[2] + logger.info("{}: running version: {}".format(self.name, self.version)) # Start 
Zebra first - if self.daemons['zebra'] == 1: - zebra_path = os.path.join(self.daemondir, 'zebra') - zebra_option = self.daemons_options['zebra'] - self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format( - zebra_path, zebra_option, self.logdir, self.name - )) + if self.daemons["zebra"] == 1: + zebra_path = os.path.join(self.daemondir, "zebra") + zebra_option = self.daemons_options["zebra"] + self.cmd( + "{0} {1} > zebra.out 2> zebra.err &".format( + zebra_path, zebra_option, self.logdir, self.name + ) + ) self.waitOutput() - logger.debug('{}: {} zebra started'.format(self, self.routertype)) - sleep(1, '{}: waiting for zebra to start'.format(self.name)) + logger.debug("{}: {} zebra started".format(self, self.routertype)) + sleep(1, "{}: waiting for zebra to start".format(self.name)) # Start staticd next if required - if self.daemons['staticd'] == 1: - staticd_path = os.path.join(self.daemondir, 'staticd') - staticd_option = self.daemons_options['staticd'] - self.cmd('{0} {1} > staticd.out 2> staticd.err &'.format( - staticd_path, staticd_option, self.logdir, self.name - )) + if self.daemons["staticd"] == 1: + staticd_path = os.path.join(self.daemondir, "staticd") + staticd_option = self.daemons_options["staticd"] + self.cmd( + "{0} {1} > staticd.out 2> staticd.err &".format( + staticd_path, staticd_option, self.logdir, self.name + ) + ) self.waitOutput() - logger.debug('{}: {} staticd started'.format(self, self.routertype)) - # Fix Link-Local Addresses + logger.debug("{}: {} staticd started".format(self, self.routertype)) + # Fix Link-Local Addresses # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this - self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done') + self.cmd( + "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done" + ) # Now start all the other daemons for daemon in self.daemons: # Skip disabled daemons and zebra - if self.daemons[daemon] == 0 or daemon == 'zebra' or daemon == 'staticd': + if self.daemons[daemon] == 0 or daemon == "zebra" or daemon == "staticd": continue daemon_path = os.path.join(self.daemondir, daemon) - self.cmd('{0} {1} > {2}.out 2> {2}.err &'.format( - daemon_path, self.daemons_options.get(daemon, ''), daemon - )) + self.cmd( + "{0} {1} > {2}.out 2> {2}.err &".format( + daemon_path, self.daemons_options.get(daemon, ""), daemon + ) + ) self.waitOutput() - logger.debug('{}: {} {} started'.format(self, self.routertype, daemon)) + logger.debug("{}: {} {} started".format(self, self.routertype, daemon)) + def getStdErr(self, daemon): - return self.getLog('err', daemon) + return self.getLog("err", daemon) + def getStdOut(self, daemon): - return self.getLog('out', daemon) + return self.getLog("out", daemon) + def getLog(self, log, daemon): - return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log)) + return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log)) def checkRouterCores(self, reportLeaks=True, reportOnce=False): if reportOnce and not self.reportCores: @@ -970,33 +1094,62 @@ class Router(Node): reportMade = False traces = "" for daemon in self.daemons: - if (self.daemons[daemon] == 1): + if self.daemons[daemon] == 1: # Look for core file - corefiles = 
glob.glob('{}/{}/{}_core*.dmp'.format( - self.logdir, self.name, daemon)) - if (len(corefiles) > 0): + corefiles = glob.glob( + "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon) + ) + if len(corefiles) > 0: daemon_path = os.path.join(self.daemondir, daemon) - backtrace = subprocess.check_output([ - "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0]) - ], shell=True) - sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon)) + backtrace = subprocess.check_output( + [ + "gdb {} {} --batch -ex bt 2> /dev/null".format( + daemon_path, corefiles[0] + ) + ], + shell=True, + ) + sys.stderr.write( + "\n%s: %s crashed. Core file found - Backtrace follows:\n" + % (self.name, daemon) + ) sys.stderr.write("%s" % backtrace) - traces = traces + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" % (self.name, daemon, backtrace) + traces = ( + traces + + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" + % (self.name, daemon, backtrace) + ) reportMade = True elif reportLeaks: log = self.getStdErr(daemon) if "memstats" in log: - sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon)) - traces = traces + "\n%s: %s has memory leaks:\n" % (self.name, daemon) + sys.stderr.write( + "%s: %s has memory leaks:\n" % (self.name, daemon) + ) + traces = traces + "\n%s: %s has memory leaks:\n" % ( + self.name, + daemon, + ) log = re.sub("core_handler: ", "", log) - log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n ## \1", log) + log = re.sub( + r"(showing active allocations in memory group [a-zA-Z0-9]+)", + r"\n ## \1", + log, + ) log = re.sub("memstats: ", " ", log) sys.stderr.write(log) reportMade = True # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found - if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon): - sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)) - traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon) + if checkAddressSanitizerError( + self.getStdErr(daemon), self.name, daemon + ): + sys.stderr.write( + "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon) + ) + traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % ( + self.name, + daemon, + ) reportMade = True if reportMade: self.reportCores = False @@ -1007,7 +1160,9 @@ class Router(Node): global fatal_error - daemonsRunning = self.cmd('vtysh -c "show logging" | grep "Logging configuration for"') + daemonsRunning = self.cmd( + 'vtysh -c "show logging" | grep "Logging configuration for"' + ) # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"): return "%s: vtysh killed by AddressSanitizer" % (self.name) @@ -1016,32 +1171,59 @@ class Router(Node): if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning): sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon)) if daemon is "staticd": - sys.stderr.write("You may have a copy of staticd installed but are attempting to test against\n") - sys.stderr.write("a version of FRR that does not have staticd, please cleanup the install dir\n") + sys.stderr.write( + "You may have a copy of staticd installed but are attempting to test against\n" + ) + sys.stderr.write( + "a version of FRR that does not have staticd, please cleanup the install dir\n" + ) # Look for core file - corefiles = 
glob.glob('{}/{}/{}_core*.dmp'.format( - self.logdir, self.name, daemon)) - if (len(corefiles) > 0): + corefiles = glob.glob( + "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon) + ) + if len(corefiles) > 0: daemon_path = os.path.join(self.daemondir, daemon) - backtrace = subprocess.check_output([ - "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0]) - ], shell=True) - sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon)) + backtrace = subprocess.check_output( + [ + "gdb {} {} --batch -ex bt 2> /dev/null".format( + daemon_path, corefiles[0] + ) + ], + shell=True, + ) + sys.stderr.write( + "\n%s: %s crashed. Core file found - Backtrace follows:\n" + % (self.name, daemon) + ) sys.stderr.write("%s\n" % backtrace) else: # No core found - If we find matching logfile in /tmp, then print last 20 lines from it. - if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)): - log_tail = subprocess.check_output([ - "tail -n20 {}/{}/{}.log 2> /dev/null".format( - self.logdir, self.name, daemon) - ], shell=True) - sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon)) + if os.path.isfile( + "{}/{}/{}.log".format(self.logdir, self.name, daemon) + ): + log_tail = subprocess.check_output( + [ + "tail -n20 {}/{}/{}.log 2> /dev/null".format( + self.logdir, self.name, daemon + ) + ], + shell=True, + ) + sys.stderr.write( + "\nFrom %s %s %s log file:\n" + % (self.routertype, self.name, daemon) + ) sys.stderr.write("%s\n" % log_tail) # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found - if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon): - return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon) + if checkAddressSanitizerError( + self.getStdErr(daemon), self.name, daemon + ): + return "%s: Daemon %s not running - killed by AddressSanitizer" % ( + self.name, + daemon, + ) return "%s: Daemon %s not running" % (self.name, daemon) return "" @@ -1061,25 +1243,27 @@ class Router(Node): # Make sure we have version information first if self.version == None: - self.version = self.cmd(os.path.join(self.daemondir, 'bgpd')+' -v').split()[2] - logger.info('{}: running version: {}'.format(self.name,self.version)) + self.version = self.cmd( + os.path.join(self.daemondir, "bgpd") + " -v" + ).split()[2] + logger.info("{}: running version: {}".format(self.name, self.version)) rversion = self.version if rversion is None: return False result = version_cmp(rversion, version) - if cmpop == '>=': + if cmpop == ">=": return result >= 0 - if cmpop == '>': + if cmpop == ">": return result > 0 - if cmpop == '=': + if cmpop == "=": return result == 0 - if cmpop == '<': + if cmpop == "<": return result < 0 - if cmpop == '<': + if cmpop == "<": return result < 0 - if cmpop == '<=': + if cmpop == "<=": return result <= 0 def get_ipv6_linklocal(self): @@ -1087,37 +1271,41 @@ class Router(Node): linklocal = [] - ifaces = self.cmd('ip -6 address') + ifaces = self.cmd("ip -6 address") # Fix newlines (make them all the same) - ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines() - interface="" - ll_per_if_count=0 + ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines() + interface = "" + ll_per_if_count = 0 for line in ifaces: - m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line) + m = re.search("[0-9]+: ([^:@]+)[@if0-9:]+ <", line) if m: interface = m.group(1) ll_per_if_count = 0 - m = re.search('inet6 
(fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line) + m = re.search( + "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link", + line, + ) if m: local = m.group(1) ll_per_if_count += 1 - if (ll_per_if_count > 1): + if ll_per_if_count > 1: linklocal += [["%s-%s" % (interface, ll_per_if_count), local]] else: linklocal += [[interface, local]] return linklocal + def daemon_available(self, daemon): "Check if specified daemon is installed (and for ldp if kernel supports MPLS)" daemon_path = os.path.join(self.daemondir, daemon) if not os.path.isfile(daemon_path): return False - if (daemon == 'ldpd'): - if version_cmp(platform.release(), '4.5') < 0: + if daemon == "ldpd": + if version_cmp(platform.release(), "4.5") < 0: return False - if not module_present('mpls-router', load=False): + if not module_present("mpls-router", load=False): return False - if not module_present('mpls-iptunnel', load=False): + if not module_present("mpls-iptunnel", load=False): return False return True @@ -1125,18 +1313,20 @@ class Router(Node): "Return the type of Router (frr or quagga)" return self.routertype + def report_memory_leaks(self, filename_prefix, testscript): "Report Memory Leaks to file prefixed with given string" leakfound = False filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt" for daemon in self.daemons: - if (self.daemons[daemon] == 1): + if self.daemons[daemon] == 1: log = self.getStdErr(daemon) if "memstats" in log: # Found memory leak - logger.info('\nRouter {} {} StdErr Log:\n{}'.format( - self.name, daemon, log)) + logger.info( + "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log) + ) if not leakfound: leakfound = True # Check if file already exists @@ -1144,17 +1334,25 @@ class Router(Node): leakfile = open(filename, "a") if not fileexists: # New file - add header - leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript) + leakfile.write( + "# Memory Leak Detection for topotest %s\n\n" + % testscript + ) leakfile.write("## Router %s\n" % self.name) leakfile.write("### Process %s\n" % daemon) log = re.sub("core_handler: ", "", log) - log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log) + log = re.sub( + r"(showing active allocations in memory group [a-zA-Z0-9]+)", + r"\n#### \1\n", + log, + ) log = re.sub("memstats: ", " ", log) leakfile.write(log) leakfile.write("\n") if leakfound: leakfile.close() + class LinuxRouter(Router): "A Linux Router Node with IPv4/IPv6 forwarding enabled." @@ -1164,25 +1362,26 @@ class LinuxRouter(Router): def config(self, **params): Router.config(self, **params) # Enable forwarding on the router - assert_sysctl(self, 'net.ipv4.ip_forward', 1) - assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1) + assert_sysctl(self, "net.ipv4.ip_forward", 1) + assert_sysctl(self, "net.ipv6.conf.all.forwarding", 1) # Enable coredumps - assert_sysctl(self, 'kernel.core_uses_pid', 1) - assert_sysctl(self, 'fs.suid_dumpable', 1) - #this applies to the kernel not the namespace... - #original on ubuntu 17.x, but apport won't save as in namespace + assert_sysctl(self, "kernel.core_uses_pid", 1) + assert_sysctl(self, "fs.suid_dumpable", 1) + # this applies to the kernel not the namespace... 
+ # original on ubuntu 17.x, but apport won't save as in namespace # |/usr/share/apport/apport %p %s %c %d %P - corefile = '%e_core-sig_%s-pid_%p.dmp' - assert_sysctl(self, 'kernel.core_pattern', corefile) + corefile = "%e_core-sig_%s-pid_%p.dmp" + assert_sysctl(self, "kernel.core_pattern", corefile) def terminate(self): """ Terminate generic LinuxRouter Mininet instance """ - set_sysctl(self, 'net.ipv4.ip_forward', 0) - set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0) + set_sysctl(self, "net.ipv4.ip_forward", 0) + set_sysctl(self, "net.ipv6.conf.all.forwarding", 0) Router.terminate(self) + class FreeBSDRouter(Router): "A FreeBSD Router Node with IPv4/IPv6 forwarding enabled." @@ -1194,5 +1393,5 @@ class LegacySwitch(OVSSwitch): "A Legacy Switch without OpenFlow" def __init__(self, name, **params): - OVSSwitch.__init__(self, name, failMode='standalone', **params) + OVSSwitch.__init__(self, name, failMode="standalone", **params) self.switchIP = None diff --git a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py index 56cd42ea57..92cebfe0b6 100755 --- a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py +++ b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py @@ -32,53 +32,56 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Required to instantiate the topology builder class. from mininet.topo import Topo + # Import topogen and topotest helpers from lib import topotest from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger + # and Finally pytest import pytest class OspfSrTopo(Topo): "Test topology builder" + def build(self): "Build function" tgen = get_topogen(self) # Check for mpls if tgen.hasmpls is not True: - tgen.set_error('MPLS not available, tests will be skipped') + tgen.set_error("MPLS not available, tests will be skipped") # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Interconect router 1 and 2 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) # Interconect router 3 and 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r2"]) # Interconect router 4 and 2 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r4']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r4"]) + switch.add_link(tgen.gears["r2"]) def setup_module(mod): "Sets up the pytest environment" - logger.info('\n\n---- Starting OSPF Segment Routing tests ----\n') + logger.info("\n\n---- Starting OSPF Segment Routing tests ----\n") tgen = Topogen(OspfSrTopo, mod.__name__) tgen.start_topology() @@ -87,12 +90,10 @@ def setup_module(mod): for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, 
os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) # Initialize all routers. @@ -101,14 +102,15 @@ def setup_module(mod): # Verify that version, MPLS and Segment Routing are OK for router in router_list.values(): # Check for Version - if router.has_version('<', '4'): - tgen.set_error('Unsupported FRR version') + if router.has_version("<", "4"): + tgen.set_error("Unsupported FRR version") break # Check that Segment Routing is available output = tgen.gears[router.name].vtysh_cmd( - "show ip ospf database segment-routing json") + "show ip ospf database segment-routing json" + ) if output.find("Unknown") != -1: - tgen.set_error('Segment Routing is not available') + tgen.set_error("Segment Routing is not available") def teardown_module(mod): @@ -117,7 +119,8 @@ def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() - logger.info('\n\n---- OSPF Segment Routing tests End ----\n') + logger.info("\n\n---- OSPF Segment Routing tests End ----\n") + # Shared test function to validate expected output. def compare_ospf_srdb(rname, expected): @@ -126,11 +129,10 @@ def compare_ospf_srdb(rname, expected): and compare the obtained result with the expected output. """ tgen = get_topogen() - current = tgen.gears[rname].vtysh_cmd( - 'show ip ospf database segment-routing json') - return topotest.difflines(current, expected, - title1="Current output", - title2="Expected output") + current = tgen.gears[rname].vtysh_cmd("show ip ospf database segment-routing json") + return topotest.difflines( + current, expected, title1="Current output", title2="Expected output" + ) def compare_mpls_table(rname, expected): @@ -139,10 +141,10 @@ def compare_mpls_table(rname, expected): result with the expected output. """ tgen = get_topogen() - current = tgen.gears[rname].vtysh_cmd('show mpls table json') - return topotest.difflines(current, expected, - title1="Current output", - title2="Expected output") + current = tgen.gears[rname].vtysh_cmd("show mpls table json") + return topotest.difflines( + current, expected, title1="Current output", title2="Expected output" + ) def test_ospf_sr(): @@ -151,24 +153,23 @@ def test_ospf_sr(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('--- test OSPF Segment Routing Data Base ---') + logger.info("--- test OSPF Segment Routing Data Base ---") for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) logger.info('\tRouter "%s"', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospf_srdb.json'.format(router)) + reffile = os.path.join(CWD, "{}/ospf_srdb.json".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_ospf_srdb, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) - assert result, ( - 'OSPF did not start Segment Routing on {}:\n{}' - ).format(router, diff) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) + assert result, ("OSPF did not start Segment Routing on {}:\n{}").format( + router, diff + ) def test_ospf_kernel_route(): @@ -177,34 +178,34 @@ def test_ospf_kernel_route(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('--- test OSPF Segment Routing MPLS tables ---') + logger.info("--- test OSPF Segment Routing MPLS tables ---") for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) logger.info('\tRouter "%s"', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/zebra_mpls.json'.format(router)) + reffile = os.path.join(CWD, "{}/zebra_mpls.json".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. test_func = partial(compare_mpls_table, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) - assert result, ( - 'OSPF did not properly instal MPLS table on {}:\n{}' - ).format(router, diff) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) + assert result, ("OSPF did not properly instal MPLS table on {}:\n{}").format( + router, diff + ) def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt b/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt index 973db543fa..d72aa3b8e5 100644 --- a/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt +++ b/tests/topotests/ospf-topo1-vrf/r1/zebraroute.txt @@ -1,8 +1,8 @@ VRF r1-cust1: -O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX +O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX -O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, XX:XX:XX +O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r1-eth1, XX:XX:XX -O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, XX:XX:XX +O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt b/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt index 7bdccd0909..5ea6bdc04d 100644 --- a/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt +++ b/tests/topotests/ospf-topo1-vrf/r1/zebraroutedown.txt @@ -1,7 +1,7 @@ VRF r1-cust1: -O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX +O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX -O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, XX:XX:XX +O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 
10.0.3.0/24 is directly connected, r1-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt b/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt index 2916cb9274..ce5e5f3bab 100644 --- a/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt +++ b/tests/topotests/ospf-topo1-vrf/r2/zebraroute.txt @@ -1,8 +1,8 @@ VRF r2-cust1: -O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, XX:XX:XX -O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, XX:XX:XX +O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX +O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r2-eth1, XX:XX:XX -O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, XX:XX:XX +O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt b/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt index ccaf9abc31..157811ec77 100644 --- a/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt +++ b/tests/topotests/ospf-topo1-vrf/r2/zebraroutedown.txt @@ -1,7 +1,7 @@ VRF r2-cust1: -O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, XX:XX:XX -O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, XX:XX:XX +O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX +O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r2-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt b/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt index 70eae0a9fb..f40b7b09af 100644 --- a/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt +++ b/tests/topotests/ospf-topo1-vrf/r3/zebraroute.txt @@ -1,8 +1,8 @@ VRF r3-cust1: -O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, XX:XX:XX -O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, XX:XX:XX -O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, XX:XX:XX +O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, weight 1, XX:XX:XX +O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, weight 1, XX:XX:XX +O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r3-eth0, XX:XX:XX -O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, XX:XX:XX +O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.10.0/24 is directly connected, r3-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt b/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt index 6d54782eff..89cd6f56c4 100644 --- a/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt +++ b/tests/topotests/ospf-topo1-vrf/r3/zebraroutedown.txt @@ -1,4 +1,4 @@ VRF r3-cust1: -O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, XX:XX:XX +O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.10.0/24 is directly connected, r3-eth1, XX:XX:XX diff --git a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py index fc4854454c..130d0c85f9 100755 --- a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py +++ b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py @@ -34,7 +34,7 @@ 
import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,33 +45,35 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class OSPFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 3 routers for routern in range(1, 4): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create a empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) # Interconect router 1, 2 and 3 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) # Create empty netowrk for router3 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r3"]) def setup_module(mod): @@ -83,23 +85,26 @@ def setup_module(mod): # check for zebra capability for rname, router in router_list.iteritems(): - if router.check_capability( - TopoRouter.RD_ZEBRA, - '--vrfwnetns' - ) == False: - return pytest.skip('Skipping OSPF VRF NETNS feature. VRF NETNS backend not available on FRR') - - if os.system('ip netns list') != 0: - return pytest.skip('Skipping OSPF VRF NETNS Test. NETNS not available on System') + if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False: + return pytest.skip( + "Skipping OSPF VRF NETNS feature. VRF NETNS backend not available on FRR" + ) + + if os.system("ip netns list") != 0: + return pytest.skip( + "Skipping OSPF VRF NETNS Test. NETNS not available on System" + ) - logger.info('Testing with VRF Namespace support') + logger.info("Testing with VRF Namespace support") - cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi', - 'ip netns add {0}-cust1', - 'ip link set dev {0}-eth0 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up', - 'ip link set dev {0}-eth1 netns {0}-cust1', - 'ip netns exec {0}-cust1 ifconfig {0}-eth1 up'] + cmds = [ + "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi", + "ip netns add {0}-cust1", + "ip link set dev {0}-eth0 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth0 up", + "ip link set dev {0}-eth1 netns {0}-cust1", + "ip netns exec {0}-cust1 ifconfig {0}-eth1 up", + ] for rname, router in router_list.iteritems(): @@ -109,19 +114,18 @@ def setup_module(mod): router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), - '--vrfwnetns' + os.path.join(CWD, "{}/zebra.conf".format(rname)), + "--vrfwnetns", ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) # Initialize all routers. 
tgen.start_router() for router in router_list.values(): - if router.has_version('<', '4.0'): - tgen.set_error('unsupported version') + if router.has_version("<", "4.0"): + tgen.set_error("unsupported version") def teardown_module(mod): @@ -130,16 +134,19 @@ def teardown_module(mod): # move back rx-eth0 to default VRF # delete rx-vrf - cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1', - 'ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1', - 'ip netns delete {0}-cust1'] - + cmds = [ + "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1", + "ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1", + "ip netns delete {0}-cust1", + ] + router_list = tgen.routers() for rname, router in router_list.iteritems(): for cmd in cmds: tgen.net[rname].cmd(cmd.format(rname)) tgen.stop_topology() + # Shared test function to validate expected output. def compare_show_ip_route_vrf(rname, expected): """ @@ -147,35 +154,37 @@ def compare_show_ip_route_vrf(rname, expected): result with the expected output. """ tgen = get_topogen() - vrf_name = '{0}-cust1'.format(rname) + vrf_name = "{0}-cust1".format(rname) current = topotest.ip4_route_zebra(tgen.gears[rname], vrf_name) - ret = topotest.difflines(current, expected, - title1="Current output", - title2="Expected output") + ret = topotest.difflines( + current, expected, title1="Current output", title2="Expected output" + ) return ret + def test_ospf_convergence(): "Test OSPF daemon convergence" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rname, router in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence', rname) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute.txt'.format(rname)) + reffile = os.path.join(CWD, "{}/ospfroute.txt".format(rname)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. - test_func = partial(topotest.router_output_cmp, - router, - 'show ip ospf vrf {0}-cust1 route'.format(rname), - expected) - result, diff = topotest.run_and_expect(test_func, '', - count=160, wait=0.5) - assertmsg = 'OSPF did not converge on {}:\n{}'.format(rname, diff) + test_func = partial( + topotest.router_output_cmp, + router, + "show ip ospf vrf {0}-cust1 route".format(rname), + expected, + ) + result, diff = topotest.run_and_expect(test_func, "", count=160, wait=0.5) + assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff) assert result, assertmsg @@ -184,19 +193,19 @@ def test_ospf_kernel_route(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: logger.info('Checking OSPF IPv4 kernel routes in "%s"', router.name) - reffile = os.path.join(CWD, '{}/zebraroute.txt'.format(router.name)) + reffile = os.path.join(CWD, "{}/zebraroute.txt".format(router.name)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_show_ip_route_vrf, router.name, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) assertmsg = 'OSPF IPv4 route mismatch in router "{}": {}'.format( - router.name, diff) + router.name, diff + ) assert result, assertmsg @@ -205,52 +214,57 @@ def test_ospf_json(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rname, router in tgen.routers().iteritems(): - logger.info('Comparing router "%s" "show ip ospf vrf %s-cust1 json" output', router.name, router.name) + logger.info( + 'Comparing router "%s" "show ip ospf vrf %s-cust1 json" output', + router.name, + router.name, + ) expected = { - '{}-cust1'.format(router.name) : { - 'vrfName': '{}-cust1'.format(router.name), - 'routerId': '10.0.255.{}'.format(rname[1:]), - 'tosRoutesOnly': True, - 'rfc2328Conform': True, - 'spfScheduleDelayMsecs': 0, - 'holdtimeMinMsecs': 50, - 'holdtimeMaxMsecs': 5000, - 'lsaMinIntervalMsecs': 5000, - 'lsaMinArrivalMsecs': 1000, - 'writeMultiplier': 20, - 'refreshTimerMsecs': 10000, - 'asbrRouter': 'injectingExternalRoutingInformation', - 'attachedAreaCounter': 1, - 'areas': {} - } + "{}-cust1".format(router.name): { + "vrfName": "{}-cust1".format(router.name), + "routerId": "10.0.255.{}".format(rname[1:]), + "tosRoutesOnly": True, + "rfc2328Conform": True, + "spfScheduleDelayMsecs": 0, + "holdtimeMinMsecs": 50, + "holdtimeMaxMsecs": 5000, + "lsaMinIntervalMsecs": 5000, + "lsaMinArrivalMsecs": 1000, + "writeMultiplier": 20, + "refreshTimerMsecs": 10000, + "asbrRouter": "injectingExternalRoutingInformation", + "attachedAreaCounter": 1, + "areas": {}, } + } # Area specific additional checks - if router.name == 'r1' or router.name == 'r2' or router.name == 'r3': - expected['{}-cust1'.format(router.name)]['areas']['0.0.0.0'] = { - 'areaIfActiveCounter': 2, - 'areaIfTotalCounter': 2, - 'authentication': 'authenticationNone', - 'backbone': True, - 'lsaAsbrNumber': 0, - 'lsaNetworkNumber': 1, - 'lsaNssaNumber': 0, - 'lsaNumber': 4, - 'lsaOpaqueAreaNumber': 0, - 'lsaOpaqueLinkNumber': 0, - 'lsaRouterNumber': 3, - 'lsaSummaryNumber': 0, - 'nbrFullAdjacentCounter': 2, + if router.name == "r1" or router.name == "r2" or router.name == "r3": + expected["{}-cust1".format(router.name)]["areas"]["0.0.0.0"] = { + "areaIfActiveCounter": 2, + "areaIfTotalCounter": 2, + "authentication": "authenticationNone", + "backbone": True, + "lsaAsbrNumber": 0, + "lsaNetworkNumber": 1, + "lsaNssaNumber": 0, + "lsaNumber": 4, + "lsaOpaqueAreaNumber": 0, + "lsaOpaqueLinkNumber": 0, + "lsaRouterNumber": 3, + "lsaSummaryNumber": 0, + "nbrFullAdjacentCounter": 2, } - test_func = partial(topotest.router_json_cmp, - router, - 'show ip ospf vrf {0}-cust1 json'.format(rname), - expected) - _, diff = topotest.run_and_expect(test_func, None, - count=10, wait=0.5) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip ospf vrf {0}-cust1 json".format(rname), + expected, + ) + _, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(rname) assert diff is None, assertmsg @@ -260,27 +274,30 @@ def test_ospf_link_down(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # Simulate a network down event on router3 switch3 
interface. - router3 = tgen.gears['r3'] - topotest.interface_set_status(router3, 'r3-eth0', ifaceaction=False, vrf_name='r3-cust1') + router3 = tgen.gears["r3"] + topotest.interface_set_status( + router3, "r3-eth0", ifaceaction=False, vrf_name="r3-cust1" + ) # Expect convergence on all routers for rname, router in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence after link failure', rname) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute_down.txt'.format(rname)) + reffile = os.path.join(CWD, "{}/ospfroute_down.txt".format(rname)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. - test_func = partial(topotest.router_output_cmp, - router, - 'show ip ospf vrf {0}-cust1 route'.format(rname), - expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) - assertmsg = 'OSPF did not converge on {}:\n{}'.format(rname, diff) + test_func = partial( + topotest.router_output_cmp, + router, + "show ip ospf vrf {0}-cust1 route".format(rname), + expected, + ) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) + assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff) assert result, assertmsg @@ -289,21 +306,23 @@ def test_ospf_link_down_kernel_route(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: - logger.info('Checking OSPF IPv4 kernel routes in "%s" after link down', router.name) + logger.info( + 'Checking OSPF IPv4 kernel routes in "%s" after link down', router.name + ) - str='{0}-cust1'.format(router.name) - reffile = os.path.join(CWD, '{}/zebraroutedown.txt'.format(router.name)) + str = "{0}-cust1".format(router.name) + reffile = os.path.join(CWD, "{}/zebraroutedown.txt".format(router.name)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. test_func = partial(compare_show_ip_route_vrf, router.name, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down: {}'.format( - router.name, diff) + router.name, diff + ) assert result, assertmsg @@ -311,10 +330,11 @@ def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-topo1/test_ospf_topo1.py b/tests/topotests/ospf-topo1/test_ospf_topo1.py index 638e394153..d734f378e7 100755 --- a/tests/topotests/ospf-topo1/test_ospf_topo1.py +++ b/tests/topotests/ospf-topo1/test_ospf_topo1.py @@ -34,7 +34,7 @@ import pytest # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,70 +45,71 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class OSPFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create a empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) # Interconect router 1, 2 and 3 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) # Create empty netowrk for router3 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r3']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r3"]) # Interconect router 3 and 4 - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) # Create a empty network for router 4 - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['r4']) + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r4"]) + def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(OSPFTopo, mod.__name__) tgen.start_topology() - ospf6_config = 'ospf6d.conf' - if tgen.gears['r1'].has_version('<', '4.0'): - ospf6_config = 'ospf6d.conf-pre-v4' + ospf6_config = "ospf6d.conf" + if tgen.gears["r1"].has_version("<", "4.0"): + ospf6_config = "ospf6d.conf-pre-v4" router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF6, - os.path.join(CWD, '{}/{}'.format(rname, ospf6_config)) + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/{}".format(rname, ospf6_config)) ) # Initialize all routers. tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -121,46 +122,50 @@ def compare_show_ipv6_ospf6(rname, expected): result with the expected output. 
""" tgen = get_topogen() - current = tgen.gears[rname].vtysh_cmd('show ipv6 ospf6 route') + current = tgen.gears[rname].vtysh_cmd("show ipv6 ospf6 route") # Remove the link addresses - current = re.sub(r'fe80::[^ ]+', 'fe80::xxxx:xxxx:xxxx:xxxx', current) - expected = re.sub(r'fe80::[^ ]+', 'fe80::xxxx:xxxx:xxxx:xxxx', expected) + current = re.sub(r"fe80::[^ ]+", "fe80::xxxx:xxxx:xxxx:xxxx", current) + expected = re.sub(r"fe80::[^ ]+", "fe80::xxxx:xxxx:xxxx:xxxx", expected) # Remove the time - current = re.sub(r'\d+:\d{2}:\d{2}', '', current) - expected = re.sub(r'\d+:\d{2}:\d{2}', '', expected) + current = re.sub(r"\d+:\d{2}:\d{2}", "", current) + expected = re.sub(r"\d+:\d{2}:\d{2}", "", expected) + + return topotest.difflines( + topotest.normalize_text(current), + topotest.normalize_text(expected), + title1="Current output", + title2="Expected output", + ) - return topotest.difflines(topotest.normalize_text(current), - topotest.normalize_text(expected), - title1="Current output", - title2="Expected output") def test_ospf_convergence(): "Test OSPF daemon convergence" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for router, rnode in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute.txt'.format(router)) + reffile = os.path.join(CWD, "{}/ospfroute.txt".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 80 seconds. test_func = partial( - topotest.router_output_cmp, rnode, 'show ip ospf route', expected) - result, diff = topotest.run_and_expect(test_func, '', - count=160, wait=0.5) - assert result, 'OSPF did not converge on {}:\n{}'.format(router, diff) + topotest.router_output_cmp, rnode, "show ip ospf route", expected + ) + result, diff = topotest.run_and_expect(test_func, "", count=160, wait=0.5) + assert result, "OSPF did not converge on {}:\n{}".format(router, diff) + def test_ospf_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: @@ -168,25 +173,26 @@ def test_ospf_kernel_route(): routes = topotest.ip4_route(router) expected = { - '10.0.1.0/24': {}, - '10.0.2.0/24': {}, - '10.0.3.0/24': {}, - '10.0.10.0/24': {}, - '172.16.0.0/24': {}, - '172.16.1.0/24': {}, + "10.0.1.0/24": {}, + "10.0.2.0/24": {}, + "10.0.3.0/24": {}, + "10.0.10.0/24": {}, + "172.16.0.0/24": {}, + "172.16.1.0/24": {}, } assertmsg = 'OSPF IPv4 route mismatch in router "{}"'.format(router.name) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_ospf6_convergence(): "Test OSPF6 daemon convergence" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - ospf6route_file = '{}/ospf6route_ecmp.txt' + ospf6route_file = "{}/ospf6route_ecmp.txt" for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) logger.info('Waiting for router "%s" IPv6 OSPF convergence', router) @@ -196,39 +202,37 @@ def test_ospf6_convergence(): # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) if (not result) and (rnum == 1): # Didn't match the new ECMP version - try the old pre-ECMP format - ospf6route_file = '{}/ospf6route.txt' + ospf6route_file = "{}/ospf6route.txt" # Load expected results from the command reffile = os.path.join(CWD, ospf6route_file.format(router)) expected = open(reffile).read() test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=1, wait=3) + result, diff = topotest.run_and_expect(test_func, "", count=1, wait=3) if not result: # Didn't match the old version - switch back to new ECMP version # and fail - ospf6route_file = '{}/ospf6route_ecmp.txt' + ospf6route_file = "{}/ospf6route_ecmp.txt" # Load expected results from the command reffile = os.path.join(CWD, ospf6route_file.format(router)) expected = open(reffile).read() test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=1, wait=3) + result, diff = topotest.run_and_expect(test_func, "", count=1, wait=3) + + assert result, "OSPF6 did not converge on {}:\n{}".format(router, diff) - assert result, 'OSPF6 did not converge on {}:\n{}'.format(router, diff) def test_ospf6_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: @@ -236,216 +240,231 @@ def test_ospf6_kernel_route(): routes = topotest.ip6_route(router) expected = { - '2001:db8:1::/64': {}, - '2001:db8:2::/64': {}, - '2001:db8:3::/64': {}, - '2001:db8:100::/64': {}, - '2001:db8:200::/64': {}, - '2001:db8:300::/64': {}, + "2001:db8:1::/64": {}, + "2001:db8:2::/64": {}, + "2001:db8:3::/64": {}, + "2001:db8:100::/64": {}, + "2001:db8:200::/64": {}, + "2001:db8:300::/64": {}, } assertmsg = 'OSPF IPv6 route mismatch in router "{}"'.format(router.name) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_ospf_json(): "Test 'show ip ospf json' output for coherency." 
tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rnum in range(1, 5): - router = tgen.gears['r{}'.format(rnum)] + router = tgen.gears["r{}".format(rnum)] logger.info('Comparing router "%s" "show ip ospf json" output', router.name) expected = { - 'routerId': '10.0.255.{}'.format(rnum), - 'tosRoutesOnly': True, - 'rfc2328Conform': True, - 'spfScheduleDelayMsecs': 0, - 'holdtimeMinMsecs': 50, - 'holdtimeMaxMsecs': 5000, - 'lsaMinIntervalMsecs': 5000, - 'lsaMinArrivalMsecs': 1000, - 'writeMultiplier': 20, - 'refreshTimerMsecs': 10000, - 'asbrRouter': 'injectingExternalRoutingInformation', - 'attachedAreaCounter': 1, - 'areas': {} + "routerId": "10.0.255.{}".format(rnum), + "tosRoutesOnly": True, + "rfc2328Conform": True, + "spfScheduleDelayMsecs": 0, + "holdtimeMinMsecs": 50, + "holdtimeMaxMsecs": 5000, + "lsaMinIntervalMsecs": 5000, + "lsaMinArrivalMsecs": 1000, + "writeMultiplier": 20, + "refreshTimerMsecs": 10000, + "asbrRouter": "injectingExternalRoutingInformation", + "attachedAreaCounter": 1, + "areas": {}, } # Area specific additional checks - if router.name == 'r1' or router.name == 'r2' or router.name == 'r3': - expected['areas']['0.0.0.0'] = { - 'areaIfActiveCounter': 2, - 'areaIfTotalCounter': 2, - 'authentication': 'authenticationNone', - 'backbone': True, - 'lsaAsbrNumber': 1, - 'lsaNetworkNumber': 1, - 'lsaNssaNumber': 0, - 'lsaNumber': 7, - 'lsaOpaqueAreaNumber': 0, - 'lsaOpaqueLinkNumber': 0, - 'lsaRouterNumber': 3, - 'lsaSummaryNumber': 2, - 'nbrFullAdjacentCounter': 2, + if router.name == "r1" or router.name == "r2" or router.name == "r3": + expected["areas"]["0.0.0.0"] = { + "areaIfActiveCounter": 2, + "areaIfTotalCounter": 2, + "authentication": "authenticationNone", + "backbone": True, + "lsaAsbrNumber": 1, + "lsaNetworkNumber": 1, + "lsaNssaNumber": 0, + "lsaNumber": 7, + "lsaOpaqueAreaNumber": 0, + "lsaOpaqueLinkNumber": 0, + "lsaRouterNumber": 3, + "lsaSummaryNumber": 2, + "nbrFullAdjacentCounter": 2, } - if router.name == 'r3' or router.name == 'r4': - expected['areas']['0.0.0.1'] = { - 'areaIfActiveCounter': 1, - 'areaIfTotalCounter': 1, - 'authentication': 'authenticationNone', - 'lsaAsbrNumber': 2, - 'lsaNetworkNumber': 1, - 'lsaNssaNumber': 0, - 'lsaNumber': 9, - 'lsaOpaqueAreaNumber': 0, - 'lsaOpaqueLinkNumber': 0, - 'lsaRouterNumber': 2, - 'lsaSummaryNumber': 4, - 'nbrFullAdjacentCounter': 1, + if router.name == "r3" or router.name == "r4": + expected["areas"]["0.0.0.1"] = { + "areaIfActiveCounter": 1, + "areaIfTotalCounter": 1, + "authentication": "authenticationNone", + "lsaAsbrNumber": 2, + "lsaNetworkNumber": 1, + "lsaNssaNumber": 0, + "lsaNumber": 9, + "lsaOpaqueAreaNumber": 0, + "lsaOpaqueLinkNumber": 0, + "lsaRouterNumber": 2, + "lsaSummaryNumber": 4, + "nbrFullAdjacentCounter": 1, } # r4 has more interfaces for area 0.0.0.1 - if router.name == 'r4': - expected['areas']['0.0.0.1'].update({ - 'areaIfActiveCounter': 2, - 'areaIfTotalCounter': 2, - }) + if router.name == "r4": + expected["areas"]["0.0.0.1"].update( + {"areaIfActiveCounter": 2, "areaIfTotalCounter": 2,} + ) # router 3 has an additional area - if router.name == 'r3': - expected['attachedAreaCounter'] = 2 + if router.name == "r3": + expected["attachedAreaCounter"] = 2 - output = router.vtysh_cmd('show ip ospf json', isjson=True) + output = router.vtysh_cmd("show ip ospf json", isjson=True) result = topotest.json_cmp(output, expected) - assert result is None, '"{}" JSON output 
mismatches the expected result'.format(router.name) + assert result is None, '"{}" JSON output mismatches the expected result'.format( + router.name + ) + def test_ospf_link_down(): "Test OSPF convergence after a link goes down" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # Simulate a network down event on router3 switch3 interface. - router3 = tgen.gears['r3'] - router3.peer_link_enable('r3-eth0', False) + router3 = tgen.gears["r3"] + router3.peer_link_enable("r3-eth0", False) # Expect convergence on all routers for router, rnode in tgen.routers().iteritems(): logger.info('Waiting for router "%s" convergence after link failure', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospfroute_down.txt'.format(router)) + reffile = os.path.join(CWD, "{}/ospfroute_down.txt".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 80 seconds. test_func = partial( - topotest.router_output_cmp, rnode, 'show ip ospf route', expected) - result, diff = topotest.run_and_expect(test_func, '', - count=140, wait=0.5) - assert result, 'OSPF did not converge on {}:\n{}'.format(router, diff) + topotest.router_output_cmp, rnode, "show ip ospf route", expected + ) + result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) + assert result, "OSPF did not converge on {}:\n{}".format(router, diff) + def test_ospf_link_down_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: - logger.info('Checking OSPF IPv4 kernel routes in "%s" after link down', router.name) + logger.info( + 'Checking OSPF IPv4 kernel routes in "%s" after link down', router.name + ) routes = topotest.ip4_route(router) expected = { - '10.0.1.0/24': {}, - '10.0.2.0/24': {}, - '10.0.3.0/24': {}, - '10.0.10.0/24': {}, - '172.16.0.0/24': {}, - '172.16.1.0/24': {}, + "10.0.1.0/24": {}, + "10.0.2.0/24": {}, + "10.0.3.0/24": {}, + "10.0.10.0/24": {}, + "172.16.0.0/24": {}, + "172.16.1.0/24": {}, } - if router.name == 'r1' or router.name == 'r2': - expected.update({ - '10.0.10.0/24': None, - '172.16.0.0/24': None, - '172.16.1.0/24': None, - }) - elif router.name == 'r3' or router.name == 'r4': - expected.update({ - '10.0.1.0/24': None, - '10.0.2.0/24': None, - }) + if router.name == "r1" or router.name == "r2": + expected.update( + {"10.0.10.0/24": None, "172.16.0.0/24": None, "172.16.1.0/24": None,} + ) + elif router.name == "r3" or router.name == "r4": + expected.update( + {"10.0.1.0/24": None, "10.0.2.0/24": None,} + ) # Route '10.0.3.0' is no longer available for r4 since it is down. 
- if router.name == 'r4': - expected.update({ - '10.0.3.0/24': None, - }) - assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format(router.name) + if router.name == "r4": + expected.update( + {"10.0.3.0/24": None,} + ) + assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format( + router.name + ) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_ospf6_link_down(): "Test OSPF6 daemon convergence after link goes down" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for rnum in range(1, 5): - router = 'r{}'.format(rnum) + router = "r{}".format(rnum) - logger.info('Waiting for router "%s" IPv6 OSPF convergence after link down', router) + logger.info( + 'Waiting for router "%s" IPv6 OSPF convergence after link down', router + ) # Load expected results from the command - reffile = os.path.join(CWD, '{}/ospf6route_down.txt'.format(router)) + reffile = os.path.join(CWD, "{}/ospf6route_down.txt".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. test_func = partial(compare_show_ipv6_ospf6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=25, wait=3) - assert result, 'OSPF6 did not converge on {}:\n{}'.format(router, diff) + result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3) + assert result, "OSPF6 did not converge on {}:\n{}".format(router, diff) + def test_ospf6_link_down_kernel_route(): "Test OSPF kernel route installation" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: - logger.info('Checking OSPF IPv6 kernel routes in "%s" after link down', router.name) + logger.info( + 'Checking OSPF IPv6 kernel routes in "%s" after link down', router.name + ) routes = topotest.ip6_route(router) expected = { - '2001:db8:1::/64': {}, - '2001:db8:2::/64': {}, - '2001:db8:3::/64': {}, - '2001:db8:100::/64': {}, - '2001:db8:200::/64': {}, - '2001:db8:300::/64': {}, + "2001:db8:1::/64": {}, + "2001:db8:2::/64": {}, + "2001:db8:3::/64": {}, + "2001:db8:100::/64": {}, + "2001:db8:200::/64": {}, + "2001:db8:300::/64": {}, } - if router.name == 'r1' or router.name == 'r2': - expected.update({ - '2001:db8:100::/64': None, - '2001:db8:200::/64': None, - '2001:db8:300::/64': None, - }) - elif router.name == 'r3' or router.name == 'r4': - expected.update({ - '2001:db8:1::/64': None, - '2001:db8:2::/64': None, - }) + if router.name == "r1" or router.name == "r2": + expected.update( + { + "2001:db8:100::/64": None, + "2001:db8:200::/64": None, + "2001:db8:300::/64": None, + } + ) + elif router.name == "r3" or router.name == "r4": + expected.update( + {"2001:db8:1::/64": None, "2001:db8:2::/64": None,} + ) # Route '2001:db8:3::/64' is no longer available for r4 since it is down. - if router.name == 'r4': - expected.update({ - '2001:db8:3::/64': None, - }) - assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format(router.name) + if router.name == "r4": + expected.update( + {"2001:db8:3::/64": None,} + ) + assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format( + router.name + ) assert topotest.json_cmp(routes, expected) is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref index 2db6f620f9..a2ddf7c5ae 100644 --- a/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r1/show_ipv6_route.ref @@ -1,9 +1,9 @@ -O fc00:1:1:1::/64 [110/10] is directly connected, r1-stubnet, XX:XX:XX -O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, XX:XX:XX -O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX +O fc00:1:1:1::/64 [110/10] is directly connected, r1-stubnet, weight 1, XX:XX:XX +O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, weight 1, XX:XX:XX +O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref index 9060b0739f..1f642b1b22 100644 --- a/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r2/show_ipv6_route.ref @@ -1,10 +1,10 @@ -O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O fc00:2:2:2::/64 [110/10] is directly connected, r2-stubnet, XX:XX:XX -O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, XX:XX:XX -O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX +O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O fc00:2:2:2::/64 [110/10] is directly connected, r2-stubnet, weight 1, XX:XX:XX +O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, weight 1, XX:XX:XX +O>* fc00:b:b:b::/64 [110/20] 
via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref index 9406f41e94..8e3afa583a 100644 --- a/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r3/show_ipv6_route.ref @@ -1,10 +1,10 @@ -O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, XX:XX:XX -O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, XX:XX:XX -O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, XX:XX:XX -O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, XX:XX:XX +O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, weight 1, XX:XX:XX +O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX +O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, weight 1, XX:XX:XX +O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref b/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref index 9bf032b5e7..0df652ffb3 100644 --- a/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref +++ b/tests/topotests/ospf6-topo1/r4/show_ipv6_route.ref @@ -1,9 +1,9 @@ -O>* fc00:1:1:1::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:2:2:2::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O fc00:4:4:4::/64 [110/10] is directly connected, r4-stubnet, XX:XX:XX -O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX +O>* fc00:1:1:1::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:2:2:2::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O fc00:4:4:4::/64 [110/10] is directly connected, r4-stubnet, weight 1, XX:XX:XX +O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, 
XX:XX:XX +O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py index cb0c4af221..30c09ea606 100755 --- a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py +++ b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py @@ -82,7 +82,7 @@ from mininet.topo import Topo # Save the Current Working Directory to find configuration files later. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -97,6 +97,7 @@ import platform ## ##################################################### + class NetworkTopo(Topo): "OSPFv3 (IPv6) Test Topology 1" @@ -107,7 +108,7 @@ class NetworkTopo(Topo): # Create 4 routers for routern in range(1, 5): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # # Wire up the switches and routers @@ -115,31 +116,31 @@ class NetworkTopo(Topo): # # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1'], nodeif='r1-stubnet') + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet") # Create a empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2'], nodeif='r2-stubnet') + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet") # Create a empty network for router 3 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r3'], nodeif='r3-stubnet') + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet") # Create a empty network for router 4 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r4'], nodeif='r4-stubnet') + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet") # Interconnect routers 1, 2, and 3 - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['r1'], nodeif='r1-sw5') - switch.add_link(tgen.gears['r2'], nodeif='r2-sw5') - switch.add_link(tgen.gears['r3'], nodeif='r3-sw5') + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r1"], nodeif="r1-sw5") + switch.add_link(tgen.gears["r2"], nodeif="r2-sw5") + switch.add_link(tgen.gears["r3"], nodeif="r3-sw5") # Interconnect routers 3 and 4 - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['r3'], nodeif='r3-sw6') - switch.add_link(tgen.gears['r4'], nodeif='r4-sw6') + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r3"], nodeif="r3-sw6") + switch.add_link(tgen.gears["r4"], nodeif="r4-sw6") ##################################################### @@ -148,6 +149,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(mod): "Sets up the pytest environment" @@ -164,12 +166,10 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF6, - os.path.join(CWD, 
'{}/ospf6d.conf'.format(rname)) + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname)) ) # Initialize all routers. @@ -194,14 +194,14 @@ def test_ospf6_converged(): pytest.skip(tgen.errors) # For debugging, uncomment the next line - #tgen.mininet_cli() + # tgen.mininet_cli() # Wait for OSPF6 to converge (All Neighbors in either Full or TwoWay State) logger.info("Waiting for OSPF6 convergence") # Set up for regex - pat1 = re.compile('^[0-9]') - pat2 = re.compile('Full') + pat1 = re.compile("^[0-9]") + pat2 = re.compile("Full") timeout = 60 while timeout > 0: @@ -210,7 +210,7 @@ def test_ospf6_converged(): # Look for any node not yet converged for router, rnode in tgen.routers().iteritems(): - resStr = rnode.vtysh_cmd('show ipv6 ospf neigh') + resStr = rnode.vtysh_cmd("show ipv6 ospf neigh") isConverged = False @@ -225,12 +225,12 @@ def test_ospf6_converged(): break if isConverged == False: - logger.info('Waiting for {}'.format(router)) + logger.info("Waiting for {}".format(router)) sys.stdout.flush() break if isConverged: - logger.info('Done') + logger.info("Done") break else: sleep(5) @@ -238,7 +238,7 @@ def test_ospf6_converged(): if timeout == 0: # Bail out with error if a router fails to converge - ospfStatus = rnode.vtysh_cmd('show ipv6 ospf neigh') + ospfStatus = rnode.vtysh_cmd("show ipv6 ospf neigh") assert False, "OSPFv6 did not converge:\n{}".format(ospfStatus) logger.info("OSPFv3 converged.") @@ -250,6 +250,7 @@ def test_ospf6_converged(): if tgen.routers_have_failure(): assert tgen.errors == "", tgen.errors + def compare_show_ipv6(rname, expected): """ Calls 'show ipv6 route' for router `rname` and compare the obtained @@ -263,21 +264,24 @@ def compare_show_ipv6(rname, expected): # Use just the 'O'spf lines of the output linearr = [] for line in current.splitlines(): - if re.match('^O', line): + if re.match("^O", line): linearr.append(line) - current = '\n'.join(linearr) + current = "\n".join(linearr) + + return topotest.difflines( + topotest.normalize_text(current), + topotest.normalize_text(expected), + title1="Current output", + title2="Expected output", + ) - return topotest.difflines(topotest.normalize_text(current), - topotest.normalize_text(expected), - title1="Current output", - title2="Expected output") def test_ospfv3_routingTable(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # For debugging, uncomment the next line # tgen.mininet_cli() @@ -287,15 +291,13 @@ def test_ospfv3_routingTable(): logger.info('Waiting for router "%s" convergence', router) # Load expected results from the command - reffile = os.path.join(CWD, '{}/show_ipv6_route.ref'.format(router)) + reffile = os.path.join(CWD, "{}/show_ipv6_route.ref".format(router)) expected = open(reffile).read() # Run test function until we get an result. Wait at most 60 seconds. 
- test_func = partial( - compare_show_ipv6, router, expected) - result, diff = topotest.run_and_expect(test_func, '', - count=120, wait=0.5) - assert result, 'OSPFv3 did not converge on {}:\n{}'.format(router, diff) + test_func = partial(compare_show_ipv6, router, expected) + result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5) + assert result, "OSPFv3 did not converge on {}:\n{}".format(router, diff) def test_linux_ipv6_kernel_routingTable(): @@ -303,7 +305,7 @@ def test_linux_ipv6_kernel_routingTable(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") # Verify Linux Kernel Routing Table logger.info("Verifying Linux IPv6 Kernel Routing Table") @@ -314,23 +316,23 @@ def test_linux_ipv6_kernel_routingTable(): # each run and we need to translate them linklocals = [] for i in range(1, 5): - linklocals += tgen.net['r{}'.format(i)].get_ipv6_linklocal() + linklocals += tgen.net["r{}".format(i)].get_ipv6_linklocal() # Now compare the routing tables (after substituting link-local addresses) for i in range(1, 5): - if topotest.version_cmp(platform.release(), '5.3') < 0: - refTableFile = os.path.join(CWD, 'r{}/ip_6_address.ref'.format(i)) - else: - refTableFile = os.path.join(CWD, 'r{}/ip_6_address.nhg.ref'.format(i)) - if os.path.isfile(refTableFile): + # Actual output from router + actual = tgen.gears["r{}".format(i)].run("ip -6 route").rstrip() + if "nhid" in actual: + refTableFile = os.path.join(CWD, "r{}/ip_6_address.nhg.ref".format(i)) + else: + refTableFile = os.path.join(CWD, "r{}/ip_6_address.ref".format(i)) + if os.path.isfile(refTableFile): expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines())).splitlines(1) + expected = ("\n".join(expected.splitlines())).splitlines(1) - # Actual output from router - actual = tgen.gears['r{}'.format(i)].run('ip -6 route').rstrip() # Mask out Link-Local mac addresses for ll in linklocals: actual = actual.replace(ll[1], "fe80::__(%s)__" % ll[0]) @@ -338,20 +340,21 @@ def test_linux_ipv6_kernel_routingTable(): actual = re.sub(r"[ ]+proto [0-9a-z]+ +", " proto XXXX ", actual) actual = re.sub(r"[ ]+nhid [0-9]+ +", " nhid XXXX ", actual) # Remove ff00::/8 routes (seen on some kernels - not from FRR) - actual = re.sub(r'ff00::/8.*', '', actual) + actual = re.sub(r"ff00::/8.*", "", actual) # Strip empty lines actual = actual.lstrip() actual = actual.rstrip() - actual = re.sub(r' +', ' ', actual) + actual = re.sub(r" +", " ", actual) filtered_lines = [] for line in sorted(actual.splitlines()): - if line.startswith('fe80::/64 ') \ - or line.startswith('unreachable fe80::/64 '): + if line.startswith("fe80::/64 ") or line.startswith( + "unreachable fe80::/64 " + ): continue filtered_lines.append(line) - actual = '\n'.join(filtered_lines).splitlines(1) + actual = "\n".join(filtered_lines).splitlines(1) # Print Actual table # logger.info("Router r%s table" % i) @@ -359,18 +362,27 @@ def test_linux_ipv6_kernel_routingTable(): # logger.info(line.rstrip()) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual OSPFv3 IPv6 routing table", - title2="expected OSPFv3 IPv6 routing table") + title2="expected OSPFv3 IPv6 routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Linux IPv6 Kernel Routing Table Check:\n%s\n' % 
(i, diff)) + sys.stderr.write( + "r%s failed Linux IPv6 Kernel Routing Table Check:\n%s\n" + % (i, diff) + ) failures += 1 else: logger.info("r%s ok" % i) - assert failures == 0, "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) def test_shutdown_check_stderr(): @@ -378,11 +390,13 @@ def test_shutdown_check_stderr(): tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - logger.info("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + logger.info( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") net = tgen.net @@ -390,11 +404,11 @@ def test_shutdown_check_stderr(): logger.info("******************************************") for i in range(1, 5): - net['r%s' % i].stopRouter() - log = net['r%s' % i].getStdErr('ospf6d') + net["r%s" % i].stopRouter() + log = net["r%s" % i].getStdErr("ospf6d") if log: logger.info("\nRouter r%s OSPF6d StdErr Log:\n%s" % (i, log)) - log = net['r%s' % i].getStdErr('zebra') + log = net["r%s" % i].getStdErr("zebra") if log: logger.info("\nRouter r%s Zebra StdErr Log:\n%s" % (i, log)) @@ -402,22 +416,24 @@ def test_shutdown_check_stderr(): def test_shutdown_check_memleak(): "Run the memory leak test and report results." - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - logger.info("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)") - pytest.skip('Skipping test for memory leaks') + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + logger.info( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)" + ) + pytest.skip("Skipping test for memory leaks") tgen = get_topogen() net = tgen.net for i in range(1, 5): - net['r%s' % i].stopRouter() - net['r%s' % i].report_memory_leaks( - os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), - os.path.basename(__file__)) + net["r%s" % i].stopRouter() + net["r%s" % i].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": # To suppress tracebacks, either use the following pytest call or # add "--tb=no" to cli diff --git a/tests/topotests/pim-basic/mcast-rx.py b/tests/topotests/pim-basic/mcast-rx.py index 9e3484e12a..8a3a44ecb1 100755 --- a/tests/topotests/pim-basic/mcast-rx.py +++ b/tests/topotests/pim-basic/mcast-rx.py @@ -36,8 +36,8 @@ import time def ifname_to_ifindex(ifname): output = subprocess.check_output("ip link show %s" % ifname, shell=True) - first_line = output.split('\n')[0] - re_index = re.search('^(\d+):', first_line) + first_line = output.split("\n")[0] + re_index = re.search("^(\d+):", first_line) if re_index: return int(re_index.group(1)) @@ -48,24 +48,28 @@ def ifname_to_ifindex(ifname): # Thou shalt be root if os.geteuid() != 0: - sys.stderr.write('ERROR: You must have root privileges\n') + sys.stderr.write("ERROR: You must have root privileges\n") sys.exit(1) -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)5s: %(message)s') +logging.basicConfig( + level=logging.DEBUG, format="%(asctime)s 
%(levelname)5s: %(message)s" +) # Color the errors and warnings in red -logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR)) -logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)) +logging.addLevelName( + logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR) +) +logging.addLevelName( + logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING) +) log = logging.getLogger(__name__) -parser = argparse.ArgumentParser(description='Multicast RX utility', - version='1.0.0') -parser.add_argument('group', help='Multicast IP') -parser.add_argument('ifname', help='Interface name') -parser.add_argument('--port', help='UDP port', default=1000) -parser.add_argument('--sleep', help='Time to sleep before we stop waiting', - default = 5) +parser = argparse.ArgumentParser(description="Multicast RX utility", version="1.0.0") +parser.add_argument("group", help="Multicast IP") +parser.add_argument("ifname", help="Interface name") +parser.add_argument("--port", help="UDP port", default=1000) +parser.add_argument("--sleep", help="Time to sleep before we stop waiting", default=5) args = parser.parse_args() # Create the datagram socket @@ -77,7 +81,9 @@ newpid = os.fork() if newpid == 0: ifindex = ifname_to_ifindex(args.ifname) - mreq = struct.pack("=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex) + mreq = struct.pack( + "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex + ) sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) time.sleep(float(args.sleep)) sock.close() diff --git a/tests/topotests/pim-basic/mcast-tx.py b/tests/topotests/pim-basic/mcast-tx.py index c469e47d4c..ad6fdc1062 100755 --- a/tests/topotests/pim-basic/mcast-tx.py +++ b/tests/topotests/pim-basic/mcast-tx.py @@ -26,20 +26,28 @@ import struct import time -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)5s: %(message)s') +logging.basicConfig( + level=logging.DEBUG, format="%(asctime)s %(levelname)5s: %(message)s" +) # Color the errors and warnings in red -logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR)) -logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)) +logging.addLevelName( + logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR) +) +logging.addLevelName( + logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING) +) log = logging.getLogger(__name__) -parser = argparse.ArgumentParser(description='Multicast packet generator', version='1.0.0') -parser.add_argument('group', help='Multicast IP') -parser.add_argument('ifname', help='Interface name') -parser.add_argument('--port', type=int, help='UDP port number', default=1000) -parser.add_argument('--ttl', type=int, help='time-to-live', default=20) -parser.add_argument('--count', type=int, help='Packets to send', default=1) -parser.add_argument('--interval', type=int, help='ms between packets', default=100) +parser = argparse.ArgumentParser( + description="Multicast packet generator", version="1.0.0" +) +parser.add_argument("group", help="Multicast IP") +parser.add_argument("ifname", help="Interface name") +parser.add_argument("--port", type=int, help="UDP port number", default=1000) +parser.add_argument("--ttl", type=int, help="time-to-live", default=20) +parser.add_argument("--count", type=int, help="Packets to send", default=1) +parser.add_argument("--interval", 
type=int, help="ms between packets", default=100) args = parser.parse_args() # Create the datagram socket @@ -49,22 +57,24 @@ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # https://github.com/sivel/bonding/issues/10 # # Bind our socket to ifname -sock.setsockopt(socket.SOL_SOCKET, - 25, - struct.pack("%ds" % len(args.ifname), args.ifname)) +sock.setsockopt( + socket.SOL_SOCKET, 25, struct.pack("%ds" % len(args.ifname), args.ifname) +) # We need to make sure our sendto() finishes before we close the socket sock.setblocking(1) # Set the time-to-live -sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack('b', args.ttl)) +sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", args.ttl)) ms = args.interval / 1000.0 # Send data to the multicast group for x in xrange(args.count): - log.info('TX multicast UDP packet to %s:%d on %s' % (args.group, args.port, args.ifname)) - sent = sock.sendto('foobar %d' % x, (args.group, args.port)) + log.info( + "TX multicast UDP packet to %s:%d on %s" % (args.group, args.port, args.ifname) + ) + sent = sock.sendto("foobar %d" % x, (args.group, args.port)) if args.count > 1 and ms: time.sleep(ms) diff --git a/tests/topotests/pim-basic/r1/pimd.conf b/tests/topotests/pim-basic/r1/pimd.conf index cec765699d..f64a46deb3 100644 --- a/tests/topotests/pim-basic/r1/pimd.conf +++ b/tests/topotests/pim-basic/r1/pimd.conf @@ -7,6 +7,10 @@ interface r1-eth0 interface r1-eth1 ip pim ! +interface r1-eth2 + ip igmp + ip pim +! interface lo ip pim ! diff --git a/tests/topotests/pim-basic/r1/zebra.conf b/tests/topotests/pim-basic/r1/zebra.conf index b0a25f12aa..e43041758b 100644 --- a/tests/topotests/pim-basic/r1/zebra.conf +++ b/tests/topotests/pim-basic/r1/zebra.conf @@ -6,6 +6,9 @@ interface r1-eth0 interface r1-eth1 ip address 10.0.30.1/24 ! +interface r1-eth2 + ip address 10.0.40.1/24 +! interface lo ip address 10.254.0.1/32 ! diff --git a/tests/topotests/pim-basic/r3/pimd.conf b/tests/topotests/pim-basic/r3/pimd.conf new file mode 100644 index 0000000000..f94ee99930 --- /dev/null +++ b/tests/topotests/pim-basic/r3/pimd.conf @@ -0,0 +1 @@ +hostname r3 diff --git a/tests/topotests/pim-basic/r3/zebra.conf b/tests/topotests/pim-basic/r3/zebra.conf new file mode 100644 index 0000000000..8e58e8c66a --- /dev/null +++ b/tests/topotests/pim-basic/r3/zebra.conf @@ -0,0 +1,8 @@ +hostname r3 +! +interface r3-eth0 + ip address 10.0.40.4/24 +! +interface lo + ip address 10.254.0.4/32 +! diff --git a/tests/topotests/pim-basic/rp/pimd.conf b/tests/topotests/pim-basic/rp/pimd.conf index 3f1b4d65c9..6e35c97971 100644 --- a/tests/topotests/pim-basic/rp/pimd.conf +++ b/tests/topotests/pim-basic/rp/pimd.conf @@ -7,3 +7,6 @@ interface lo ip pim ! 
ip pim rp 10.254.0.3 +ip pim register-accept-list ACCEPT + +ip prefix-list ACCEPT seq 5 permit 10.0.20.0/24 le 32 diff --git a/tests/topotests/pim-basic/test_pim.py b/tests/topotests/pim-basic/test_pim.py index 0e0569e234..2abee39176 100644 --- a/tests/topotests/pim-basic/test_pim.py +++ b/tests/topotests/pim-basic/test_pim.py @@ -32,7 +32,7 @@ import json from functools import partial CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest @@ -41,34 +41,44 @@ from lib.topolog import logger from mininet.topo import Topo + class PIMTopo(Topo): def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) - for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + for routern in range(1, 4): + tgen.add_router("r{}".format(routern)) - tgen.add_router('rp') + tgen.add_router("rp") + # rp ------ r1 -------- r2 + # \ + # --------- r3 # r1 -> .1 # r2 -> .2 # rp -> .3 + # r3 -> .4 # loopback network is 10.254.0.X/32 # # r1 <- sw1 -> r2 # r1-eth0 <-> r2-eth0 # 10.0.20.0/24 - sw = tgen.add_switch('sw1') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['r2']) + sw = tgen.add_switch("sw1") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r2"]) # r1 <- sw2 -> rp # r1-eth1 <-> rp-eth0 # 10.0.30.0/24 - sw = tgen.add_switch('sw2') - sw.add_link(tgen.gears['r1']) - sw.add_link(tgen.gears['rp']) + sw = tgen.add_switch("sw2") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["rp"]) + + # 10.0.40.0/24 + sw = tgen.add_switch("sw3") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r3"]) def setup_module(mod): @@ -79,21 +89,18 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in tgen.routers().iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_PIM, - os.path.join(CWD, '{}/pimd.conf'.format(rname)) + TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) - ) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) # After loading the configurations, this function loads configured daemons. tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() def teardown_module(mod): @@ -103,22 +110,25 @@ def teardown_module(mod): # This function tears down the whole topology. 
tgen.stop_topology() + def test_pim_rp_setup(): "Ensure basic routing has come up and the rp has an outgoing interface" - #Ensure rp and r1 establish pim neighbor ship and bgp has come up - #Finally ensure that the rp has an outgoing interface on r1 + # Ensure rp and r1 establish pim neighbor ship and bgp has come up + # Finally ensure that the rp has an outgoing interface on r1 tgen = get_topogen() - r1 = tgen.gears['r1'] - json_file = '{}/{}/rp-info.json'.format(CWD, r1.name) + r1 = tgen.gears["r1"] + json_file = "{}/{}/rp-info.json".format(CWD, r1.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip pim rp-info json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip pim rp-info json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=15, wait=5) assertmsg = '"{}" JSON output mismatches'.format(r1.name) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_pim_send_mcast_stream(): "Establish a Multicast stream from r2 -> r1 and then ensure S,G is created as appropriate" @@ -129,44 +139,58 @@ def test_pim_send_mcast_stream(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - rp = tgen.gears['rp'] - r2 = tgen.gears['r2'] - r1 = tgen.gears['r1'] + rp = tgen.gears["rp"] + r3 = tgen.gears["r3"] + r2 = tgen.gears["r2"] + r1 = tgen.gears["r1"] # Let's establish a S,G stream from r2 -> r1 CWD = os.path.dirname(os.path.realpath(__file__)) - r2.run("{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r2-eth0 > /tmp/bar".format(CWD)) + r2.run( + "{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r2-eth0 > /tmp/bar".format( + CWD + ) + ) + # And from r3 -> r1 + r3.run( + "{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r3-eth0 > /tmp/bar".format( + CWD + ) + ) # Let's see that it shows up and we have established some basic state out = r1.vtysh_cmd("show ip pim upstream json", isjson=True) expected = { - '229.1.1.1': { - '10.0.20.2': { - 'firstHopRouter': 1, - 'joinState': 'NotJoined', - 'regState': 'RegPrune', - 'inboundInterface': 'r1-eth0', + "229.1.1.1": { + "10.0.20.2": { + "firstHopRouter": 1, + "joinState": "NotJoined", + "regState": "RegPrune", + "inboundInterface": "r1-eth0", } } } - assert topotest.json_cmp(out, expected) is None, 'failed to converge pim' - #tgen.mininet_cli() + assert topotest.json_cmp(out, expected) is None, "failed to converge pim" + # tgen.mininet_cli() + def test_pim_rp_sees_stream(): "Ensure that the RP sees the stream and has acted accordingly" tgen = get_topogen() - rp = tgen.gears['rp'] - json_file = '{}/{}/upstream.json'.format(CWD, rp.name) + rp = tgen.gears["rp"] + json_file = "{}/{}/upstream.json".format(CWD, rp.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - rp, 'show ip pim upstream json', expected) - _, result = topotest.run_and_expect(test_func, None, count=20, wait=.5) + test_func = partial( + topotest.router_json_cmp, rp, "show ip pim upstream json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(rp.name) assert result is None, assertmsg + def test_pim_igmp_report(): "Send a igmp report from r2->r1 and ensure that the *,G state is created on r1" logger.info("Send a igmp report from r2-r1 and ensure *,G created") @@ -176,8 +200,8 @@ def test_pim_igmp_report(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - r2 = tgen.gears['r2'] - r1 
= tgen.gears['r1'] + r2 = tgen.gears["r2"] + r1 = tgen.gears["r1"] # Let's send a igmp report from r2->r1 CWD = os.path.dirname(os.path.realpath(__file__)) @@ -185,28 +209,28 @@ def test_pim_igmp_report(): out = r1.vtysh_cmd("show ip pim upstream json", isjson=True) expected = { - '229.1.1.2': { - '*': { - 'sourceIgmp': 1, - 'joinState': 'Joined', - 'regState': 'RegNoInfo', - 'sptBit': 0, + "229.1.1.2": { + "*": { + "sourceIgmp": 1, + "joinState": "Joined", + "regState": "RegNoInfo", + "sptBit": 0, } } } - assert topotest.json_cmp(out, expected) is None, 'failed to converge pim' + assert topotest.json_cmp(out, expected) is None, "failed to converge pim" def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini index 62c825341f..ade5bfd501 100644 --- a/tests/topotests/pytest.ini +++ b/tests/topotests/pytest.ini @@ -1,6 +1,6 @@ # Skip pytests example directory [pytest] -norecursedirs = .git example-test example-topojson-test lib docker bgp_rr_ibgp +norecursedirs = .git example-test example-topojson-test lib docker [topogen] # Default configuration values diff --git a/tests/topotests/rip-topo1/r1/rip_status.ref b/tests/topotests/rip-topo1/r1/rip_status.ref index d75fbe85bb..31ad46ab2e 100644 --- a/tests/topotests/rip-topo1/r1/rip_status.ref +++ b/tests/topotests/rip-topo1/r1/rip_status.ref @@ -8,8 +8,14 @@ Routing Protocol is "rip" Default version control: send version 2, receive version 2 Interface Send Recv Key-chain r1-eth1 2 2 + r1-eth2 2 2 + r1-eth3 2 2 Routing for Networks: 193.1.1.0/26 + r1-eth2 + r1-eth3 + Passive Interface(s): + r1-eth3 Routing Information Sources: Gateway BadPackets BadRoutes Distance Last Update 193.1.1.2 0 0 120 XX:XX:XX diff --git a/tests/topotests/rip-topo1/r1/ripd.conf b/tests/topotests/rip-topo1/r1/ripd.conf index 935ec312e5..54f1774214 100644 --- a/tests/topotests/rip-topo1/r1/ripd.conf +++ b/tests/topotests/rip-topo1/r1/ripd.conf @@ -4,6 +4,9 @@ router rip timers basic 5 180 5 version 2 network 193.1.1.0/26 + network r1-eth2 + network r1-eth3 + passive-interface r1-eth3 ! line vty ! 
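The ripd.conf hunk above enables RIP on two additional interfaces by name and marks r1-eth3 passive, which is what the updated rip_status.ref and show_ip_rip.ref files now expect. The reference comparison these topotests perform follows one pattern: strip volatile timer fields from the vtysh output, normalize newlines, and diff against the .ref file. A minimal self-contained sketch of that pattern is below; it uses difflib as a stand-in for topotest.get_textdiff(), and the file name in the usage comment is illustrative only.

import difflib
import re

def normalize(text):
    # Mask the volatile fields the tests strip: "in N seconds" and HH:MM:SS stamps
    text = re.sub(r"in [0-9]+ seconds", "in XX seconds", text)
    text = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", text)
    # Normalize newlines and keep line endings, as the tests do with splitlines(1)
    return ("\n".join(text.splitlines()) + "\n").splitlines(True)

def compare_rip_status(actual_text, ref_path):
    # Empty string means the router output matches the reference file
    expected = normalize(open(ref_path).read().rstrip())
    actual = normalize(actual_text.rstrip())
    return "".join(
        difflib.unified_diff(
            actual, expected,
            "actual IP RIP status", "expected IP RIP status",
        )
    )

# Usage (hypothetical): diff = compare_rip_status(vtysh_output, "r1/rip_status.ref")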
diff --git a/tests/topotests/rip-topo1/r1/show_ip_rip.ref b/tests/topotests/rip-topo1/r1/show_ip_rip.ref index 561560f230..a0b77c886e 100644 --- a/tests/topotests/rip-topo1/r1/show_ip_rip.ref +++ b/tests/topotests/rip-topo1/r1/show_ip_rip.ref @@ -6,5 +6,7 @@ Sub-codes: Network Next Hop Metric From Tag Time R(n) 192.168.2.0/24 193.1.1.2 3 193.1.1.2 0 XX:XX R(n) 192.168.3.0/24 193.1.1.2 3 193.1.1.2 0 XX:XX +C(i) 192.168.98.0/24 0.0.0.0 1 self 0 +C(i) 192.168.99.0/24 0.0.0.0 1 self 0 C(i) 193.1.1.0/26 0.0.0.0 1 self 0 R(n) 193.1.2.0/24 193.1.1.2 2 193.1.1.2 0 XX:XX diff --git a/tests/topotests/rip-topo1/r1/show_ip_route.ref b/tests/topotests/rip-topo1/r1/show_ip_route.ref index 62d71f0ab6..2ff26045aa 100644 --- a/tests/topotests/rip-topo1/r1/show_ip_route.ref +++ b/tests/topotests/rip-topo1/r1/show_ip_route.ref @@ -1,3 +1,3 @@ -R>* 192.168.2.0/24 [120/3] via 193.1.1.2, r1-eth1 -R>* 192.168.3.0/24 [120/3] via 193.1.1.2, r1-eth1 -R>* 193.1.2.0/24 [120/2] via 193.1.1.2, r1-eth1 +R>* 192.168.2.0/24 [120/3] via 193.1.1.2, r1-eth1, weight 1 +R>* 192.168.3.0/24 [120/3] via 193.1.1.2, r1-eth1, weight 1 +R>* 193.1.2.0/24 [120/2] via 193.1.1.2, r1-eth1, weight 1 diff --git a/tests/topotests/rip-topo1/r1/zebra.conf b/tests/topotests/rip-topo1/r1/zebra.conf index 8537f6dd80..7c8f2c502b 100644 --- a/tests/topotests/rip-topo1/r1/zebra.conf +++ b/tests/topotests/rip-topo1/r1/zebra.conf @@ -5,6 +5,13 @@ hostname r1 interface r1-eth0 ip address 192.168.1.1/24 ! +interface r1-eth2 + ip address 192.168.99.1/24 +! +interface r1-eth3 + ip address 192.168.98.1/24 +! + interface r1-eth1 description to sw2 - RIPv2 interface ip address 193.1.1.1/26 diff --git a/tests/topotests/rip-topo1/r2/rip_status.ref b/tests/topotests/rip-topo1/r2/rip_status.ref index da1abd041a..99841a62b0 100644 --- a/tests/topotests/rip-topo1/r2/rip_status.ref +++ b/tests/topotests/rip-topo1/r2/rip_status.ref @@ -14,5 +14,6 @@ Routing Protocol is "rip" 193.1.2.0/24 Routing Information Sources: Gateway BadPackets BadRoutes Distance Last Update + 193.1.1.1 0 0 120 XX:XX:XX 193.1.2.2 0 0 120 XX:XX:XX Distance: (default is 120) diff --git a/tests/topotests/rip-topo1/r2/show_ip_rip.ref b/tests/topotests/rip-topo1/r2/show_ip_rip.ref index 58ab052160..b61fb45eac 100644 --- a/tests/topotests/rip-topo1/r2/show_ip_rip.ref +++ b/tests/topotests/rip-topo1/r2/show_ip_rip.ref @@ -6,5 +6,7 @@ Sub-codes: Network Next Hop Metric From Tag Time R(n) 192.168.2.0/24 193.1.2.2 2 193.1.2.2 0 XX:XX R(n) 192.168.3.0/24 193.1.2.2 2 193.1.2.2 0 XX:XX +R(n) 192.168.98.0/24 193.1.1.1 2 193.1.1.1 0 XX:XX +R(n) 192.168.99.0/24 193.1.1.1 2 193.1.1.1 0 XX:XX C(i) 193.1.1.0/26 0.0.0.0 1 self 0 C(i) 193.1.2.0/24 0.0.0.0 1 self 0 diff --git a/tests/topotests/rip-topo1/r2/show_ip_route.ref b/tests/topotests/rip-topo1/r2/show_ip_route.ref index 4b34939aa5..80f51a92c7 100644 --- a/tests/topotests/rip-topo1/r2/show_ip_route.ref +++ b/tests/topotests/rip-topo1/r2/show_ip_route.ref @@ -1,2 +1,4 @@ -R>* 192.168.2.0/24 [120/2] via 193.1.2.2, r2-eth1 -R>* 192.168.3.0/24 [120/2] via 193.1.2.2, r2-eth1 +R>* 192.168.2.0/24 [120/2] via 193.1.2.2, r2-eth1, weight 1 +R>* 192.168.3.0/24 [120/2] via 193.1.2.2, r2-eth1, weight 1 +R>* 192.168.98.0/24 [120/2] via 193.1.1.1, r2-eth0, weight 1 +R>* 192.168.99.0/24 [120/2] via 193.1.1.1, r2-eth0, weight 1 diff --git a/tests/topotests/rip-topo1/r3/show_ip_rip.ref b/tests/topotests/rip-topo1/r3/show_ip_rip.ref index cf672712a8..1df299b5e6 100644 --- a/tests/topotests/rip-topo1/r3/show_ip_rip.ref +++ b/tests/topotests/rip-topo1/r3/show_ip_rip.ref @@ -6,5 
+6,7 @@ Sub-codes: Network Next Hop Metric From Tag Time S(r) 192.168.2.0/24 192.168.3.10 1 self 0 C(r) 192.168.3.0/24 0.0.0.0 1 self 0 +R(n) 192.168.98.0/24 193.1.2.1 3 193.1.2.1 0 XX:XX +R(n) 192.168.99.0/24 193.1.2.1 3 193.1.2.1 0 XX:XX R(n) 193.1.1.0/26 193.1.2.1 2 193.1.2.1 0 XX:XX C(i) 193.1.2.0/24 0.0.0.0 1 self 0 diff --git a/tests/topotests/rip-topo1/r3/show_ip_route.ref b/tests/topotests/rip-topo1/r3/show_ip_route.ref index 835e1229c8..2b739f0489 100644 --- a/tests/topotests/rip-topo1/r3/show_ip_route.ref +++ b/tests/topotests/rip-topo1/r3/show_ip_route.ref @@ -1 +1,3 @@ -R>* 193.1.1.0/26 [120/2] via 193.1.2.1, r3-eth1 +R>* 192.168.98.0/24 [120/3] via 193.1.2.1, r3-eth1, weight 1 +R>* 192.168.99.0/24 [120/3] via 193.1.2.1, r3-eth1, weight 1 +R>* 193.1.1.0/26 [120/2] via 193.1.2.1, r3-eth1, weight 1 diff --git a/tests/topotests/rip-topo1/test_rip_topo1.py b/tests/topotests/rip-topo1/test_rip_topo1.py index 8f3c25e910..3098812a24 100755 --- a/tests/topotests/rip-topo1/test_rip_topo1.py +++ b/tests/topotests/rip-topo1/test_rip_topo1.py @@ -54,6 +54,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "RIP Topology 1" @@ -63,33 +64,38 @@ class NetworkTopo(Topo): router = {} # # Setup Main Router - router[1] = topotest.addRouter(self, 'r1') + router[1] = topotest.addRouter(self, "r1") # # Setup RIP Routers for i in range(2, 4): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # # Setup Switches switch = {} # # On main router # First switch is for a dummy interface (for local network) - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[1], intfName2='r1-eth0') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink(switch[1], router[1], intfName2="r1-eth0") # # Switches for RIP # switch 2 switch is for connection to RIP router - switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch) - self.addLink(switch[2], router[1], intfName2='r1-eth1') - self.addLink(switch[2], router[2], intfName2='r2-eth0') + switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch) + self.addLink(switch[2], router[1], intfName2="r1-eth1") + self.addLink(switch[2], router[2], intfName2="r2-eth0") # switch 3 is between RIP routers - switch[3] = self.addSwitch('sw3', cls=topotest.LegacySwitch) - self.addLink(switch[3], router[2], intfName2='r2-eth1') - self.addLink(switch[3], router[3], intfName2='r3-eth1') + switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch) + self.addLink(switch[3], router[2], intfName2="r2-eth1") + self.addLink(switch[3], router[3], intfName2="r3-eth1") # switch 4 is stub on remote RIP router - switch[4] = self.addSwitch('sw4', cls=topotest.LegacySwitch) - self.addLink(switch[4], router[3], intfName2='r3-eth0') + switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch) + self.addLink(switch[4], router[3], intfName2="r3-eth0") + + switch[5] = self.addSwitch("sw5", cls=topotest.LegacySwitch) + self.addLink(switch[5], router[1], intfName2="r1-eth2") + switch[6] = self.addSwitch("sw6", cls=topotest.LegacySwitch) + self.addLink(switch[6], router[1], intfName2="r1-eth3") ##################################################### @@ -98,6 +104,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net @@ -105,7 +112,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - 
os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -116,9 +123,9 @@ def setup_module(module): # Starting Routers # for i in range(1, 4): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ripd', '%s/r%s/ripd.conf' % (thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) @@ -139,7 +146,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -147,7 +154,7 @@ def test_router_running(): # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -159,7 +166,7 @@ def test_converge_protocols(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -172,7 +179,7 @@ def test_converge_protocols(): # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -184,7 +191,7 @@ def test_rip_status(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -194,30 +201,37 @@ def test_rip_status(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/rip_status.ref' % (thisDir, i) + refTableFile = "%s/r%s/rip_status.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip rip status" 2> /dev/null').rstrip() - # Drop time in next due + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip rip status" 2> /dev/null') + .rstrip() + ) + # Drop time in next due actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IP RIP status", - title2="expected IP RIP status") + title2="expected IP RIP status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IP RIP status 
check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed IP RIP status check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) @@ -226,7 +240,7 @@ def test_rip_status(): # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -238,7 +252,7 @@ def test_rip_routes(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -248,28 +262,31 @@ def test_rip_routes(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ip_rip.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ip_rip.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip rip" 2> /dev/null').rstrip() + actual = net["r%s" % i].cmd('vtysh -c "show ip rip" 2> /dev/null').rstrip() # Drop Time actual = re.sub(r"[0-9][0-9]:[0-5][0-9]", "XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW IP RIP", - title2="expected SHOW IP RIP") + title2="expected SHOW IP RIP", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW IP RIP check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed SHOW IP RIP check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) @@ -278,7 +295,7 @@ def test_rip_routes(): # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -290,7 +307,7 @@ def test_zebra_ipv4_routingTable(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -300,37 +317,49 @@ def test_zebra_ipv4_routingTable(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ip_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ip_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"') + .rstrip() + ) # Drop timers on end of line (older Quagga Versions) 
actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual Zebra IPv4 routing table", - title2="expected Zebra IPv4 routing table") + title2="expected Zebra IPv4 routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Zebra IPv4 Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Zebra IPv4 Routing Table Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) # Make sure that all daemons are still running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -342,30 +371,30 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - pytest.skip('Skipping test for Stderr output and memory leaks') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + pytest.skip("Skipping test for Stderr output and memory leaks") thisDir = os.path.dirname(os.path.realpath(__file__)) print("\n\n** Verifing unexpected STDERR output from daemons") print("******************************************\n") - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('ripd') + log = net["r1"].getStdErr("ripd") if log: print("\nRIPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/ripng-topo1/r1/ripng_status.ref b/tests/topotests/ripng-topo1/r1/ripng_status.ref index e6197f179b..b02cc69d0e 100644 --- a/tests/topotests/ripng-topo1/r1/ripng_status.ref +++ b/tests/topotests/ripng-topo1/r1/ripng_status.ref @@ -8,8 +8,12 @@ Routing Protocol is "RIPng" Default version control: send version 1, receive version 1 Interface Send Recv r1-eth1 1 1 + r1-eth2 1 1 + r1-eth3 1 1 Routing for Networks: fc00:5::/64 + r1-eth2 + r1-eth3 Routing Information Sources: Gateway BadPackets BadRoutes Distance Last Update fe80::XXXX:XXXX:XXXX:XXXX diff --git a/tests/topotests/ripng-topo1/r1/ripngd.conf b/tests/topotests/ripng-topo1/r1/ripngd.conf index dd54c43557..07ed7296d9 100644 --- a/tests/topotests/ripng-topo1/r1/ripngd.conf +++ b/tests/topotests/ripng-topo1/r1/ripngd.conf @@ -7,6 +7,9 @@ debug ripng zebra router ripng timers basic 5 180 5 network fc00:5::/64 + network r1-eth2 + network r1-eth3 + passive-interface r1-eth3 ! line vty ! 
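The ripngd.conf change mirrors the RIPv2 one: RIPng is enabled on r1-eth2/r1-eth3 by interface name and r1-eth3 is made passive, so the RIPng refs that follow gain the fc00:98/fc00:99 prefixes. Because RIPng next hops are link-local and differ on every run, the tests mask them before diffing, using the same substitution as the updated ref files. A small sketch of that masking step follows; the sample address is made up for illustration and is not taken from any ref file.

import re

def mask_linklocal(text):
    # Collapse any fe80:: next hop into the fixed placeholder used by the .ref files
    return re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", text)

print(mask_linklocal("R(n) fc00:7::/64 fe80::5054:ff:fe12:3456 r1-eth1"))
# -> R(n) fc00:7::/64 fe80::XXXX:XXXX:XXXX:XXXX r1-eth1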
diff --git a/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref b/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref index 18d026a8fd..30d0f31e18 100644 --- a/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref +++ b/tests/topotests/ripng-topo1/r1/show_ipv6_ripng.ref @@ -12,3 +12,7 @@ R(n) fc00:7::/64 fe80::XXXX:XXXX:XXXX:XXXX r1-eth1 3 0 XX:XX R(n) fc00:7:1111::/64 fe80::XXXX:XXXX:XXXX:XXXX r1-eth1 3 0 XX:XX +C(i) fc00:98:0:1::/64 + :: self 1 0 +C(i) fc00:99:0:1::/64 + :: self 1 0 diff --git a/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref b/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref index 7e5fc3f0f5..55fbbc34f3 100644 --- a/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref +++ b/tests/topotests/ripng-topo1/r1/show_ipv6_route.ref @@ -1,3 +1,3 @@ -R>* fc00:6::/62 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1 -R>* fc00:7::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1 -R>* fc00:7:1111::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1 +R>* fc00:6::/62 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1 +R>* fc00:7::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1 +R>* fc00:7:1111::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1 diff --git a/tests/topotests/ripng-topo1/r1/zebra.conf b/tests/topotests/ripng-topo1/r1/zebra.conf index 1a10343044..11c1cdc5b9 100644 --- a/tests/topotests/ripng-topo1/r1/zebra.conf +++ b/tests/topotests/ripng-topo1/r1/zebra.conf @@ -10,6 +10,12 @@ interface r1-eth1 ipv6 address fc00:5::1/64 no link-detect ! +interface r1-eth2 + ipv6 address fc00:99:0:1::1/64 +! +interface r1-eth3 + ipv6 address fc00:98:0:1::1/64 +! ip forwarding ipv6 forwarding ! diff --git a/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref b/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref index 765efd07a2..fe5bcc8b31 100644 --- a/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref +++ b/tests/topotests/ripng-topo1/r2/show_ipv6_ripng.ref @@ -12,3 +12,7 @@ R(n) fc00:7::/64 fe80::XXXX:XXXX:XXXX:XXXX r2-eth1 2 0 XX:XX R(n) fc00:7:1111::/64 fe80::XXXX:XXXX:XXXX:XXXX r2-eth1 2 0 XX:XX +R(n) fc00:98:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r2-eth0 2 0 XX:XX +R(n) fc00:99:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r2-eth0 2 0 XX:XX diff --git a/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref b/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref index 688e77e7ed..72e1f926a2 100644 --- a/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref +++ b/tests/topotests/ripng-topo1/r2/show_ipv6_route.ref @@ -1,2 +1,4 @@ -R>* fc00:7::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1 -R>* fc00:7:1111::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1 +R>* fc00:7::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1, weight 1 +R>* fc00:7:1111::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1, weight 1 +R>* fc00:98:0:1::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth0, weight 1 +R>* fc00:99:0:1::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth0, weight 1 diff --git a/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref b/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref index 81e76b97a6..909ad663ba 100644 --- a/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref +++ b/tests/topotests/ripng-topo1/r3/show_ipv6_ripng.ref @@ -12,3 +12,7 @@ C(r) fc00:7::/64 :: self 1 0 S(r) fc00:7:1111::/64 :: self 1 0 +R(n) fc00:98:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r3-eth1 3 0 XX:XX +R(n) fc00:99:0:1::/64 + fe80::XXXX:XXXX:XXXX:XXXX r3-eth1 3 0 XX:XX diff --git a/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref b/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref index 8e46e39921..25a7440111 
100644 --- a/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref +++ b/tests/topotests/ripng-topo1/r3/show_ipv6_route.ref @@ -1 +1,3 @@ -R>* fc00:5::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1 +R>* fc00:5::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1 +R>* fc00:98:0:1::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1 +R>* fc00:99:0:1::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1 diff --git a/tests/topotests/ripng-topo1/test_ripng_topo1.py b/tests/topotests/ripng-topo1/test_ripng_topo1.py index 32b137240c..23e689235c 100755 --- a/tests/topotests/ripng-topo1/test_ripng_topo1.py +++ b/tests/topotests/ripng-topo1/test_ripng_topo1.py @@ -55,6 +55,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "RIPng Topology 1" @@ -64,33 +65,37 @@ class NetworkTopo(Topo): router = {} # # Setup Main Router - router[1] = topotest.addRouter(self, 'r1') + router[1] = topotest.addRouter(self, "r1") # # Setup RIPng Routers for i in range(2, 4): - router[i] = topotest.addRouter(self, 'r%s' % i) + router[i] = topotest.addRouter(self, "r%s" % i) # Setup Switches switch = {} # # On main router # First switch is for a dummy interface (for local network) - switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch) - self.addLink(switch[1], router[1], intfName2='r1-eth0') + switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch) + self.addLink(switch[1], router[1], intfName2="r1-eth0") # # Switches for RIPng # switch 2 switch is for connection to RIP router - switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch) - self.addLink(switch[2], router[1], intfName2='r1-eth1') - self.addLink(switch[2], router[2], intfName2='r2-eth0') + switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch) + self.addLink(switch[2], router[1], intfName2="r1-eth1") + self.addLink(switch[2], router[2], intfName2="r2-eth0") # switch 3 is between RIP routers - switch[3] = self.addSwitch('sw3', cls=topotest.LegacySwitch) - self.addLink(switch[3], router[2], intfName2='r2-eth1') - self.addLink(switch[3], router[3], intfName2='r3-eth1') + switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch) + self.addLink(switch[3], router[2], intfName2="r2-eth1") + self.addLink(switch[3], router[3], intfName2="r3-eth1") # switch 4 is stub on remote RIP router - switch[4] = self.addSwitch('sw4', cls=topotest.LegacySwitch) - self.addLink(switch[4], router[3], intfName2='r3-eth0') + switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch) + self.addLink(switch[4], router[3], intfName2="r3-eth0") + switch[5] = self.addSwitch("sw5", cls=topotest.LegacySwitch) + self.addLink(switch[5], router[1], intfName2="r1-eth2") + switch[6] = self.addSwitch("sw6", cls=topotest.LegacySwitch) + self.addLink(switch[6], router[1], intfName2="r1-eth3") ##################################################### @@ -99,6 +104,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net @@ -106,7 +112,7 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -117,9 +123,9 @@ def setup_module(module): # Starting Routers # for i in range(1, 4): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ripngd', '%s/r%s/ripngd.conf' % 
(thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # For debugging after starting Quagga/FRR daemons, uncomment the next line # CLI(net) @@ -140,7 +146,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR/Quagga is running on each Router node") @@ -148,7 +154,7 @@ def test_router_running(): # Starting Routers for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -160,7 +166,7 @@ def test_converge_protocols(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -173,11 +179,11 @@ def test_converge_protocols(): # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line - #CLI(net) + # CLI(net) def test_ripng_status(): @@ -185,7 +191,7 @@ def test_ripng_status(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -195,41 +201,53 @@ def test_ripng_status(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/ripng_status.ref' % (thisDir, i) + refTableFile = "%s/r%s/ripng_status.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null') + .rstrip() + ) # Mask out Link-Local mac address portion. They are random... 
actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual) - # Drop time in next due + # Drop time in next due actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IPv6 RIPng status", - title2="expected IPv6 RIPng status") + title2="expected IPv6 RIPng status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IPv6 RIPng status check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed IPv6 RIPng status check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -241,7 +259,7 @@ def test_ripng_routes(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -251,42 +269,52 @@ def test_ripng_routes(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ipv6_ripng.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ipv6_ripng.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i].cmd('vtysh -c "show ipv6 ripng" 2> /dev/null').rstrip() + ) # Drop Time actual = re.sub(r" [0-9][0-9]:[0-5][0-9]", " XX:XX", actual) # Mask out Link-Local mac address portion. They are random... 
- actual = re.sub(r" fe80::[0-9a-f: ]+", " fe80::XXXX:XXXX:XXXX:XXXX ", actual) + actual = re.sub( + r" fe80::[0-9a-f: ]+", " fe80::XXXX:XXXX:XXXX:XXXX ", actual + ) # Remove trailing spaces on all lines - actual = '\n'.join([line.rstrip() for line in actual.splitlines()]) + actual = "\n".join([line.rstrip() for line in actual.splitlines()]) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW IPv6 RIPng", - title2="expected SHOW IPv6 RIPng") + title2="expected SHOW IPv6 RIPng", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW IPv6 RIPng check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed SHOW IPv6 RIPng check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "SHOW IPv6 RIPng failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "SHOW IPv6 RIPng failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -298,7 +326,7 @@ def test_zebra_ipv6_routingTable(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -308,39 +336,51 @@ def test_zebra_ipv6_routingTable(): print("******************************************\n") failures = 0 for i in range(1, 4): - refTableFile = '%s/r%s/show_ipv6_route.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ipv6_route.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"') + .rstrip() + ) # Mask out Link-Local mac address portion. They are random... 
actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual) # Drop timers on end of line (older Quagga Versions) actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual Zebra IPv6 routing table", - title2="expected Zebra IPv6 routing table") + title2="expected Zebra IPv6 routing table", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed Zebra IPv6 Routing Table Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed Zebra IPv6 Routing Table Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff) + assert failures == 0, ( + "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" + % (i, diff) + ) # Make sure that all daemons are running for i in range(1, 4): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR/Quagga daemons, uncomment the next line @@ -352,24 +392,26 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") thisDir = os.path.dirname(os.path.realpath(__file__)) print("\n\n** Verifying unexpected STDERR output from daemons") print("******************************************\n") - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('ripngd') + log = net["r1"].getStdErr("ripngd") if log: print("\nRIPngd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) @@ -379,22 +421,26 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) - net['r1'].stopRouter() - net['r1'].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r1"].stopRouter() + net["r1"].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add 
"--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py index d73f613f95..17eb736cab 100755 --- a/tests/topotests/zebra_rib/test_zebra_rib.py +++ b/tests/topotests/zebra_rib/test_zebra_rib.py @@ -34,7 +34,7 @@ import json # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -45,24 +45,27 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class ZebraTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) - tgen.add_router('r1') + tgen.add_router("r1") # Create a empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r1"]) + def setup_module(mod): "Sets up the pytest environment" @@ -72,78 +75,86 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) # Initialize all routers. tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() tgen.stop_topology() + def test_zebra_kernel_admin_distance(): "Test some basic kernel routes added that should be accepted" logger.info("Test some basic kernel routes that should be accepted") tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] # Route with 255/8192 metric - r1.run('ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272') + r1.run("ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272") # Route with 1/1 metric - r1.run('ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217') + r1.run("ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217") # Route with 10/1 metric - r1.run('ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161') + r1.run("ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161") # Same route with a 160/1 metric - r1.run('ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561') + r1.run("ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561") - #Currently I believe we have a bug here with the same route and different - #metric. That needs to be properly resolved. Making a note for - #coming back around later and fixing this. 
-    #tgen.mininet_cli()
+    # Currently I believe we have a bug here with the same route and different
+    # metric.  That needs to be properly resolved.  Making a note for
+    # coming back around later and fixing this.
+    # tgen.mininet_cli()
 
     for i in range(1, 2):
-        json_file = '{}/r1/v4_route_{}.json'.format(CWD, i)
+        json_file = "{}/r1/v4_route_{}.json".format(CWD, i)
         expected = json.loads(open(json_file).read())
 
-        test_func = partial(topotest.router_json_cmp,
-                            r1,
-                            'show ip route 4.5.{}.0 json'.format(i),
-                            expected)
-        _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5)
+        test_func = partial(
+            topotest.router_json_cmp,
+            r1,
+            "show ip route 4.5.{}.0 json".format(i),
+            expected,
+        )
+        _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
         assertmsg = '"r1" JSON output mismatches'
         assert result is None, assertmsg
-    #tgen.mininet_cli()
+    # tgen.mininet_cli()
+
 
 def test_zebra_kernel_override():
     "Test that a FRR route with a lower admin distance takes over"
     logger.info("Test kernel override with a better admin distance")
     tgen = get_topogen()
-    if (tgen.routers_have_failure()):
-        ptyest.skip("skipped because of preview test failure")
+    if tgen.routers_have_failure():
+        pytest.skip("skipped because of previous test failure")
 
-    r1 = tgen.gears['r1']
+    r1 = tgen.gears["r1"]
     r1.vtysh_cmd("conf\nip route 4.5.1.0/24 192.168.216.3")
 
-    json_file = '{}/r1/v4_route_1_static_override.json'.format(CWD)
+    json_file = "{}/r1/v4_route_1_static_override.json".format(CWD)
     expected = json.loads(open(json_file).read())
 
-    test_func = partial(topotest.router_json_cmp,
-                        r1, 'show ip route 4.5.1.0 json', expected)
-    _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5)
+    test_func = partial(
+        topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
    assert result is None, '"r1" JSON output mismatches'
 
-    logger.info("Test that the removal of the static route allows the kernel to take back over")
+    logger.info(
+        "Test that the removal of the static route allows the kernel to take back over"
+    )
     r1.vtysh_cmd("conf\nno ip route 4.5.1.0/24 192.168.216.3")
 
-    json_file = '{}/r1/v4_route_1.json'.format(CWD)
+    json_file = "{}/r1/v4_route_1.json".format(CWD)
     expected = json.loads(open(json_file).read())
 
-    test_func = partial(topotest.router_json_cmp,
-                        r1, 'show ip route 4.5.1.0 json', expected)
-    _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5)
+    test_func = partial(
+        topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
     assert result is None, '"r1" JSON output mismatches'
 
 
@@ -151,10 +162,11 @@ def test_memory_leak():
     "Run the memory leak test and report results."
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tools/coccinelle/cast_to_larger_sizes.cocci b/tools/coccinelle/cast_to_larger_sizes.cocci new file mode 100644 index 0000000000..d97e1f9c33 --- /dev/null +++ b/tools/coccinelle/cast_to_larger_sizes.cocci @@ -0,0 +1,20 @@ +// spatch -sp_file tools/coccinelle/cast_to_larger_sizes.cocci --recursive-includes ./ + +@r@ +typedef uint8_t; +typedef uint16_t; +typedef uint32_t; +typedef uint64_t; +uint8_t *i8; +position p; +@@ + + \( + (uint64_t *) i8@p\|(uint32_t *) i8@p\|(uint16_t *) i8@p + \) + +@script:python@ +p << r.p; +@@ + +coccilib.report.print_report(p[0],"Bad typecast to larger size") diff --git a/tools/coccinelle/int_to_bool_function.cocci b/tools/coccinelle/int_to_bool_function.cocci new file mode 100644 index 0000000000..f86fe70be2 --- /dev/null +++ b/tools/coccinelle/int_to_bool_function.cocci @@ -0,0 +1,24 @@ +@@ +identifier fn; +typedef bool; +symbol false; +symbol true; +identifier I; +struct thread *thread; +@@ + +- int ++ bool +fn (...) +{ +... when strict + when != I = THREAD_ARG(thread); +( +- return 0; ++ return false; +| +- return 1; ++ return true; +) +?... +} diff --git a/tools/coccinelle/same_type_casting.cocci b/tools/coccinelle/same_type_casting.cocci new file mode 100644 index 0000000000..58fd7569af --- /dev/null +++ b/tools/coccinelle/same_type_casting.cocci @@ -0,0 +1,7 @@ +@@ +type T; +T *ptr; +@@ + +- (T *)ptr ++ ptr diff --git a/tools/gcc-plugins/.gitignore b/tools/gcc-plugins/.gitignore new file mode 100644 index 0000000000..dd8d0cb7ee --- /dev/null +++ b/tools/gcc-plugins/.gitignore @@ -0,0 +1,7 @@ +*.so +*.o +debian/.debhelper +debian/files +debian/*.substvars +debian/gcc-9-frr-plugin +!gcc-retain-typeinfo.patch diff --git a/tools/gcc-plugins/COPYING.GPLv3 b/tools/gcc-plugins/COPYING.GPLv3 new file mode 100644 index 0000000000..94a9ed024d --- /dev/null +++ b/tools/gcc-plugins/COPYING.GPLv3 @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. 
+ + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. 
+ + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. 
+ + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. 
(Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. 
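For reference, the three Coccinelle scripts introduced above (`cast_to_larger_sizes.cocci`, `same_type_casting.cocci`, `int_to_bool_function.cocci`) each target a narrow C pattern. The snippet below is a minimal, hypothetical sketch of the kind of code they act on; the function and variable names (`consume`, `is_even`) are illustrative only and do not come from the FRR tree.

```
#include <stdint.h>
#include <stdbool.h>

static void consume(uint8_t *buf)
{
	/* same_type_casting.cocci: "buf" is already a uint8_t *, so the
	 * cast below is redundant and the script would drop it. */
	uint8_t *p = (uint8_t *)buf;

	/* cast_to_larger_sizes.cocci: casting a uint8_t * to a wider
	 * pointer type is reported as "Bad typecast to larger size". */
	uint32_t *wide = (uint32_t *)p;
	(void)wide;
}

/* int_to_bool_function.cocci: a function that only ever returns 0 or 1
 * (and is not a thread callback) is converted to return bool. */
static bool is_even(unsigned int x)	/* previously: static int is_even(...) */
{
	if (x % 2 == 0)
		return true;		/* previously: return 1; */
	return false;			/* previously: return 0; */
}

int main(void)
{
	uint8_t buf[4] = {0};

	consume(buf);
	return is_even(2) ? 0 : 1;
}
```

Of the three, the cast-to-larger-size script only reports the offending cast, while the other two produce replacement patches when run through spatch as shown in the script headers.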
diff --git a/tools/gcc-plugins/Makefile b/tools/gcc-plugins/Makefile
new file mode 100644
index 0000000000..d6edd745ce
--- /dev/null
+++ b/tools/gcc-plugins/Makefile
@@ -0,0 +1,19 @@
+all: frr-format.so
+
+CXX=g++-9
+
+PLUGBASE=`$(CXX) -print-file-name=plugin`
+CPPFLAGS=-I$(PLUGBASE)/include -I$(PLUGBASE)/include/c-family
+
+frr-format.so: frr-format.o
+	$(CXX) -g -shared -o $@ $^
+
+frr-format.o: frr-format.c gcc-common.h
+	$(CXX) -g $(CPPFLAGS) -fPIC -Wall -Wextra -Wno-unused-parameter -c -o $@ $<
+
+install:
+	install -d $(DESTDIR)$(PLUGBASE)
+	install frr-format.so $(DESTDIR)$(PLUGBASE)
+
+clean:
+	rm -f frr-format.so frr-format.o
diff --git a/tools/gcc-plugins/README.md b/tools/gcc-plugins/README.md
new file mode 100644
index 0000000000..94a9635e76
--- /dev/null
+++ b/tools/gcc-plugins/README.md
@@ -0,0 +1,99 @@
+frr-format GCC plugin
+=====================
+
+Context
+-------
+
+This plugin provides improved type checking for Linux kernel style printf
+extensions (e.g. `%pI4` printing `struct in_addr *` as `1.2.3.4`).
+
+Other than additional warnings, (non-)usage of this plugin should not affect
+the build outcome.  It is perfectly fine to build FRR without this plugin.
+
+
+Binary Debian packages
+----------------------
+
+Can be found at [https://deb.nox.tf/devel/].
+
+
+GCC requirements
+----------------
+
+To use this plugin, you need a **patched 9.3.0** version of GCC using the
+[gcc-retain-typeinfo.patch] provided in this repo.  Without this patch, GCC
+strips type information too early during compilation, leaving the plugin
+unable to perform more meaningful type checks.  (Specifically, all
+`typedef` types will be "cooked down" to their final type.)
+
+(@eqvinox has discussed this one-line diff with some GCC people on their
+IRC channel around mid-2019; the consensus was that the line is an "early
+optimization" and removing it should not be harmful.  However, doing so is
+likely to break GCC's unit tests since warnings would print different types.)
+
+Other versions of gcc are not supported.  gcc 8 previously did work but isn't
+actively tested/maintained.  gcc 10 is not supported yet but may work.
+
+
+Usage
+-----
+
+First, all plugin-specific statements should be wrapped by an ifdef:
+
+```
+#ifdef _FRR_ATTRIBUTE_PRINTFRR
+...
+#endif
+```
+
+`_FRR_ATTRIBUTE_PRINTFRR` will be defined to the plugin's version (currently
+0x10000) whenever the plugin is loaded.
+
+Then, annotate extended printf functions with the `frr_format` attribute.
+This works exactly like the `format` attribute:
+
+```
+int printfn(const char *fmt, ...) __attribute__((frr_format("frr_printf", 1, 2)));
+```
+
+In the FRR codebase, use the `PRINTFRR` macro provided in
+[../../lib/compiler.h].
+
+Lastly, "declare" extensions with `#pragma FRR printfrr_ext`:
+```
+#ifdef _FRR_ATTRIBUTE_PRINTFRR
+#pragma FRR printfrr_ext "%pI4" (struct in_addr *)
+#pragma FRR printfrr_ext "%pI4" (in_addr_t *)
+#endif
+```
+
+Note that you can use multiple such lines if a particular extended printer
+works for more than one type (as seen above).
+
+The pragma type "parameter" looks like a C cast but unfortunately due to GCC
+not exporting a good interface to proper type parsing, it is "ghetto parsed",
+with only `struct`, `union`, `enum` being properly supported.  `const` is
+ignored if it occurs as the first token.  (The plugin always accepts `const`
+parameters for printf since printf shouldn't change the passed data it's
+printing.)
The last token may be zero or more counts of `*`; note that
+qualifiers on the intermediate pointers (e.g. `const char * const *`) are not
+supported.
+
+
+TODOs and future direction
+--------------------------
+
+* support two-parameter extension printers that use the precision field
+  (e.g. `"%.*pI5" (int af, void *addr)` to print an IP address with the
+  address family in the "precision").
+
+* port to future GCC versions
+
+* get the one-liner patch upstreamed
+
+
+License
+-------
+
+This plugin is **derivative of GCC 9.x**.  It was created by copying off
+`c-format.c`.  It must therefore adhere to GCC's GPLv3+ license.
diff --git a/tools/gcc-plugins/debian/changelog b/tools/gcc-plugins/debian/changelog
new file mode 100644
index 0000000000..62bbbcd46f
--- /dev/null
+++ b/tools/gcc-plugins/debian/changelog
@@ -0,0 +1,5 @@
+gcc-frr-plugin (9.3.0d8+equi2) unstable; urgency=medium
+
+  * package created (+equi1 used during development, never released.)
+
+ -- David Lamparter <equinox-debian@diac24.net>  Sun, 29 Mar 2020 08:32:24 +0200
diff --git a/tools/gcc-plugins/debian/compat b/tools/gcc-plugins/debian/compat
new file mode 100644
index 0000000000..48082f72f0
--- /dev/null
+++ b/tools/gcc-plugins/debian/compat
@@ -0,0 +1 @@
+12
diff --git a/tools/gcc-plugins/debian/control b/tools/gcc-plugins/debian/control
new file mode 100644
index 0000000000..6a9b886bef
--- /dev/null
+++ b/tools/gcc-plugins/debian/control
@@ -0,0 +1,19 @@
+Source: gcc-frr-plugin
+Section: devel
+Priority: optional
+Maintainer: David Lamparter <equinox-debian@diac24.net>
+Build-Depends:
+ gcc-9-plugin-dev (=9.3.0-8+equi1),
+ debhelper (>= 12)
+Standards-Version: 4.4.1
+Homepage: https://www.frrouting.org/
+Vcs-Browser: https://github.com/FRRouting/frr/
+Vcs-Git: https://github.com/FRRouting/frr.git
+
+Package: gcc-9-frr-plugin
+Architecture: linux-any
+Depends:
+ gcc-9 (=9.3.0-8+equi1),
+ ${misc:Depends},
+ ${shlibs:Depends}
+Description: GCC plugin for FRRouting
diff --git a/tools/gcc-plugins/debian/copyright b/tools/gcc-plugins/debian/copyright
new file mode 100644
index 0000000000..dcd9fa1770
--- /dev/null
+++ b/tools/gcc-plugins/debian/copyright
@@ -0,0 +1,9 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: FRR
+Upstream-Contact: maintainers@frrouting.org, security@frrouting.org
+Source: https://www.frrouting.org/
+
+Files: *
+Copyright:
+ 2019-2020 by David Lamparter
+ Code derived from GCC, please refer to gcc package copyright.
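Tying the README's pieces together, the following is a minimal, hypothetical sketch of a translation unit using the plugin. The guard macro, the `frr_format` attribute and the `#pragma FRR printfrr_ext` lines are taken from the README above; the wrapper name `my_logf` and the `report_peer` function are illustrative only, and linking still requires a real printfrr-style implementation behind the wrapper.

```
#include <netinet/in.h>

/* Hypothetical printfrr-style wrapper; only the attribute wiring matters
 * here, the actual formatting happens elsewhere. */
int my_logf(const char *fmt, ...)
#ifdef _FRR_ATTRIBUTE_PRINTFRR
	__attribute__((frr_format("frr_printf", 1, 2)))
#endif
	;

#ifdef _FRR_ATTRIBUTE_PRINTFRR
/* Declare what %pI4 expects, as in the README above. */
#pragma FRR printfrr_ext "%pI4" (struct in_addr *)
#pragma FRR printfrr_ext "%pI4" (in_addr_t *)
#endif

void report_peer(struct in_addr *peer, long updates)
{
	/* Accepted: %pI4 gets a struct in_addr *, %ld gets a long. */
	my_logf("peer %pI4 sent %ld updates", peer, updates);

	/* Diagnosed when the plugin is loaded: a long * is not one of the
	 * declared %pI4 argument types. */
	my_logf("peer %pI4", &updates);
}
```

With the plugin loaded (e.g. `-fplugin=./frr-format.so`, as used by format-test.py below), the second call is reported at compile time just like an ordinary `-Wformat` mismatch; without the plugin the file still builds, only without the extra checking.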
diff --git a/tools/gcc-plugins/debian/rules b/tools/gcc-plugins/debian/rules new file mode 100755 index 0000000000..f8f42ad337 --- /dev/null +++ b/tools/gcc-plugins/debian/rules @@ -0,0 +1,11 @@ +#!/usr/bin/make -f + +# standard Debian options & profiles + +export DEB_BUILD_MAINT_OPTIONS = hardening=+all + +%: + dh $@ + +override_dh_auto_test: + true diff --git a/tools/gcc-plugins/debian/source/format b/tools/gcc-plugins/debian/source/format new file mode 100644 index 0000000000..af745b310b --- /dev/null +++ b/tools/gcc-plugins/debian/source/format @@ -0,0 +1 @@ +3.0 (git) diff --git a/tools/gcc-plugins/format-test.c b/tools/gcc-plugins/format-test.c new file mode 100644 index 0000000000..b031ca5ece --- /dev/null +++ b/tools/gcc-plugins/format-test.c @@ -0,0 +1,107 @@ +#include <stddef.h> +#include <stdlib.h> +#include <netinet/in.h> +#include <sys/types.h> +#include <unistd.h> + +typedef unsigned long mytype; +typedef size_t mysize; + +typedef unsigned int not_in_addr_t; +typedef in_addr_t yes_in_addr_t; +typedef struct in_addr in_addr_s; + +struct other { + int x; +}; + +int testfn(const char *fmt, ...) __attribute__((frr_format("frr_printf", 1, 2))); + +#ifndef _FRR_ATTRIBUTE_PRINTFRR +#error please load the frr-format plugin +#endif + +#pragma FRR printfrr_ext "%pI4" (struct in_addr *) +#pragma FRR printfrr_ext "%pI4" (in_addr_t *) + +int test(unsigned long long ay) +{ + size_t v_size_t = 0; + long v_long = 0; + int v_int = 0; + uint64_t v_uint64_t = 0; + mytype v_mytype = 0; + mysize v_mysize = 0; + pid_t v_pid_t = 0; + + testfn("%zu", v_size_t); // NOWARN + testfn("%zu", v_long); // WARN + testfn("%zu", v_int); // WARN + testfn("%zu", sizeof(v_int)); // NOWARN + testfn("%zu", v_mytype); // WARN + testfn("%zu", v_mysize); // NOWARN + testfn("%zu", v_uint64_t); // WARN + testfn("%zu", v_pid_t); // WARN + + testfn("%lu", v_long); // NOWARN PEDANTIC + testfn("%lu", v_int); // WARN + testfn("%lu", v_size_t); // WARN + testfn("%lu", sizeof(v_int)); // NOWARN (integer constant) + testfn("%lu", v_uint64_t); // WARN + testfn("%lu", v_pid_t); // WARN + + testfn("%ld", v_long); // NOWARN + testfn("%ld", v_int); // WARN + testfn("%ld", v_size_t); // WARN + testfn("%ld", sizeof(v_int)); // NOWARN (integer constant) + testfn("%ld", v_uint64_t); // WARN + testfn("%ld", v_pid_t); // WARN + + testfn("%d", v_int); // NOWARN + testfn("%d", v_long); // WARN + testfn("%d", v_size_t); // WARN + testfn("%d", sizeof(v_int)); // WARN + testfn("%d", v_uint64_t); // WARN + testfn("%d", v_pid_t); // WARN + + testfn("%Lu", v_size_t); // WARN + testfn("%Lu", v_long); // WARN + testfn("%Lu", v_int); // WARN + testfn("%Lu", sizeof(v_int)); // NOWARN (integer constant) + testfn("%Lu", v_mytype); // WARN + testfn("%Lu", v_mysize); // WARN + testfn("%Lu", v_pid_t); // WARN + testfn("%Lu", v_uint64_t); // NOWARN + + testfn("%Ld", v_size_t); // WARN + testfn("%Ld", v_long); // WARN + testfn("%Ld", v_int); // WARN + testfn("%Ld", sizeof(v_int)); // NOWARN (integer constant) + testfn("%Ld", v_mytype); // WARN + testfn("%Ld", v_mysize); // WARN + testfn("%Ld", v_pid_t); // WARN + testfn("%Ld", v_uint64_t); // NOWARN + + testfn("%pI4", &v_long); // WARN + + in_addr_t v_in_addr_t; + yes_in_addr_t v_yes_in_addr_t; + not_in_addr_t v_not_in_addr_t; + void *v_voidp = &v_in_addr_t; + + testfn("%pI4", &v_in_addr_t); // NOWARN + testfn("%pI4", &v_yes_in_addr_t); // NOWARN + testfn("%pI4", &v_not_in_addr_t); // WARN + testfn("%pI4", v_voidp); // WARN + + struct in_addr v_in_addr; + in_addr_s v_in_addr_s; + struct other v_other; + 
const struct in_addr *v_in_addr_const = &v_in_addr; + + testfn("%pI4", &v_in_addr); // NOWARN + testfn("%pI4", &v_in_addr_s); // NOWARN + testfn("%pI4", &v_other); // WARN + testfn("%pI4", v_in_addr_const); // NOWARN + return 0; +} diff --git a/tools/gcc-plugins/format-test.py b/tools/gcc-plugins/format-test.py new file mode 100644 index 0000000000..cc6ca6100e --- /dev/null +++ b/tools/gcc-plugins/format-test.py @@ -0,0 +1,57 @@ +import subprocess +import sys +import shlex +import os +import re + +os.environ['LC_ALL'] = 'C' +os.environ['LANG'] = 'C' +for k in list(os.environ.keys()): + if k.startswith('LC_'): + os.environ.pop(k) + +c_re = re.compile(r'//\s+(NO)?WARN') +expect = {} +lines = {} + +with open('format-test.c', 'r') as fd: + for lno, line in enumerate(fd.readlines(), 1): + lines[lno] = line.strip() + m = c_re.search(line) + if m is None: + continue + if m.group(1) is None: + expect[lno] = 'warn' + else: + expect[lno] = 'nowarn' + +cmd = shlex.split('gcc -Wall -Wextra -Wno-unused -fplugin=./frr-format.so -fno-diagnostics-show-caret -c -o format-test.o format-test.c') + +gcc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +sout, serr = gcc.communicate() +gcc.wait() + +gcclines = serr.decode('UTF-8').splitlines() +line_re = re.compile(r'^format-test\.c:(\d+):(.*)$') +gcc_warns = {} + +for line in gcclines: + if line.find('In function') >= 0: + continue + m = line_re.match(line) + if m is None: + sys.stderr.write('cannot process GCC output: %s\n' % line) + continue + + lno = int(m.group(1)) + gcc_warns.setdefault(lno, []).append(line) + +for lno, val in expect.items(): + if val == 'nowarn' and lno in gcc_warns: + sys.stderr.write('unexpected gcc warning on line %d:\n\t%s\n\t%s\n' % (lno, lines[lno], '\n\t'.join(gcc_warns[lno]))) + if val == 'warn' and lno not in gcc_warns: + sys.stderr.write('expected warning on line %d but did not get one\n\t%s\n' % (lno, lines[lno])) + +leftover = set(gcc_warns.keys()) - set(expect.keys()) +for lno in sorted(leftover): + sys.stderr.write('unmarked gcc warning on line %d:\n\t%s\n\t%s\n' % (lno, lines[lno], '\n\t'.join(gcc_warns[lno]))) diff --git a/tools/gcc-plugins/frr-format.c b/tools/gcc-plugins/frr-format.c new file mode 100644 index 0000000000..174f403d48 --- /dev/null +++ b/tools/gcc-plugins/frr-format.c @@ -0,0 +1,4457 @@ +/* Check calls to formatted I/O functions (-Wformat). + Copyright (C) 1992-2019 Free Software Foundation, Inc. + + Extended for FRR's printfrr() with Linux kernel style extensions + Copyright (C) 2019-2020 David Lamparter, for NetDEF, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING.GPLv3. If not see +<http://www.gnu.org/licenses/>. 
*/ + +#include "gcc-common.h" + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +//include "c-target.h" +#include "c-common.h" +#include "alloc-pool.h" +#include "stringpool.h" +#include "c-tree.h" +#include "c-objc.h" +#include "intl.h" +#include "langhooks.h" +#include "frr-format.h" +#include "diagnostic.h" +#include "substring-locations.h" +#include "selftest.h" +#include "selftest-diagnostic.h" +#ifndef FIRST_PSEUDO_REGISTER +#define FIRST_PSEUDO_REGISTER 0 +#endif +#include "builtins.h" +#include "attribs.h" +#include "gcc-rich-location.h" +#include "c-pretty-print.h" +#include "c-pragma.h" + +extern struct cpp_reader *parse_in; + +#pragma GCC visibility push(hidden) + +/* Handle attributes associated with format checking. */ + +/* This must be in the same order as format_types, except for + format_type_error. Target-specific format types do not have + matching enum values. */ +enum format_type { frr_printf_format_type, + format_type_error = -1}; + +struct function_format_info +{ + int format_type; /* type of format (printf, scanf, etc.) */ + unsigned HOST_WIDE_INT format_num; /* number of format argument */ + unsigned HOST_WIDE_INT first_arg_num; /* number of first arg (zero for varargs) */ +}; + +static GTY(()) tree local_uint64_t_node; +static GTY(()) tree local_int64_t_node; + +static GTY(()) tree local_size_t_node; +static GTY(()) tree local_ssize_t_node; +static GTY(()) tree local_atomic_size_t_node; +static GTY(()) tree local_atomic_ssize_t_node; +static GTY(()) tree local_ptrdiff_t_node; + +static GTY(()) tree local_pid_t_node; +static GTY(()) tree local_uid_t_node; +static GTY(()) tree local_gid_t_node; +static GTY(()) tree local_time_t_node; + +static GTY(()) tree local_socklen_t_node; +static GTY(()) tree local_in_addr_t_node; + +static struct type_special { + tree *match; + tree *replace; + tree *cousin; +} special_types[] = { + { &local_atomic_size_t_node, &local_size_t_node, &local_ssize_t_node, }, + { &local_atomic_ssize_t_node, &local_ssize_t_node, &local_size_t_node, }, + { &local_size_t_node, NULL, &local_ssize_t_node, }, + { &local_ssize_t_node, NULL, &local_size_t_node, }, + { &local_uint64_t_node, NULL, &local_int64_t_node, }, + { &local_int64_t_node, NULL, &local_uint64_t_node, }, + { &local_pid_t_node, NULL, &local_pid_t_node, }, + { &local_uid_t_node, NULL, &local_uid_t_node, }, + { &local_gid_t_node, NULL, &local_gid_t_node, }, + { &local_time_t_node, NULL, &local_time_t_node, }, + { NULL, NULL, NULL, } +}; + +static bool decode_format_attr (tree, function_format_info *, int); +static int decode_format_type (const char *); + +static bool check_format_string (tree argument, + unsigned HOST_WIDE_INT format_num, + int flags, bool *no_add_attrs, + int expected_format_type); +static bool get_constant (tree expr, unsigned HOST_WIDE_INT *value, + int validated_p); +static const char *convert_format_name_to_system_name (const char *attr_name); + +static int first_target_format_type; +static const char *format_name (int format_num); +static int format_flags (int format_num); + +/* Emit a warning as per format_warning_va, but construct the substring_loc + for the character at offset (CHAR_IDX - 1) within a string constant + FORMAT_STRING_CST at FMT_STRING_LOC. */ + +ATTRIBUTE_GCC_DIAG (5,6) +static bool +format_warning_at_char (location_t fmt_string_loc, tree format_string_cst, + int char_idx, int opt, const char *gmsgid, ...) 
+{ + va_list ap; + va_start (ap, gmsgid); + tree string_type = TREE_TYPE (format_string_cst); + + /* The callers are of the form: + format_warning (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + where format_chars has already been incremented, so that + CHAR_IDX is one character beyond where the warning should + be emitted. Fix it. */ + char_idx -= 1; + + substring_loc fmt_loc (fmt_string_loc, string_type, char_idx, char_idx, + char_idx); +#if BUILDING_GCC_VERSION >= 9000 + format_string_diagnostic_t diag (fmt_loc, NULL, UNKNOWN_LOCATION, NULL, + NULL); + bool warned = diag.emit_warning_va (opt, gmsgid, &ap); +#else + bool warned = format_warning_va (fmt_loc, UNKNOWN_LOCATION, NULL, + opt, gmsgid, &ap); +#endif + va_end (ap); + + return warned; +} + +/* Check that we have a pointer to a string suitable for use as a format. + The default is to check for a char type. + For objective-c dialects, this is extended to include references to string + objects validated by objc_string_ref_type_p (). + Targets may also provide a string object type that can be used within c and + c++ and shared with their respective objective-c dialects. In this case the + reference to a format string is checked for validity via a hook. + + The function returns true if strref points to any string type valid for the + language dialect and target. */ + +static bool +valid_stringptr_type_p (tree strref) +{ + return (strref != NULL + && TREE_CODE (strref) == POINTER_TYPE + && (TYPE_MAIN_VARIANT (TREE_TYPE (strref)) == char_type_node + || objc_string_ref_type_p (strref))); +// || (*targetcm.string_object_ref_type_p) ((const_tree) strref))); +} + +/* Handle a "format_arg" attribute; arguments as in + struct attribute_spec.handler. */ +tree +handle_frr_format_arg_attribute (tree *node, tree ARG_UNUSED (name), + tree args, int flags, bool *no_add_attrs) +{ + tree type = *node; + tree format_num_expr = TREE_VALUE (args); + unsigned HOST_WIDE_INT format_num = 0; + + if (!get_constant (format_num_expr, &format_num, 0)) + { + error ("format string has invalid operand number"); + *no_add_attrs = true; + return NULL_TREE; + } + + if (prototype_p (type)) + { + /* The format arg can be any string reference valid for the language and + target. We cannot be more specific in this case. */ + if (!check_format_string (type, format_num, flags, no_add_attrs, -1)) + return NULL_TREE; + } + + if (!valid_stringptr_type_p (TREE_TYPE (type))) + { + if (!(flags & (int) ATTR_FLAG_BUILT_IN)) + error ("function does not return string type"); + *no_add_attrs = true; + return NULL_TREE; + } + + return NULL_TREE; +} + +/* Verify that the format_num argument is actually a string reference suitable, + for the language dialect and target (in case the format attribute is in + error). When we know the specific reference type expected, this is also + checked. */ +static bool +check_format_string (tree fntype, unsigned HOST_WIDE_INT format_num, + int flags, bool *no_add_attrs, int expected_format_type) +{ + unsigned HOST_WIDE_INT i; + bool is_target_sref, is_char_ref; + tree ref; + int fmt_flags; + function_args_iterator iter; + + i = 1; + FOREACH_FUNCTION_ARGS (fntype, ref, iter) + { + if (i == format_num) + break; + i++; + } + + if (!ref + || !valid_stringptr_type_p (ref)) + { + if (!(flags & (int) ATTR_FLAG_BUILT_IN)) + error ("format string argument is not a string type"); + *no_add_attrs = true; + return false; + } + + /* We only know that we want a suitable string reference. 
*/ + if (expected_format_type < 0) + return true; + + /* Now check that the arg matches the expected type. */ + is_char_ref = + (TYPE_MAIN_VARIANT (TREE_TYPE (ref)) == char_type_node); + + fmt_flags = format_flags (expected_format_type); + is_target_sref = false; + + if (!(fmt_flags & FMT_FLAG_PARSE_ARG_CONVERT_EXTERNAL)) + { + if (is_char_ref) + return true; /* OK, we expected a char and found one. */ + else + { + error ("found a %qT but the format argument should be a string", + ref); + *no_add_attrs = true; + return false; + } + } + + /* We expect a string object type as the format arg. */ + if (is_char_ref) + { + error ("format argument should be a %qs reference but" + " a string was found", format_name (expected_format_type)); + *no_add_attrs = true; + return false; + } + + /* We will allow a target string ref to match only itself. */ + if (first_target_format_type + && expected_format_type >= first_target_format_type + && is_target_sref) + return true; + else + { + error ("format argument should be a %qs reference", + format_name (expected_format_type)); + *no_add_attrs = true; + return false; + } + + gcc_unreachable (); +} + +/* Verify EXPR is a constant, and store its value. + If validated_p is true there should be no errors. + Returns true on success, false otherwise. */ +static bool +get_constant (tree expr, unsigned HOST_WIDE_INT *value, int validated_p) +{ + if (!tree_fits_uhwi_p (expr)) + { + gcc_assert (!validated_p); + return false; + } + + *value = TREE_INT_CST_LOW (expr); + + return true; +} + +/* Decode the arguments to a "format" attribute into a + function_format_info structure. It is already known that the list + is of the right length. If VALIDATED_P is true, then these + attributes have already been validated and must not be erroneous; + if false, it will give an error message. Returns true if the + attributes are successfully decoded, false otherwise. */ + +static bool +decode_format_attr (tree args, function_format_info *info, int validated_p) +{ + tree format_type_id = TREE_VALUE (args); + tree format_num_expr = TREE_VALUE (TREE_CHAIN (args)); + tree first_arg_num_expr + = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (args))); + + if (TREE_CODE (format_type_id) != STRING_CST) + { + gcc_assert (!validated_p); + error ("unrecognized format specifier"); + return false; + } + else + { + const char *p = TREE_STRING_POINTER (format_type_id); + + p = convert_format_name_to_system_name (p); + + info->format_type = decode_format_type (p); + + if (info->format_type == format_type_error) + { + gcc_assert (!validated_p); + warning (OPT_Wformat_, "%qE is an unrecognized format function type", + format_type_id); + return false; + } + } + + if (!get_constant (format_num_expr, &info->format_num, validated_p)) + { + error ("format string has invalid operand number"); + return false; + } + + if (!get_constant (first_arg_num_expr, &info->first_arg_num, validated_p)) + { + error ("%<...%> has invalid operand number"); + return false; + } + + if (info->first_arg_num != 0 && info->first_arg_num <= info->format_num) + { + gcc_assert (!validated_p); + error ("format string argument follows the args to be formatted"); + return false; + } + + return true; +} + +/* Check a call to a format function against a parameter list. */ + +/* The C standard version C++ is treated as equivalent to + or inheriting from, for the purpose of format features supported. */ +#define CPLUSPLUS_STD_VER (cxx_dialect < cxx11 ? STD_C94 : STD_C99) +/* The C standard version we are checking formats against when pedantic. 
*/ +#define C_STD_VER ((int) (c_dialect_cxx () \ + ? CPLUSPLUS_STD_VER \ + : (flag_isoc99 \ + ? STD_C99 \ + : (flag_isoc94 ? STD_C94 : STD_C89)))) +/* The name to give to the standard version we are warning about when + pedantic. FEATURE_VER is the version in which the feature warned out + appeared, which is higher than C_STD_VER. */ +#define C_STD_NAME(FEATURE_VER) (c_dialect_cxx () \ + ? (cxx_dialect < cxx11 ? "ISO C++98" \ + : "ISO C++11") \ + : ((FEATURE_VER) == STD_EXT \ + ? "ISO C" \ + : "ISO C90")) +/* Adjust a C standard version, which may be STD_C9L, to account for + -Wno-long-long. Returns other standard versions unchanged. */ +#define ADJ_STD(VER) ((int) ((VER) == STD_C9L \ + ? (warn_long_long ? STD_C99 : STD_C89) \ + : (VER))) + +/* Enum describing the kind of specifiers present in the format and + requiring an argument. */ +enum format_specifier_kind { + CF_KIND_FORMAT, + CF_KIND_FIELD_WIDTH, + CF_KIND_FIELD_PRECISION +}; + +static const char *kind_descriptions[] = { + N_("format"), + N_("field width specifier"), + N_("field precision specifier") +}; + +/* Structure describing details of a type expected in format checking, + and the type to check against it. */ +struct format_wanted_type +{ + /* The type wanted. */ + tree wanted_type; + /* The name of this type to use in diagnostics. */ + const char *wanted_type_name; + /* Should be type checked just for scalar width identity. */ + int scalar_identity_flag; + /* The level of indirection through pointers at which this type occurs. */ + int pointer_count; + /* Whether, when pointer_count is 1, to allow any character type when + pedantic, rather than just the character or void type specified. */ + int char_lenient_flag; + /* Whether the argument, dereferenced once, is written into and so the + argument must not be a pointer to a const-qualified type. */ + int writing_in_flag; + /* Whether the argument, dereferenced once, is read from and so + must not be a NULL pointer. */ + int reading_from_flag; + /* The kind of specifier that this type is used for. */ + enum format_specifier_kind kind; + /* The starting character of the specifier. This never includes the + initial percent sign. */ + const char *format_start; + /* The length of the specifier. */ + int format_length; + /* The actual parameter to check against the wanted type. */ + tree param; + /* The argument number of that parameter. */ + int arg_num; + /* The offset location of this argument with respect to the format + string location. */ + unsigned int offset_loc; + /* The next type to check for this format conversion, or NULL if none. */ + struct format_wanted_type *next; +}; + +/* Convenience macro for format_length_info meaning unused. 
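+   (NO_FMT fills the double_name/double_index/double_std slots of a format_length_info entry with a NULL name and placeholder values when a length modifier has no doubled form.)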
*/ +#define NO_FMT NULL, FMT_LEN_none, STD_C89 + +static const format_length_info printf_length_specs[] = +{ + { "h", FMT_LEN_h, STD_C89, "hh", FMT_LEN_hh, STD_C99, 0 }, + { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C9L, 0 }, + { "q", FMT_LEN_ll, STD_EXT, NO_FMT, 0 }, + { "L", FMT_LEN_L, STD_C89, NO_FMT, 0 }, + { "z", FMT_LEN_z, STD_C99, NO_FMT, 0 }, + { "Z", FMT_LEN_z, STD_EXT, NO_FMT, 0 }, + { "t", FMT_LEN_t, STD_C99, NO_FMT, 0 }, + { "j", FMT_LEN_j, STD_C99, NO_FMT, 0 }, + { "H", FMT_LEN_H, STD_EXT, NO_FMT, 0 }, + { "D", FMT_LEN_D, STD_EXT, "DD", FMT_LEN_DD, STD_EXT, 0 }, + { NO_FMT, NO_FMT, 0 } +}; + +static const format_flag_spec printf_flag_specs[] = +{ + { ' ', 0, 0, 0, N_("' ' flag"), N_("the ' ' printf flag"), STD_C89 }, + { '+', 0, 0, 0, N_("'+' flag"), N_("the '+' printf flag"), STD_C89 }, + { '#', 0, 0, 0, N_("'#' flag"), N_("the '#' printf flag"), STD_C89 }, + { '0', 0, 0, 0, N_("'0' flag"), N_("the '0' printf flag"), STD_C89 }, + { '-', 0, 0, 0, N_("'-' flag"), N_("the '-' printf flag"), STD_C89 }, + { '\'', 0, 0, 0, N_("''' flag"), N_("the ''' printf flag"), STD_EXT }, + { 'I', 0, 0, 0, N_("'I' flag"), N_("the 'I' printf flag"), STD_EXT }, + { 'w', 0, 0, 0, N_("field width"), N_("field width in printf format"), STD_C89 }, + { 'p', 0, 0, 0, N_("precision"), N_("precision in printf format"), STD_C89 }, + { 'L', 0, 0, 0, N_("length modifier"), N_("length modifier in printf format"), STD_C89 }, + { 0, 0, 0, 0, NULL, NULL, STD_C89 } +}; + + +static const format_flag_pair printf_flag_pairs[] = +{ + { ' ', '+', 1, 0 }, + { '0', '-', 1, 0 }, + { '0', 'p', 1, 'i' }, + { 0, 0, 0, 0 } +}; + +#define ETAB_SZ 128 +static kernel_ext_fmt ext_p[ETAB_SZ] = { + { NULL } +}; +static kernel_ext_fmt ext_d[ETAB_SZ] = { + { NULL } +}; + +static const format_char_info print_char_table[] = +{ + /* C89 conversion specifiers. */ + /* none, hh, h, l, ll, L, z, t, j, H, D, DD */ + { "di", 0, STD_C89, { T89_I, T99_SC, T89_S, T89_L, T9L_LL, TEX_S64, T99_SST, T99_PD, T99_IM, BADLEN, BADLEN, BADLEN }, "-wp0 +'I", "i", NULL, ext_d }, + { "oxX", 0, STD_C89, { T89_UI, T99_UC, T89_US, T89_UL, T9L_ULL, TEX_U64, T99_ST, T99_UPD, T99_UIM, BADLEN, BADLEN, BADLEN }, "-wp0#", "i", NULL, NULL }, + { "u", 0, STD_C89, { T89_UI, T99_UC, T89_US, T89_UL, T9L_ULL, TEX_U64, T99_ST, T99_UPD, T99_UIM, BADLEN, BADLEN, BADLEN }, "-wp0'I", "i", NULL, NULL }, + { "fgG", 0, STD_C89, { T89_D, BADLEN, BADLEN, T99_D, BADLEN, T89_LD, BADLEN, BADLEN, BADLEN, TEX_D32, TEX_D64, TEX_D128 }, "-wp0 +#'I", "", NULL, NULL }, + { "eE", 0, STD_C89, { T89_D, BADLEN, BADLEN, T99_D, BADLEN, T89_LD, BADLEN, BADLEN, BADLEN, TEX_D32, TEX_D64, TEX_D128 }, "-wp0 +#I", "", NULL, NULL }, + { "c", 0, STD_C89, { T89_I, BADLEN, BADLEN, T94_WI, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-w", "", NULL, NULL }, + { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, T94_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "cR", NULL, NULL }, + { "p", 1, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "c", NULL, ext_p }, + { "n", 1, STD_C89, { T89_I, T99_SC, T89_S, T89_L, T9L_LL, BADLEN, T99_SST, T99_PD, T99_IM, BADLEN, BADLEN, BADLEN }, "", "W", NULL, NULL }, + /* C99 conversion specifiers. 
*/ + { "F", 0, STD_C99, { T99_D, BADLEN, BADLEN, T99_D, BADLEN, T99_LD, BADLEN, BADLEN, BADLEN, TEX_D32, TEX_D64, TEX_D128 }, "-wp0 +#'I", "", NULL, NULL }, + { "aA", 0, STD_C99, { T99_D, BADLEN, BADLEN, T99_D, BADLEN, T99_LD, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp0 +#", "", NULL, NULL }, + /* X/Open conversion specifiers. */ + { "C", 0, STD_EXT, { TEX_WI, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-w", "", NULL, NULL }, + { "S", 1, STD_EXT, { TEX_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "R", NULL, NULL }, + /* GNU conversion specifiers. */ + { "m", 0, STD_EXT, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "", NULL, NULL }, + { NULL, 0, STD_C89, NOLENGTHS, NULL, NULL, NULL, NULL } +}; + +/* This must be in the same order as enum format_type. */ +static const format_kind_info format_types_orig[] = +{ + { "frr_printf", printf_length_specs, print_char_table, " +#0-'I", NULL, + printf_flag_specs, printf_flag_pairs, + FMT_FLAG_ARG_CONVERT|FMT_FLAG_DOLLAR_MULTIPLE|FMT_FLAG_USE_DOLLAR|FMT_FLAG_EMPTY_PREC_OK, + 'w', 0, 'p', 0, 'L', 0, + &integer_type_node, &integer_type_node + }, +}; + +/* This layer of indirection allows GCC to reassign format_types with + new data if necessary, while still allowing the original data to be + const. */ +static const format_kind_info *format_types = format_types_orig; + +static int n_format_types = ARRAY_SIZE (format_types_orig); + +/* Structure detailing the results of checking a format function call + where the format expression may be a conditional expression with + many leaves resulting from nested conditional expressions. */ +struct format_check_results +{ + /* Number of leaves of the format argument that could not be checked + as they were not string literals. */ + int number_non_literal; + /* Number of leaves of the format argument that were null pointers or + string literals, but had extra format arguments. */ + int number_extra_args; + location_t extra_arg_loc; + /* Number of leaves of the format argument that were null pointers or + string literals, but had extra format arguments and used $ operand + numbers. */ + int number_dollar_extra_args; + /* Number of leaves of the format argument that were wide string + literals. */ + int number_wide; + /* Number of leaves of the format argument that are not array of "char". */ + int number_non_char; + /* Number of leaves of the format argument that were empty strings. */ + int number_empty; + /* Number of leaves of the format argument that were unterminated + strings. */ + int number_unterminated; + /* Number of leaves of the format argument that were not counted above. */ + int number_other; + /* Location of the format string. */ + location_t format_string_loc; +}; + +struct format_check_context +{ + format_check_results *res; + function_format_info *info; + tree params; + vec<location_t> *arglocs; +}; + +/* Return the format name (as specified in the original table) for the format + type indicated by format_num. */ +static const char * +format_name (int format_num) +{ + if (format_num >= 0 && format_num < n_format_types) + return format_types[format_num].name; + gcc_unreachable (); +} + +/* Return the format flags (as specified in the original table) for the format + type indicated by format_num. 
*/ +static int +format_flags (int format_num) +{ + if (format_num >= 0 && format_num < n_format_types) + return format_types[format_num].flags; + gcc_unreachable (); +} + +static void check_format_info (function_format_info *, tree, + vec<location_t> *); +static void check_format_arg (void *, tree, unsigned HOST_WIDE_INT); +static void check_format_info_main (format_check_results *, + function_format_info *, const char *, + location_t, tree, + int, tree, + unsigned HOST_WIDE_INT, + object_allocator<format_wanted_type> &, + vec<location_t> *); + +static void init_dollar_format_checking (int, tree); +static int maybe_read_dollar_number (const char **, int, + tree, tree *, const format_kind_info *); +static bool avoid_dollar_number (const char *); +static void finish_dollar_format_checking (format_check_results *, int); + +static const format_flag_spec *get_flag_spec (const format_flag_spec *, + int, const char *); + +static void check_format_types (const substring_loc &fmt_loc, + format_wanted_type *, + const format_kind_info *fki, + int offset_to_type_start, + char conversion_char, + vec<location_t> *arglocs); +static void format_type_warning (const substring_loc &fmt_loc, + location_t param_loc, + format_wanted_type *, tree, + tree, + const format_kind_info *fki, + int offset_to_type_start, + char conversion_char, + const char *extra = NULL); + +static bool check_kef_type (const substring_loc &fmt_loc, + const struct kernel_ext_fmt *kef, + unsigned arg_num, + tree cur_param, + tree wanted_type, + const format_kind_info *fki, + int offset_to_type_start, + char conversion_char, + vec<location_t> *arglocs); + +/* Decode a format type from a string, returning the type, or + format_type_error if not valid, in which case the caller should print an + error message. */ +static int +decode_format_type (const char *s) +{ + int i; + int slen; + + s = convert_format_name_to_system_name (s); + slen = strlen (s); + for (i = 0; i < n_format_types; i++) + { + int alen; + if (!strcmp (s, format_types[i].name)) + return i; + alen = strlen (format_types[i].name); + if (slen == alen + 4 && s[0] == '_' && s[1] == '_' + && s[slen - 1] == '_' && s[slen - 2] == '_' + && !strncmp (s + 2, format_types[i].name, alen)) + return i; + } + return format_type_error; +} + + +/* Check the argument list of a call to printf, scanf, etc. + ATTRS are the attributes on the function type. There are NARGS argument + values in the array ARGARRAY. + Also, if -Wsuggest-attribute=format, + warn for calls to vprintf or vscanf in functions with no such format + attribute themselves. */ + +void +check_function_format (tree attrs, int nargs, tree *argarray, + vec<location_t> *arglocs) +{ + tree a; + + /* See if this function has any format attributes. */ + for (a = attrs; a; a = TREE_CHAIN (a)) + { + if (is_attribute_p ("frr_format", TREE_PURPOSE (a))) + { + /* Yup; check it. */ + function_format_info info; + decode_format_attr (TREE_VALUE (a), &info, /*validated=*/true); + if (warn_format) + { + /* FIXME: Rewrite all the internal functions in this file + to use the ARGARRAY directly instead of constructing this + temporary list. */ + tree params = NULL_TREE; + int i; + for (i = nargs - 1; i >= 0; i--) + params = tree_cons (NULL_TREE, argarray[i], params); + check_format_info (&info, params, arglocs); + } + + /* Attempt to detect whether the current function might benefit + from the format attribute if the called function is decorated + with it. 
Avoid using calls with string literal formats for + guidance since those are unlikely to be viable candidates. */ + if (warn_suggest_attribute_format + && current_function_decl != NULL_TREE + && info.first_arg_num == 0 + && (format_types[info.format_type].flags + & (int) FMT_FLAG_ARG_CONVERT) + /* c_strlen will fail for a function parameter but succeed + for a literal or constant array. */ + && !c_strlen (argarray[info.format_num - 1], 1)) + { + tree c; + for (c = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl)); + c; + c = TREE_CHAIN (c)) + if (is_attribute_p ("frr_format", TREE_PURPOSE (c)) + && (decode_format_type (IDENTIFIER_POINTER + (TREE_VALUE (TREE_VALUE (c)))) + == info.format_type)) + break; + if (c == NULL_TREE) + { + /* Check if the current function has a parameter to which + the format attribute could be attached; if not, it + can't be a candidate for a format attribute, despite + the vprintf-like or vscanf-like call. */ + tree args; + for (args = DECL_ARGUMENTS (current_function_decl); + args != 0; + args = DECL_CHAIN (args)) + { + if (TREE_CODE (TREE_TYPE (args)) == POINTER_TYPE + && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (args))) + == char_type_node)) + break; + } + if (args != 0) + warning (OPT_Wsuggest_attribute_format, "function %qD " + "might be a candidate for %qs frr_format attribute", + current_function_decl, + format_types[info.format_type].name); + } + } + } + } +} + + +/* Variables used by the checking of $ operand number formats. */ +static char *dollar_arguments_used = NULL; +static char *dollar_arguments_pointer_p = NULL; +static int dollar_arguments_alloc = 0; +static int dollar_arguments_count; +static int dollar_first_arg_num; +static int dollar_max_arg_used; +static int dollar_format_warned; + +/* Initialize the checking for a format string that may contain $ + parameter number specifications; we will need to keep track of whether + each parameter has been used. FIRST_ARG_NUM is the number of the first + argument that is a parameter to the format, or 0 for a vprintf-style + function; PARAMS is the list of arguments starting at this argument. */ + +static void +init_dollar_format_checking (int first_arg_num, tree params) +{ + tree oparams = params; + + dollar_first_arg_num = first_arg_num; + dollar_arguments_count = 0; + dollar_max_arg_used = 0; + dollar_format_warned = 0; + if (first_arg_num > 0) + { + while (params) + { + dollar_arguments_count++; + params = TREE_CHAIN (params); + } + } + if (dollar_arguments_alloc < dollar_arguments_count) + { + free (dollar_arguments_used); + free (dollar_arguments_pointer_p); + dollar_arguments_alloc = dollar_arguments_count; + dollar_arguments_used = XNEWVEC (char, dollar_arguments_alloc); + dollar_arguments_pointer_p = XNEWVEC (char, dollar_arguments_alloc); + } + if (dollar_arguments_alloc) + { + memset (dollar_arguments_used, 0, dollar_arguments_alloc); + if (first_arg_num > 0) + { + int i = 0; + params = oparams; + while (params) + { + dollar_arguments_pointer_p[i] = (TREE_CODE (TREE_TYPE (TREE_VALUE (params))) + == POINTER_TYPE); + params = TREE_CHAIN (params); + i++; + } + } + } +} + + +/* Look for a decimal number followed by a $ in *FORMAT. If DOLLAR_NEEDED + is set, it is an error if one is not found; otherwise, it is OK. If + such a number is found, check whether it is within range and mark that + numbered operand as being used for later checking. Returns the operand + number if found and within range, zero if no such number was found and + this is OK, or -1 on error. 
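+   (For example, in "%2$s" the leading "2$" selects the second format operand.)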
PARAMS points to the first operand of the + format; PARAM_PTR is made to point to the parameter referred to. If + a $ format is found, *FORMAT is updated to point just after it. */ + +static int +maybe_read_dollar_number (const char **format, + int dollar_needed, tree params, tree *param_ptr, + const format_kind_info *fki) +{ + int argnum; + int overflow_flag; + const char *fcp = *format; + if (!ISDIGIT (*fcp)) + { + if (dollar_needed) + { + warning (OPT_Wformat_, "missing $ operand number in format"); + return -1; + } + else + return 0; + } + argnum = 0; + overflow_flag = 0; + while (ISDIGIT (*fcp)) + { + int nargnum; + nargnum = 10 * argnum + (*fcp - '0'); + if (nargnum < 0 || nargnum / 10 != argnum) + overflow_flag = 1; + argnum = nargnum; + fcp++; + } + if (*fcp != '$') + { + if (dollar_needed) + { + warning (OPT_Wformat_, "missing $ operand number in format"); + return -1; + } + else + return 0; + } + *format = fcp + 1; + if (pedantic && !dollar_format_warned) + { + warning (OPT_Wformat_, "%s does not support %%n$ operand number formats", + C_STD_NAME (STD_EXT)); + dollar_format_warned = 1; + } + if (overflow_flag || argnum == 0 + || (dollar_first_arg_num && argnum > dollar_arguments_count)) + { + warning (OPT_Wformat_, "operand number out of range in format"); + return -1; + } + if (argnum > dollar_max_arg_used) + dollar_max_arg_used = argnum; + /* For vprintf-style functions we may need to allocate more memory to + track which arguments are used. */ + while (dollar_arguments_alloc < dollar_max_arg_used) + { + int nalloc; + nalloc = 2 * dollar_arguments_alloc + 16; + dollar_arguments_used = XRESIZEVEC (char, dollar_arguments_used, + nalloc); + dollar_arguments_pointer_p = XRESIZEVEC (char, dollar_arguments_pointer_p, + nalloc); + memset (dollar_arguments_used + dollar_arguments_alloc, 0, + nalloc - dollar_arguments_alloc); + dollar_arguments_alloc = nalloc; + } + if (!(fki->flags & (int) FMT_FLAG_DOLLAR_MULTIPLE) + && dollar_arguments_used[argnum - 1] == 1) + { + dollar_arguments_used[argnum - 1] = 2; + warning (OPT_Wformat_, "format argument %d used more than once in %s format", + argnum, fki->name); + } + else + dollar_arguments_used[argnum - 1] = 1; + if (dollar_first_arg_num) + { + int i; + *param_ptr = params; + for (i = 1; i < argnum && *param_ptr != 0; i++) + *param_ptr = TREE_CHAIN (*param_ptr); + + /* This case shouldn't be caught here. */ + gcc_assert (*param_ptr); + } + else + *param_ptr = 0; + return argnum; +} + +/* Ensure that FORMAT does not start with a decimal number followed by + a $; give a diagnostic and return true if it does, false otherwise. */ + +static bool +avoid_dollar_number (const char *format) +{ + if (!ISDIGIT (*format)) + return false; + while (ISDIGIT (*format)) + format++; + if (*format == '$') + { + warning (OPT_Wformat_, "$ operand number used after format without operand number"); + return true; + } + return false; +} + + +/* Finish the checking for a format string that used $ operand number formats + instead of non-$ formats. We check for unused operands before used ones + (a serious error, since the implementation of the format function + can't know what types to pass to va_arg to find the later arguments). + and for unused operands at the end of the format (if we know how many + arguments the format had, so not for vprintf). If there were operand + numbers out of range on a non-vprintf-style format, we won't have reached + here. If POINTER_GAP_OK, unused arguments are OK if all arguments are + pointers. 
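+   For example, "%3$s %1$d" uses operands 1 and 3 but never operand 2, which is diagnosed here.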
*/ + +static void +finish_dollar_format_checking (format_check_results *res, int pointer_gap_ok) +{ + int i; + bool found_pointer_gap = false; + for (i = 0; i < dollar_max_arg_used; i++) + { + if (!dollar_arguments_used[i]) + { + if (pointer_gap_ok && (dollar_first_arg_num == 0 + || dollar_arguments_pointer_p[i])) + found_pointer_gap = true; + else + warning_at (res->format_string_loc, OPT_Wformat_, + "format argument %d unused before used argument %d in $-style format", + i + 1, dollar_max_arg_used); + } + } + if (found_pointer_gap + || (dollar_first_arg_num + && dollar_max_arg_used < dollar_arguments_count)) + { + res->number_other--; + res->number_dollar_extra_args++; + } +} + + +/* Retrieve the specification for a format flag. SPEC contains the + specifications for format flags for the applicable kind of format. + FLAG is the flag in question. If PREDICATES is NULL, the basic + spec for that flag must be retrieved and must exist. If + PREDICATES is not NULL, it is a string listing possible predicates + for the spec entry; if an entry predicated on any of these is + found, it is returned, otherwise NULL is returned. */ + +static const format_flag_spec * +get_flag_spec (const format_flag_spec *spec, int flag, const char *predicates) +{ + int i; + for (i = 0; spec[i].flag_char != 0; i++) + { + if (spec[i].flag_char != flag) + continue; + if (predicates != NULL) + { + if (spec[i].predicate != 0 + && strchr (predicates, spec[i].predicate) != 0) + return &spec[i]; + } + else if (spec[i].predicate == 0) + return &spec[i]; + } + gcc_assert (predicates); + return NULL; +} + + +/* Check the argument list of a call to printf, scanf, etc. + INFO points to the function_format_info structure. + PARAMS is the list of argument values. */ + +static void +check_format_info (function_format_info *info, tree params, + vec<location_t> *arglocs) +{ + format_check_context format_ctx; + unsigned HOST_WIDE_INT arg_num; + tree format_tree; + format_check_results res; + /* Skip to format argument. If the argument isn't available, there's + no work for us to do; prototype checking will catch the problem. */ + for (arg_num = 1; ; ++arg_num) + { + if (params == 0) + return; + if (arg_num == info->format_num) + break; + params = TREE_CHAIN (params); + } + format_tree = TREE_VALUE (params); + params = TREE_CHAIN (params); + if (format_tree == 0) + return; + + res.number_non_literal = 0; + res.number_extra_args = 0; + res.extra_arg_loc = UNKNOWN_LOCATION; + res.number_dollar_extra_args = 0; + res.number_wide = 0; + res.number_non_char = 0; + res.number_empty = 0; + res.number_unterminated = 0; + res.number_other = 0; + res.format_string_loc = input_location; + + format_ctx.res = &res; + format_ctx.info = info; + format_ctx.params = params; + format_ctx.arglocs = arglocs; + + check_function_arguments_recurse (check_format_arg, &format_ctx, + format_tree, arg_num); + + location_t loc = format_ctx.res->format_string_loc; + + if (res.number_non_literal > 0) + { + /* Functions taking a va_list normally pass a non-literal format + string. These functions typically are declared with + first_arg_num == 0, so avoid warning in those cases. */ + if (!(format_types[info->format_type].flags & (int) FMT_FLAG_ARG_CONVERT)) + { + /* For strftime-like formats, warn for not checking the format + string; but there are no arguments to check. 
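+   (frr_printf sets FMT_FLAG_ARG_CONVERT, so this branch only matters for strftime-style checkers that take no arguments.)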
*/ + warning_at (loc, OPT_Wformat_nonliteral, + "format not a string literal, format string not checked"); + } + else if (info->first_arg_num != 0) + { + /* If there are no arguments for the format at all, we may have + printf (foo) which is likely to be a security hole. */ + while (arg_num + 1 < info->first_arg_num) + { + if (params == 0) + break; + params = TREE_CHAIN (params); + ++arg_num; + } + if (params == 0 && warn_format_security) + warning_at (loc, OPT_Wformat_security, + "format not a string literal and no format arguments"); + else if (params == 0 && warn_format_nonliteral) + warning_at (loc, OPT_Wformat_nonliteral, + "format not a string literal and no format arguments"); + else + warning_at (loc, OPT_Wformat_nonliteral, + "format not a string literal, argument types not checked"); + } + } + + /* If there were extra arguments to the format, normally warn. However, + the standard does say extra arguments are ignored, so in the specific + case where we have multiple leaves (conditional expressions or + ngettext) allow extra arguments if at least one leaf didn't have extra + arguments, but was otherwise OK (either non-literal or checked OK). + If the format is an empty string, this should be counted similarly to the + case of extra format arguments. */ + if (res.number_extra_args > 0 && res.number_non_literal == 0 + && res.number_other == 0) + { + if (res.extra_arg_loc == UNKNOWN_LOCATION) + res.extra_arg_loc = loc; + warning_at (res.extra_arg_loc, OPT_Wformat_extra_args, + "too many arguments for format"); + } + if (res.number_dollar_extra_args > 0 && res.number_non_literal == 0 + && res.number_other == 0) + warning_at (loc, OPT_Wformat_extra_args, "unused arguments in $-style format"); + if (res.number_empty > 0 && res.number_non_literal == 0 + && res.number_other == 0) + warning_at (loc, OPT_Wformat_zero_length, "zero-length %s format string", + format_types[info->format_type].name); + + if (res.number_wide > 0) + warning_at (loc, OPT_Wformat_, "format is a wide character string"); + + if (res.number_non_char > 0) + warning_at (loc, OPT_Wformat_, + "format string is not an array of type %qs", "char"); + + if (res.number_unterminated > 0) + warning_at (loc, OPT_Wformat_, "unterminated format string"); +} + +/* Callback from check_function_arguments_recurse to check a + format string. FORMAT_TREE is the format parameter. ARG_NUM + is the number of the format argument. CTX points to a + format_check_context. */ + +static void +check_format_arg (void *ctx, tree format_tree, + unsigned HOST_WIDE_INT arg_num) +{ + format_check_context *format_ctx = (format_check_context *) ctx; + format_check_results *res = format_ctx->res; + function_format_info *info = format_ctx->info; + tree params = format_ctx->params; + vec<location_t> *arglocs = format_ctx->arglocs; + + int format_length; + HOST_WIDE_INT offset; + const char *format_chars; + tree array_size = 0; + tree array_init; + + location_t fmt_param_loc = EXPR_LOC_OR_LOC (format_tree, input_location); + + /* Pull out a constant value if the front end didn't, and handle location + wrappers. */ + format_tree = fold_for_warn (format_tree); + STRIP_NOPS (format_tree); + + if (integer_zerop (format_tree)) + { + /* Skip to first argument to check, so we can see if this format + has any arguments (it shouldn't). 
*/ + while (arg_num + 1 < info->first_arg_num) + { + if (params == 0) + return; + params = TREE_CHAIN (params); + ++arg_num; + } + + if (params == 0) + res->number_other++; + else + { + if (res->number_extra_args == 0) + res->extra_arg_loc = EXPR_LOC_OR_LOC (TREE_VALUE (params), + input_location); + res->number_extra_args++; + } + return; + } + + offset = 0; + if (TREE_CODE (format_tree) == POINTER_PLUS_EXPR) + { + tree arg0, arg1; + + arg0 = TREE_OPERAND (format_tree, 0); + arg1 = TREE_OPERAND (format_tree, 1); + STRIP_NOPS (arg0); + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) == INTEGER_CST) + format_tree = arg0; + else + { + res->number_non_literal++; + return; + } + /* POINTER_PLUS_EXPR offsets are to be interpreted signed. */ + if (!cst_and_fits_in_hwi (arg1)) + { + res->number_non_literal++; + return; + } + offset = int_cst_value (arg1); + } + if (TREE_CODE (format_tree) != ADDR_EXPR) + { + res->number_non_literal++; + return; + } + res->format_string_loc = EXPR_LOC_OR_LOC (format_tree, input_location); + format_tree = TREE_OPERAND (format_tree, 0); + if (format_types[info->format_type].flags + & (int) FMT_FLAG_PARSE_ARG_CONVERT_EXTERNAL) + { + /* We cannot examine this string here - but we can check that it is + a valid type. */ + if (TREE_CODE (format_tree) != CONST_DECL) + { + res->number_non_literal++; + return; + } + /* Skip to first argument to check. */ + while (arg_num + 1 < info->first_arg_num) + { + if (params == 0) + return; + params = TREE_CHAIN (params); + ++arg_num; + } + return; + } + if (TREE_CODE (format_tree) == ARRAY_REF + && tree_fits_shwi_p (TREE_OPERAND (format_tree, 1)) + && (offset += tree_to_shwi (TREE_OPERAND (format_tree, 1))) >= 0) + format_tree = TREE_OPERAND (format_tree, 0); + if (offset < 0) + { + res->number_non_literal++; + return; + } + if (VAR_P (format_tree) + && TREE_CODE (TREE_TYPE (format_tree)) == ARRAY_TYPE + && (array_init = decl_constant_value (format_tree)) != format_tree + && TREE_CODE (array_init) == STRING_CST) + { + /* Extract the string constant initializer. Note that this may include + a trailing NUL character that is not in the array (e.g. + const char a[3] = "foo";). */ + array_size = DECL_SIZE_UNIT (format_tree); + format_tree = array_init; + } + if (TREE_CODE (format_tree) != STRING_CST) + { + res->number_non_literal++; + return; + } + tree underlying_type + = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (format_tree))); + if (underlying_type != char_type_node) + { + if (underlying_type == char16_type_node + || underlying_type == char32_type_node + || underlying_type == wchar_type_node) + res->number_wide++; + else + res->number_non_char++; + return; + } + format_chars = TREE_STRING_POINTER (format_tree); + format_length = TREE_STRING_LENGTH (format_tree); + if (array_size != 0) + { + /* Variable length arrays can't be initialized. */ + gcc_assert (TREE_CODE (array_size) == INTEGER_CST); + + if (tree_fits_shwi_p (array_size)) + { + HOST_WIDE_INT array_size_value = tree_to_shwi (array_size); + if (array_size_value > 0 + && array_size_value == (int) array_size_value + && format_length > array_size_value) + format_length = array_size_value; + } + } + if (offset) + { + if (offset >= format_length) + { + res->number_non_literal++; + return; + } + format_chars += offset; + format_length -= offset; + } + if (format_length < 1 || format_chars[--format_length] != 0) + { + res->number_unterminated++; + return; + } + if (format_length == 0) + { + res->number_empty++; + return; + } + + /* Skip to first argument to check. 
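+   (params currently points just past the format string; this loop advances it to the first argument that the conversions will consume.)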
*/ + while (arg_num + 1 < info->first_arg_num) + { + if (params == 0) + return; + params = TREE_CHAIN (params); + ++arg_num; + } + /* Provisionally increment res->number_other; check_format_info_main + will decrement it if it finds there are extra arguments, but this way + need not adjust it for every return. */ + res->number_other++; + object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool"); + check_format_info_main (res, info, format_chars, fmt_param_loc, format_tree, + format_length, params, arg_num, fwt_pool, arglocs); +} + +/* Support class for argument_parser and check_format_info_main. + Tracks any flag characters that have been applied to the + current argument. */ + +class flag_chars_t +{ + public: + flag_chars_t (); + bool has_char_p (char ch) const; + void add_char (char ch); + void validate (const format_kind_info *fki, + const format_char_info *fci, + const format_flag_spec *flag_specs, + const char * const format_chars, + tree format_string_cst, + location_t format_string_loc, + const char * const orig_format_chars, + char format_char, + bool quoted); + int get_alloc_flag (const format_kind_info *fki); + int assignment_suppression_p (const format_kind_info *fki); + + private: + char m_flag_chars[256]; +}; + +/* Support struct for argument_parser and check_format_info_main. + Encapsulates any length modifier applied to the current argument. */ + +struct length_modifier +{ + length_modifier () + : chars (NULL), val (FMT_LEN_none), std (STD_C89), + scalar_identity_flag (0) + { + } + + length_modifier (const char *chars_, + enum format_lengths val_, + enum format_std_version std_, + int scalar_identity_flag_) + : chars (chars_), val (val_), std (std_), + scalar_identity_flag (scalar_identity_flag_) + { + } + + const char *chars; + enum format_lengths val; + enum format_std_version std; + int scalar_identity_flag; +}; + +/* Parsing one argument within a format string. 
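+   ("One argument" here means one complete conversion specification: flags, width, precision, length modifier and conversion character, e.g. "%-8.3ld".)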
*/ + +class argument_parser +{ + public: + argument_parser (function_format_info *info, const char *&format_chars, + tree format_string_cst, + const char * const orig_format_chars, + location_t format_string_loc, flag_chars_t &flag_chars, + int &has_operand_number, tree first_fillin_param, + object_allocator <format_wanted_type> &fwt_pool_, + vec<location_t> *arglocs); + + bool read_any_dollar (); + + bool read_format_flags (); + + bool + read_any_format_width (tree ¶ms, + unsigned HOST_WIDE_INT &arg_num); + + void + read_any_format_left_precision (); + + bool + read_any_format_precision (tree ¶ms, + unsigned HOST_WIDE_INT &arg_num); + + void handle_alloc_chars (); + + length_modifier read_any_length_modifier (); + + void read_any_other_modifier (); + + const format_char_info *find_format_char_info (char format_char); + + void + validate_flag_pairs (const format_char_info *fci, + char format_char); + + void + give_y2k_warnings (const format_char_info *fci, + char format_char); + + void parse_any_scan_set (const format_char_info *fci); + + bool handle_conversions (const format_char_info *fci, + const length_modifier &len_modifier, + tree &wanted_type, + const char *&wanted_type_name, + unsigned HOST_WIDE_INT &arg_num, + tree ¶ms, + char format_char); + + bool + check_argument_type (const format_char_info *fci, + const struct kernel_ext_fmt *kef, + const length_modifier &len_modifier, + tree &wanted_type, + const char *&wanted_type_name, + const bool suppressed, + unsigned HOST_WIDE_INT &arg_num, + tree ¶ms, + const int alloc_flag, + const char * const format_start, + const char * const type_start, + location_t fmt_param_loc, + char conversion_char); + + private: + const function_format_info *const info; + const format_kind_info * const fki; + const format_flag_spec * const flag_specs; + const char *start_of_this_format; + const char *&format_chars; + const tree format_string_cst; + const char * const orig_format_chars; + const location_t format_string_loc; + object_allocator <format_wanted_type> &fwt_pool; + flag_chars_t &flag_chars; + int main_arg_num; + tree main_arg_params; + int &has_operand_number; + const tree first_fillin_param; + format_wanted_type width_wanted_type; + format_wanted_type precision_wanted_type; + public: + format_wanted_type main_wanted_type; + private: + format_wanted_type *first_wanted_type; + format_wanted_type *last_wanted_type; + vec<location_t> *arglocs; +}; + +/* flag_chars_t's constructor. */ + +flag_chars_t::flag_chars_t () +{ + m_flag_chars[0] = 0; +} + +/* Has CH been seen as a flag within the current argument? */ + +bool +flag_chars_t::has_char_p (char ch) const +{ + return strchr (m_flag_chars, ch) != 0; +} + +/* Add CH to the flags seen within the current argument. */ + +void +flag_chars_t::add_char (char ch) +{ + int i = strlen (m_flag_chars); + m_flag_chars[i++] = ch; + m_flag_chars[i] = 0; +} + +/* Validate the individual flags used, removing any that are invalid. 
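+   (For example, "%#d" is diagnosed because '#' is not among the flags listed for the d/i conversions in print_char_table, and the offending flag is dropped.)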
*/ + +void +flag_chars_t::validate (const format_kind_info *fki, + const format_char_info *fci, + const format_flag_spec *flag_specs, + const char * const format_chars, + tree format_string_cst, + location_t format_string_loc, + const char * const orig_format_chars, + char format_char, + bool quoted) +{ + int i; + int d = 0; + bool quotflag = false; + + for (i = 0; m_flag_chars[i] != 0; i++) + { + const format_flag_spec *s = get_flag_spec (flag_specs, + m_flag_chars[i], NULL); + m_flag_chars[i - d] = m_flag_chars[i]; + if (m_flag_chars[i] == fki->length_code_char) + continue; + + /* Remember if a quoting flag is seen. */ + quotflag |= s->quoting; + + if (strchr (fci->flag_chars, m_flag_chars[i]) == 0) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "%s used with %<%%%c%> %s format", + _(s->name), format_char, fki->name); + d++; + continue; + } + if (pedantic) + { + const format_flag_spec *t; + if (ADJ_STD (s->std) > C_STD_VER) + warning_at (format_string_loc, OPT_Wformat_, + "%s does not support %s", + C_STD_NAME (s->std), _(s->long_name)); + t = get_flag_spec (flag_specs, m_flag_chars[i], fci->flags2); + if (t != NULL && ADJ_STD (t->std) > ADJ_STD (s->std)) + { + const char *long_name = (t->long_name != NULL + ? t->long_name + : s->long_name); + if (ADJ_STD (t->std) > C_STD_VER) + warning_at (format_string_loc, OPT_Wformat_, + "%s does not support %s with" + " the %<%%%c%> %s format", + C_STD_NAME (t->std), _(long_name), + format_char, fki->name); + } + } + + /* Detect quoting directives used within a quoted sequence, such + as GCC's "%<...%qE". */ + if (quoted && s->quoting) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars - 1, + OPT_Wformat_, + "%s used within a quoted sequence", + _(s->name)); + } + } + m_flag_chars[i - d] = 0; + + if (!quoted + && !quotflag + && strchr (fci->flags2, '\'')) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "%qc conversion used unquoted", + format_char); + } +} + +/* Determine if an assignment-allocation has been set, requiring + an extra char ** for writing back a dynamically-allocated char *. + This is for handling the optional 'm' character in scanf. */ + +int +flag_chars_t::get_alloc_flag (const format_kind_info *fki) +{ + if ((fki->flags & (int) FMT_FLAG_SCANF_A_KLUDGE) + && has_char_p ('a')) + return 1; + if (fki->alloc_char && has_char_p (fki->alloc_char)) + return 1; + return 0; +} + +/* Determine if an assignment-suppression character was seen. + ('*' in scanf, for discarding the converted input). */ + +int +flag_chars_t::assignment_suppression_p (const format_kind_info *fki) +{ + if (fki->suppression_char + && has_char_p (fki->suppression_char)) + return 1; + return 0; +} + +/* Constructor for argument_parser. Initialize for parsing one + argument within a format string. 
*/ + +argument_parser:: +argument_parser (function_format_info *info_, const char *&format_chars_, + tree format_string_cst_, + const char * const orig_format_chars_, + location_t format_string_loc_, + flag_chars_t &flag_chars_, + int &has_operand_number_, + tree first_fillin_param_, + object_allocator <format_wanted_type> &fwt_pool_, + vec<location_t> *arglocs_) +: info (info_), + fki (&format_types[info->format_type]), + flag_specs (fki->flag_specs), + start_of_this_format (format_chars_), + format_chars (format_chars_), + format_string_cst (format_string_cst_), + orig_format_chars (orig_format_chars_), + format_string_loc (format_string_loc_), + fwt_pool (fwt_pool_), + flag_chars (flag_chars_), + main_arg_num (0), + main_arg_params (NULL), + has_operand_number (has_operand_number_), + first_fillin_param (first_fillin_param_), + first_wanted_type (NULL), + last_wanted_type (NULL), + arglocs (arglocs_) +{ +} + +/* Handle dollars at the start of format arguments, setting up main_arg_params + and main_arg_num. + + Return true if format parsing is to continue, false otherwise. */ + +bool +argument_parser::read_any_dollar () +{ + if ((fki->flags & (int) FMT_FLAG_USE_DOLLAR) && has_operand_number != 0) + { + /* Possibly read a $ operand number at the start of the format. + If one was previously used, one is required here. If one + is not used here, we can't immediately conclude this is a + format without them, since it could be printf %m or scanf %*. */ + int opnum; + opnum = maybe_read_dollar_number (&format_chars, 0, + first_fillin_param, + &main_arg_params, fki); + if (opnum == -1) + return false; + else if (opnum > 0) + { + has_operand_number = 1; + main_arg_num = opnum + info->first_arg_num - 1; + } + } + else if (fki->flags & FMT_FLAG_USE_DOLLAR) + { + if (avoid_dollar_number (format_chars)) + return false; + } + return true; +} + +/* Read any format flags, but do not yet validate them beyond removing + duplicates, since in general validation depends on the rest of + the format. + + Return true if format parsing is to continue, false otherwise. */ + +bool +argument_parser::read_format_flags () +{ + while (*format_chars != 0 + && strchr (fki->flag_chars, *format_chars) != 0) + { + const format_flag_spec *s = get_flag_spec (flag_specs, + *format_chars, NULL); + if (flag_chars.has_char_p (*format_chars)) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars + 1 - orig_format_chars, + OPT_Wformat_, + "repeated %s in format", _(s->name)); + } + else + flag_chars.add_char (*format_chars); + + if (s->skip_next_char) + { + ++format_chars; + if (*format_chars == 0) + { + warning_at (format_string_loc, OPT_Wformat_, + "missing fill character at end of strfmon format"); + return false; + } + } + ++format_chars; + } + + return true; +} + +/* Read any format width, possibly * or *m$. + + Return true if format parsing is to continue, false otherwise. */ + +bool +argument_parser:: +read_any_format_width (tree ¶ms, + unsigned HOST_WIDE_INT &arg_num) +{ + if (!fki->width_char) + return true; + + if (fki->width_type != NULL && *format_chars == '*') + { + flag_chars.add_char (fki->width_char); + /* "...a field width...may be indicated by an asterisk. + In this case, an int argument supplies the field width..." 
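+   (e.g. "%*d" consumes an int width argument before the value being printed.)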
*/ + ++format_chars; + if (has_operand_number != 0) + { + int opnum; + opnum = maybe_read_dollar_number (&format_chars, + has_operand_number == 1, + first_fillin_param, + ¶ms, fki); + if (opnum == -1) + return false; + else if (opnum > 0) + { + has_operand_number = 1; + arg_num = opnum + info->first_arg_num - 1; + } + else + has_operand_number = 0; + } + else + { + if (avoid_dollar_number (format_chars)) + return false; + } + if (info->first_arg_num != 0) + { + tree cur_param; + if (params == 0) + cur_param = NULL; + else + { + cur_param = TREE_VALUE (params); + if (has_operand_number <= 0) + { + params = TREE_CHAIN (params); + ++arg_num; + } + } + width_wanted_type.wanted_type = *fki->width_type; + width_wanted_type.wanted_type_name = NULL; + width_wanted_type.pointer_count = 0; + width_wanted_type.char_lenient_flag = 0; + width_wanted_type.scalar_identity_flag = 0; + width_wanted_type.writing_in_flag = 0; + width_wanted_type.reading_from_flag = 0; + width_wanted_type.kind = CF_KIND_FIELD_WIDTH; + width_wanted_type.format_start = format_chars - 1; + width_wanted_type.format_length = 1; + width_wanted_type.param = cur_param; + width_wanted_type.arg_num = arg_num; + width_wanted_type.offset_loc = + format_chars - orig_format_chars; + width_wanted_type.next = NULL; + if (last_wanted_type != 0) + last_wanted_type->next = &width_wanted_type; + if (first_wanted_type == 0) + first_wanted_type = &width_wanted_type; + last_wanted_type = &width_wanted_type; + } + } + else + { + /* Possibly read a numeric width. If the width is zero, + we complain if appropriate. */ + int non_zero_width_char = FALSE; + int found_width = FALSE; + while (ISDIGIT (*format_chars)) + { + found_width = TRUE; + if (*format_chars != '0') + non_zero_width_char = TRUE; + ++format_chars; + } + if (found_width && !non_zero_width_char && + (fki->flags & (int) FMT_FLAG_ZERO_WIDTH_BAD)) + warning_at (format_string_loc, OPT_Wformat_, + "zero width in %s format", fki->name); + if (found_width) + flag_chars.add_char (fki->width_char); + } + + return true; +} + +/* Read any format left precision (must be a number, not *). */ +void +argument_parser::read_any_format_left_precision () +{ + if (fki->left_precision_char == 0) + return; + if (*format_chars != '#') + return; + + ++format_chars; + flag_chars.add_char (fki->left_precision_char); + if (!ISDIGIT (*format_chars)) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "empty left precision in %s format", fki->name); + while (ISDIGIT (*format_chars)) + ++format_chars; +} + +/* Read any format precision, possibly * or *m$. + + Return true if format parsing is to continue, false otherwise. */ + +bool +argument_parser:: +read_any_format_precision (tree ¶ms, + unsigned HOST_WIDE_INT &arg_num) +{ + if (fki->precision_char == 0) + return true; + if (*format_chars != '.') + return true; + + ++format_chars; + flag_chars.add_char (fki->precision_char); + if (fki->precision_type != NULL && *format_chars == '*') + { + /* "...a...precision...may be indicated by an asterisk. + In this case, an int argument supplies the...precision." 
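+   (e.g. "%.*s" consumes an int precision argument before the string argument.)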
*/ + ++format_chars; + if (has_operand_number != 0) + { + int opnum; + opnum = maybe_read_dollar_number (&format_chars, + has_operand_number == 1, + first_fillin_param, + ¶ms, fki); + if (opnum == -1) + return false; + else if (opnum > 0) + { + has_operand_number = 1; + arg_num = opnum + info->first_arg_num - 1; + } + else + has_operand_number = 0; + } + else + { + if (avoid_dollar_number (format_chars)) + return false; + } + if (info->first_arg_num != 0) + { + tree cur_param; + if (params == 0) + cur_param = NULL; + else + { + cur_param = TREE_VALUE (params); + if (has_operand_number <= 0) + { + params = TREE_CHAIN (params); + ++arg_num; + } + } + precision_wanted_type.wanted_type = *fki->precision_type; + precision_wanted_type.wanted_type_name = NULL; + precision_wanted_type.pointer_count = 0; + precision_wanted_type.char_lenient_flag = 0; + precision_wanted_type.scalar_identity_flag = 0; + precision_wanted_type.writing_in_flag = 0; + precision_wanted_type.reading_from_flag = 0; + precision_wanted_type.kind = CF_KIND_FIELD_PRECISION; + precision_wanted_type.param = cur_param; + precision_wanted_type.format_start = format_chars - 2; + precision_wanted_type.format_length = 2; + precision_wanted_type.arg_num = arg_num; + precision_wanted_type.offset_loc = + format_chars - orig_format_chars; + precision_wanted_type.next = NULL; + if (last_wanted_type != 0) + last_wanted_type->next = &precision_wanted_type; + if (first_wanted_type == 0) + first_wanted_type = &precision_wanted_type; + last_wanted_type = &precision_wanted_type; + } + } + else + { + if (!(fki->flags & (int) FMT_FLAG_EMPTY_PREC_OK) + && !ISDIGIT (*format_chars)) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "empty precision in %s format", fki->name); + while (ISDIGIT (*format_chars)) + ++format_chars; + } + + return true; +} + +/* Parse any assignment-allocation flags, which request an extra + char ** for writing back a dynamically-allocated char *. + This is for handling the optional 'm' character in scanf, + and, before C99, 'a' (for compatibility with a non-standard + GNU libc extension). */ + +void +argument_parser::handle_alloc_chars () +{ + if (fki->alloc_char && fki->alloc_char == *format_chars) + { + flag_chars.add_char (fki->alloc_char); + format_chars++; + } + + /* Handle the scanf allocation kludge. */ + if (fki->flags & (int) FMT_FLAG_SCANF_A_KLUDGE) + { + if (*format_chars == 'a' && !flag_isoc99) + { + if (format_chars[1] == 's' || format_chars[1] == 'S' + || format_chars[1] == '[') + { + /* 'a' is used as a flag. */ + flag_chars.add_char ('a'); + format_chars++; + } + } + } +} + +/* Look for length modifiers within the current format argument, + returning a length_modifier instance describing it (or the + default if one is not found). + + Issue warnings about non-standard modifiers. 
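+   (For example, the "q" and "Z" length modifiers are accepted here but flagged as extensions under -pedantic, per printf_length_specs above.)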
*/ + +length_modifier +argument_parser::read_any_length_modifier () +{ + length_modifier result; + + const format_length_info *fli = fki->length_char_specs; + if (!fli) + return result; + + while (fli->name != 0 + && strncmp (fli->name, format_chars, strlen (fli->name))) + fli++; + if (fli->name != 0) + { + format_chars += strlen (fli->name); + if (fli->double_name != 0 && fli->name[0] == *format_chars) + { + format_chars++; + result = length_modifier (fli->double_name, fli->double_index, + fli->double_std, 0); + } + else + { + result = length_modifier (fli->name, fli->index, fli->std, + fli->scalar_identity_flag); + } + flag_chars.add_char (fki->length_code_char); + } + if (pedantic) + { + /* Warn if the length modifier is non-standard. */ + if (ADJ_STD (result.std) > C_STD_VER) + warning_at (format_string_loc, OPT_Wformat_, + "%s does not support the %qs %s length modifier", + C_STD_NAME (result.std), result.chars, + fki->name); + } + + return result; +} + +/* Read any other modifier (strftime E/O). */ + +void +argument_parser::read_any_other_modifier () +{ + if (fki->modifier_chars == NULL) + return; + + while (*format_chars != 0 + && strchr (fki->modifier_chars, *format_chars) != 0) + { + if (flag_chars.has_char_p (*format_chars)) + { + const format_flag_spec *s = get_flag_spec (flag_specs, + *format_chars, NULL); + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "repeated %s in format", _(s->name)); + } + else + flag_chars.add_char (*format_chars); + ++format_chars; + } +} + +/* Return the format_char_info corresponding to FORMAT_CHAR, + potentially issuing a warning if the format char is + not supported in the C standard version we are checking + against. + + Issue a warning and return NULL if it is not found. + + Issue warnings about non-standard modifiers. */ + +const format_char_info * +argument_parser::find_format_char_info (char format_char) +{ + const format_char_info *fci = fki->conversion_specs; + + while (fci->format_chars != 0 + && strchr (fci->format_chars, format_char) == 0) + ++fci; + if (fci->format_chars == 0) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "unknown conversion type character" + " %qc in format", + format_char); + return NULL; + } + + if (pedantic) + { + if (ADJ_STD (fci->std) > C_STD_VER) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "%s does not support the %<%%%c%> %s format", + C_STD_NAME (fci->std), format_char, fki->name); + } + + return fci; +} + +/* Validate the pairs of flags used. + Issue warnings about incompatible combinations of flags. 
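+   (For frr_printf these are ' ' with '+', '0' with '-', and '0' with a precision on integer conversions; see printf_flag_pairs above.)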
*/ + +void +argument_parser::validate_flag_pairs (const format_char_info *fci, + char format_char) +{ + const format_flag_pair * const bad_flag_pairs = fki->bad_flag_pairs; + + for (int i = 0; bad_flag_pairs[i].flag_char1 != 0; i++) + { + const format_flag_spec *s, *t; + if (!flag_chars.has_char_p (bad_flag_pairs[i].flag_char1)) + continue; + if (!flag_chars.has_char_p (bad_flag_pairs[i].flag_char2)) + continue; + if (bad_flag_pairs[i].predicate != 0 + && strchr (fci->flags2, bad_flag_pairs[i].predicate) == 0) + continue; + s = get_flag_spec (flag_specs, bad_flag_pairs[i].flag_char1, NULL); + t = get_flag_spec (flag_specs, bad_flag_pairs[i].flag_char2, NULL); + if (bad_flag_pairs[i].ignored) + { + if (bad_flag_pairs[i].predicate != 0) + warning_at (format_string_loc, OPT_Wformat_, + "%s ignored with %s and %<%%%c%> %s format", + _(s->name), _(t->name), format_char, + fki->name); + else + warning_at (format_string_loc, OPT_Wformat_, + "%s ignored with %s in %s format", + _(s->name), _(t->name), fki->name); + } + else + { + if (bad_flag_pairs[i].predicate != 0) + warning_at (format_string_loc, OPT_Wformat_, + "use of %s and %s together with %<%%%c%> %s format", + _(s->name), _(t->name), format_char, + fki->name); + else + warning_at (format_string_loc, OPT_Wformat_, + "use of %s and %s together in %s format", + _(s->name), _(t->name), fki->name); + } + } +} + +/* Give Y2K warnings. */ + +void +argument_parser::give_y2k_warnings (const format_char_info *fci, + char format_char) +{ + if (!warn_format_y2k) + return; + + int y2k_level = 0; + if (strchr (fci->flags2, '4') != 0) + if (flag_chars.has_char_p ('E')) + y2k_level = 3; + else + y2k_level = 2; + else if (strchr (fci->flags2, '3') != 0) + y2k_level = 3; + else if (strchr (fci->flags2, '2') != 0) + y2k_level = 2; + if (y2k_level == 3) + warning_at (format_string_loc, OPT_Wformat_y2k, + "%<%%%c%> yields only last 2 digits of " + "year in some locales", format_char); + else if (y2k_level == 2) + warning_at (format_string_loc, OPT_Wformat_y2k, + "%<%%%c%> yields only last 2 digits of year", + format_char); +} + +/* Parse any "scan sets" enclosed in square brackets, e.g. + for scanf-style calls. */ + +void +argument_parser::parse_any_scan_set (const format_char_info *fci) +{ + if (strchr (fci->flags2, '[') == NULL) + return; + + /* Skip over scan set, in case it happens to have '%' in it. */ + if (*format_chars == '^') + ++format_chars; + /* Find closing bracket; if one is hit immediately, then + it's part of the scan set rather than a terminator. */ + if (*format_chars == ']') + ++format_chars; + while (*format_chars && *format_chars != ']') + ++format_chars; + if (*format_chars != ']') + /* The end of the format string was reached. */ + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "no closing %<]%> for %<%%[%> format"); +} + +/* Return true if this argument is to be continued to be parsed, + false to skip to next argument. */ + +bool +argument_parser::handle_conversions (const format_char_info *fci, + const length_modifier &len_modifier, + tree &wanted_type, + const char *&wanted_type_name, + unsigned HOST_WIDE_INT &arg_num, + tree ¶ms, + char format_char) +{ + enum format_std_version wanted_type_std; + + if (!(fki->flags & (int) FMT_FLAG_ARG_CONVERT)) + return true; + + wanted_type = (fci->types[len_modifier.val].type + ? 
*fci->types[len_modifier.val].type : 0); + wanted_type_name = fci->types[len_modifier.val].name; + wanted_type_std = fci->types[len_modifier.val].std; + if (wanted_type == 0) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "use of %qs length modifier with %qc type" + " character has either no effect" + " or undefined behavior", + len_modifier.chars, format_char); + /* Heuristic: skip one argument when an invalid length/type + combination is encountered. */ + arg_num++; + if (params != 0) + params = TREE_CHAIN (params); + return false; + } + else if (pedantic + /* Warn if non-standard, provided it is more non-standard + than the length and type characters that may already + have been warned for. */ + && ADJ_STD (wanted_type_std) > ADJ_STD (len_modifier.std) + && ADJ_STD (wanted_type_std) > ADJ_STD (fci->std)) + { + if (ADJ_STD (wanted_type_std) > C_STD_VER) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "%s does not support the %<%%%s%c%> %s format", + C_STD_NAME (wanted_type_std), + len_modifier.chars, + format_char, fki->name); + } + + return true; +} + +/* Check type of argument against desired type. + + Return true if format parsing is to continue, false otherwise. */ + +bool +argument_parser:: +check_argument_type (const format_char_info *fci, + const struct kernel_ext_fmt *kef, + const length_modifier &len_modifier, + tree &wanted_type, + const char *&wanted_type_name, + const bool suppressed, + unsigned HOST_WIDE_INT &arg_num, + tree ¶ms, + const int alloc_flag, + const char * const format_start, + const char * const type_start, + location_t fmt_param_loc, + char conversion_char) +{ + if (info->first_arg_num == 0) + return true; + + if ((fci->pointer_count == 0 && wanted_type == void_type_node) + || suppressed) + { + if (main_arg_num != 0) + { + if (suppressed) + warning_at (format_string_loc, OPT_Wformat_, + "operand number specified with " + "suppressed assignment"); + else + warning_at (format_string_loc, OPT_Wformat_, + "operand number specified for format " + "taking no argument"); + } + } + else + { + format_wanted_type *wanted_type_ptr; + + if (main_arg_num != 0) + { + arg_num = main_arg_num; + params = main_arg_params; + } + else + { + ++arg_num; + if (has_operand_number > 0) + { + warning_at (format_string_loc, OPT_Wformat_, + "missing $ operand number in format"); + return false; + } + else + has_operand_number = 0; + } + + wanted_type_ptr = &main_wanted_type; + while (fci) + { + tree cur_param; + if (params == 0) + cur_param = NULL; + else + { + cur_param = TREE_VALUE (params); + params = TREE_CHAIN (params); + } + + wanted_type_ptr->wanted_type = wanted_type; + wanted_type_ptr->wanted_type_name = wanted_type_name; + wanted_type_ptr->pointer_count = fci->pointer_count + alloc_flag; + wanted_type_ptr->char_lenient_flag = 0; + if (strchr (fci->flags2, 'c') != 0) + wanted_type_ptr->char_lenient_flag = 1; + wanted_type_ptr->scalar_identity_flag = 0; + if (len_modifier.scalar_identity_flag) + wanted_type_ptr->scalar_identity_flag = 1; + wanted_type_ptr->writing_in_flag = 0; + wanted_type_ptr->reading_from_flag = 0; + if (alloc_flag) + wanted_type_ptr->writing_in_flag = 1; + else + { + if (strchr (fci->flags2, 'W') != 0) + wanted_type_ptr->writing_in_flag = 1; + if (strchr (fci->flags2, 'R') != 0) + wanted_type_ptr->reading_from_flag = 1; + } + wanted_type_ptr->kind = CF_KIND_FORMAT; + wanted_type_ptr->param = cur_param; + 
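+	  /* Record which argument this conversion consumes and where the
+	     conversion lives in the format string, so that later
+	     diagnostics can point at both.  */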
wanted_type_ptr->arg_num = arg_num; + wanted_type_ptr->format_start = format_start; + wanted_type_ptr->format_length = format_chars - format_start; + wanted_type_ptr->offset_loc = format_chars - orig_format_chars; + wanted_type_ptr->next = NULL; + if (last_wanted_type != 0) + last_wanted_type->next = wanted_type_ptr; + if (first_wanted_type == 0) + first_wanted_type = wanted_type_ptr; + last_wanted_type = wanted_type_ptr; + + fci = fci->chain; + if (fci) + { + wanted_type_ptr = fwt_pool.allocate (); + arg_num++; + wanted_type = *fci->types[len_modifier.val].type; + wanted_type_name = fci->types[len_modifier.val].name; + } + } + } + + if (first_wanted_type != 0) + { + ptrdiff_t offset_to_format_start = (start_of_this_format - 1) - orig_format_chars; + ptrdiff_t offset_to_format_end = (format_chars - 1) - orig_format_chars; + /* By default, use the end of the range for the caret location. */ + substring_loc fmt_loc (fmt_param_loc, TREE_TYPE (format_string_cst), + offset_to_format_end, + offset_to_format_start, offset_to_format_end); + ptrdiff_t offset_to_type_start = type_start - orig_format_chars; + check_format_types (fmt_loc, first_wanted_type, fki, + offset_to_type_start, + conversion_char, arglocs); + + /* note printf extension type checks are *additional* - %p must always + * be pointer compatible, %d always int compatible. + */ + if (!kef) + return true; + + const struct kernel_ext_fmt *kef_now; + bool success; + + for (kef_now = kef; kef_now->suffix && !strcmp (kef->suffix, kef_now->suffix); kef_now++) + { + success = check_kef_type (fmt_loc, kef_now, + first_wanted_type->arg_num, + first_wanted_type->param, + kef_now->type, fki, offset_to_type_start, conversion_char, arglocs); + + if (success) + return true; + } + + location_t param_loc; + + if (EXPR_HAS_LOCATION (first_wanted_type->param)) + param_loc = EXPR_LOCATION (first_wanted_type->param); + else if (arglocs) + { + /* arg_num is 1-based. */ + gcc_assert (first_wanted_type->arg_num > 0); + param_loc = (*arglocs)[first_wanted_type->arg_num - 1]; + } + + format_type_warning (fmt_loc, param_loc, first_wanted_type, + kef->type, TREE_TYPE (first_wanted_type->param), + fki, offset_to_type_start, conversion_char); + } + + return true; +} + +/* Do the main part of checking a call to a format function. FORMAT_CHARS + is the NUL-terminated format string (which at this point may contain + internal NUL characters); FORMAT_LENGTH is its length (excluding the + terminating NUL character). ARG_NUM is one less than the number of + the first format argument to check; PARAMS points to that format + argument in the list of arguments. */ + +static void +check_format_info_main (format_check_results *res, + function_format_info *info, const char *format_chars, + location_t fmt_param_loc, tree format_string_cst, + int format_length, tree params, + unsigned HOST_WIDE_INT arg_num, + object_allocator <format_wanted_type> &fwt_pool, + vec<location_t> *arglocs) +{ + const char * const orig_format_chars = format_chars; + const tree first_fillin_param = params; + + const format_kind_info * const fki = &format_types[info->format_type]; + const format_flag_spec * const flag_specs = fki->flag_specs; + const location_t format_string_loc = res->format_string_loc; + + /* -1 if no conversions taking an operand have been found; 0 if one has + and it didn't use $; 1 if $ formats are in use. */ + int has_operand_number = -1; + + /* Vector of pointers to opening quoting directives (like GCC "%<"). 
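+     The vector depth is the current quoting nesting level: an opening
+     directive is pushed, a closing one pops, nested or unmatched
+     directives are diagnosed below, and anything still on the stack at
+     the end of the string is reported as unterminated.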
*/ + auto_vec<const char*> quotdirs; + + /* Pointers to the most recent color directives (like GCC's "%r or %R"). + A starting color directive much be terminated before the end of + the format string. A terminating directive makes no sense without + a prior starting directive. */ + const char *color_begin = NULL; + const char *color_end = NULL; + + init_dollar_format_checking (info->first_arg_num, first_fillin_param); + + while (*format_chars != 0) + { + if (*format_chars++ != '%') + continue; + if (*format_chars == 0) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "spurious trailing %<%%%> in format"); + continue; + } + if (*format_chars == '%') + { + ++format_chars; + continue; + } + + flag_chars_t flag_chars; + argument_parser arg_parser (info, format_chars, format_string_cst, + orig_format_chars, format_string_loc, + flag_chars, has_operand_number, + first_fillin_param, fwt_pool, arglocs); + + if (!arg_parser.read_any_dollar ()) + return; + + if (!arg_parser.read_format_flags ()) + return; + + /* Read any format width, possibly * or *m$. */ + if (!arg_parser.read_any_format_width (params, arg_num)) + return; + + /* Read any format left precision (must be a number, not *). */ + arg_parser.read_any_format_left_precision (); + + /* Read any format precision, possibly * or *m$. */ + if (!arg_parser.read_any_format_precision (params, arg_num)) + return; + + const char *format_start = format_chars; + + arg_parser.handle_alloc_chars (); + + /* The rest of the conversion specification is the length modifier + (if any), and the conversion specifier, so this is where the + type information starts. If we need to issue a suggestion + about a type mismatch, then we should preserve everything up + to here. */ + const char *type_start = format_chars; + + /* Read any length modifier, if this kind of format has them. */ + const length_modifier len_modifier + = arg_parser.read_any_length_modifier (); + + /* Read any modifier (strftime E/O). */ + arg_parser.read_any_other_modifier (); + + char format_char = *format_chars; + if (format_char == 0 + || (!(fki->flags & (int) FMT_FLAG_FANCY_PERCENT_OK) + && format_char == '%')) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "conversion lacks type at end of format"); + continue; + } + format_chars++; + + const format_char_info * const fci + = arg_parser.find_format_char_info (format_char); + if (!fci) + continue; + + struct kernel_ext_fmt *etab = fci->kernel_ext; + + if (etab && format_chars[0] >= 'A' && format_chars[0] <= 'Z') + { + struct kernel_ext_fmt *etab_end = etab + ETAB_SZ; + + for (; etab < etab_end && etab->suffix; etab++) + { + if (!strncmp (etab->suffix, format_chars, strlen (etab->suffix))) + break; + } + + if (!etab->suffix || etab == etab_end) + { + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars + 1, + OPT_Wformat_, + "unrecognized printf extension suffix"); + etab = NULL; + } + else + { + format_chars += strlen (etab->suffix); + } + } + else + etab = NULL; + + flag_chars.validate (fki, fci, flag_specs, format_chars, + format_string_cst, + format_string_loc, orig_format_chars, format_char, + quotdirs.length () > 0); + + const int alloc_flag = flag_chars.get_alloc_flag (fki); + const bool suppressed = flag_chars.assignment_suppression_p (fki); + + /* Diagnose nested or unmatched quoting directives such as GCC's + "%<...%<" and "%>...%>". 
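+	 A sketch of the two cases, using gcc_diag-style formats:
+
+	   warning ("%<%s%<%s%>%>", a, b);   - nested opening quote
+	   warning ("%s%>", a);              - closing quote never opened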
*/ + bool quot_begin_p = strchr (fci->flags2, '<'); + bool quot_end_p = strchr (fci->flags2, '>'); + + if (quot_begin_p && !quot_end_p) + { + if (quotdirs.length ()) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "nested quoting directive"); + quotdirs.safe_push (format_chars); + } + else if (!quot_begin_p && quot_end_p) + { + if (quotdirs.length ()) + quotdirs.pop (); + else + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "unmatched quoting directive"); + } + + bool color_begin_p = strchr (fci->flags2, '/'); + if (color_begin_p) + { + color_begin = format_chars; + color_end = NULL; + } + else if (strchr (fci->flags2, '\\')) + { + if (color_end) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "%qc directive redundant after prior " + "occurence of the same", format_char); + else if (!color_begin) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "unmatched color reset directive"); + color_end = format_chars; + } + + /* Diagnose directives that shouldn't appear in a quoted sequence. + (They are denoted by a double quote in FLAGS2.) */ + if (quotdirs.length ()) + { + if (strchr (fci->flags2, '"')) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars - orig_format_chars, + OPT_Wformat_, + "%qc conversion used within a quoted " + "sequence", + format_char); + } + + /* Validate the pairs of flags used. */ + arg_parser.validate_flag_pairs (fci, format_char); + + arg_parser.give_y2k_warnings (fci, format_char); + + arg_parser.parse_any_scan_set (fci); + + tree wanted_type = NULL; + const char *wanted_type_name = NULL; + + if (!arg_parser.handle_conversions (fci, len_modifier, + wanted_type, wanted_type_name, + arg_num, + params, + format_char)) + continue; + + arg_parser.main_wanted_type.next = NULL; + + /* Finally. . .check type of argument against desired type! */ + if (!arg_parser.check_argument_type (fci, etab, len_modifier, + wanted_type, wanted_type_name, + suppressed, + arg_num, params, + alloc_flag, + format_start, type_start, + fmt_param_loc, + format_char)) + return; + } + + if (format_chars - orig_format_chars != format_length) + format_warning_at_char (format_string_loc, format_string_cst, + format_chars + 1 - orig_format_chars, + OPT_Wformat_contains_nul, + "embedded %<\\0%> in format"); + if (info->first_arg_num != 0 && params != 0 + && has_operand_number <= 0) + { + res->number_other--; + res->number_extra_args++; + } + if (has_operand_number > 0) + finish_dollar_format_checking (res, fki->flags & (int) FMT_FLAG_DOLLAR_GAP_POINTER_OK); + + if (quotdirs.length ()) + format_warning_at_char (format_string_loc, format_string_cst, + quotdirs.pop () - orig_format_chars, + OPT_Wformat_, "unterminated quoting directive"); + if (color_begin && !color_end) + format_warning_at_char (format_string_loc, format_string_cst, + color_begin - orig_format_chars, + OPT_Wformat_, "unterminated color directive"); +} + +/* Check the argument types from a single format conversion (possibly + including width and precision arguments). + + FMT_LOC is the location of the format conversion. + + TYPES is a singly-linked list expressing the parts of the format + conversion that expect argument types, and the arguments they + correspond to. 
+ + OFFSET_TO_TYPE_START is the offset within the execution-charset encoded + format string to where type information begins for the conversion + (the length modifier and conversion specifier). + + CONVERSION_CHAR is the user-provided conversion specifier. + + For example, given: + + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + + then FMT_LOC covers this range: + + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + ^^^^^^^^^ + + and TYPES in this case is a three-entry singly-linked list consisting of: + (1) the check for the field width here: + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + ^ ^^^^ + against arg3, and + (2) the check for the field precision here: + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + ^^ ^^^^ + against arg4, and + (3) the check for the length modifier and conversion char here: + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + ^^^ ^^^^ + against arg5. + + OFFSET_TO_TYPE_START is 13, the offset to the "lld" within the + STRING_CST: + + 0000000000111111111122 + 0123456789012345678901 + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + ^ ^ + | ` CONVERSION_CHAR: 'd' + type starts here. */ +tree type_normalize (tree type, tree *cousin, tree target = NULL) +{ + while (1) + { + if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == POINTER_TYPE) + return type; + if (target) + /* Strip off any "const" etc. */ + type = build_qualified_type (type, 0); + if (TREE_CODE (TYPE_NAME (type)) != TYPE_DECL) + return type; + + if (target && (type == target || TYPE_NAME (type) == target)) + return target; + + struct type_special *t; + for (t = special_types; t->match; t++) + { + if (!*t->match) + continue; + if (TYPE_NAME (type) != *t->match) + continue; + if (t->cousin && *t->cousin) + *cousin = *t->cousin; + if (t->replace) + return *t->replace ? *t->replace : type; + return type; + } + + tree orig = DECL_ORIGINAL_TYPE (TYPE_NAME (type)); + if (!orig) + return type; + + type = orig; + } + return type; +} + +static void +check_format_types (const substring_loc &fmt_loc, + format_wanted_type *types, const format_kind_info *fki, + int offset_to_type_start, + char conversion_char, + vec<location_t> *arglocs) +{ + for (; types != 0; types = types->next) + { + tree cur_param; + tree cur_type; + tree cur_type_cousin = NULL; + tree orig_cur_type; + tree wanted_type; + int arg_num; + int i; + int char_type_flag; + + wanted_type = types->wanted_type; + arg_num = types->arg_num; + + /* The following should not occur here. */ + gcc_assert (wanted_type); + gcc_assert (wanted_type != void_type_node || types->pointer_count); + + if (types->pointer_count == 0) + wanted_type = lang_hooks.types.type_promotes_to (wanted_type); + + switch (TREE_CODE (wanted_type)) + { + case IDENTIFIER_NODE: + break; + case TYPE_DECL: + wanted_type = TYPE_MAIN_VARIANT (DECL_ORIGINAL_TYPE (wanted_type)); + break; + default: + wanted_type = TYPE_MAIN_VARIANT (wanted_type); + break; + } + + cur_param = types->param; + if (!cur_param) + { + format_type_warning (fmt_loc, UNKNOWN_LOCATION, types, wanted_type, + NULL, fki, offset_to_type_start, + conversion_char); + continue; + } + + cur_type = TREE_TYPE (cur_param); + if (cur_type == error_mark_node) + continue; + orig_cur_type = cur_type; + char_type_flag = 0; + + location_t param_loc = UNKNOWN_LOCATION; + if (EXPR_HAS_LOCATION (cur_param)) + param_loc = EXPR_LOCATION (cur_param); + else if (arglocs) + { + /* arg_num is 1-based. 
*/ + gcc_assert (types->arg_num > 0); + param_loc = (*arglocs)[types->arg_num - 1]; + } + + STRIP_NOPS (cur_param); + + /* Check the types of any additional pointer arguments + that precede the "real" argument. */ + for (i = 0; i < types->pointer_count; ++i) + { + if (TREE_CODE (cur_type) == POINTER_TYPE) + { + cur_type = TREE_TYPE (cur_type); + if (cur_type == error_mark_node) + break; + + /* Check for writing through a NULL pointer. */ + if (types->writing_in_flag + && i == 0 + && cur_param != 0 + && integer_zerop (cur_param)) + warning (OPT_Wformat_, "writing through null pointer " + "(argument %d)", arg_num); + + /* Check for reading through a NULL pointer. */ + if (types->reading_from_flag + && i == 0 + && cur_param != 0 + && integer_zerop (cur_param)) + warning (OPT_Wformat_, "reading through null pointer " + "(argument %d)", arg_num); + + if (cur_param != 0 && TREE_CODE (cur_param) == ADDR_EXPR) + cur_param = TREE_OPERAND (cur_param, 0); + else + cur_param = 0; + + /* See if this is an attempt to write into a const type with + scanf or with printf "%n". Note: the writing in happens + at the first indirection only, if for example + void * const * is passed to scanf %p; passing + const void ** is simply passing an incompatible type. */ + if (types->writing_in_flag + && i == 0 + && (TYPE_READONLY (cur_type) + || (cur_param != 0 + && (CONSTANT_CLASS_P (cur_param) + || (DECL_P (cur_param) + && TREE_READONLY (cur_param)))))) + warning (OPT_Wformat_, "writing into constant object " + "(argument %d)", arg_num); + + /* If there are extra type qualifiers beyond the first + indirection, then this makes the types technically + incompatible. */ + if (i > 0 + && pedantic + && (TYPE_READONLY (cur_type) + || TYPE_VOLATILE (cur_type) + || TYPE_ATOMIC (cur_type) + || TYPE_RESTRICT (cur_type))) + warning (OPT_Wformat_, "extra type qualifiers in format " + "argument (argument %d)", + arg_num); + + } + else + { + format_type_warning (fmt_loc, param_loc, + types, wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char); + break; + } + } + + if (i < types->pointer_count) + continue; + + cur_type = type_normalize (cur_type, &cur_type_cousin); + + /* Check whether the argument type is a character type. This leniency + only applies to certain formats, flagged with 'c'. */ + if (types->char_lenient_flag) + char_type_flag = (cur_type == char_type_node + || cur_type == signed_char_type_node + || cur_type == unsigned_char_type_node); + + int compat = lang_hooks.types_compatible_p (wanted_type, cur_type); + /* Check the type of the "real" argument, if there's a type we want. 
*/ + if ((TREE_CODE (wanted_type) != INTEGER_TYPE || types->pointer_count) + && compat) + continue; + if (TREE_CODE (wanted_type) == INTEGER_TYPE && !types->pointer_count + && compat) + { +compat_inner: + if (TREE_CODE (cur_param) == INTEGER_CST) + continue; + + if (TREE_CODE (types->wanted_type) == TYPE_DECL + && TREE_CODE (cur_type) == TYPE_DECL) + { + if (types->wanted_type == cur_type) + continue; + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (strict match required [A])"); + continue; + } + else if (TREE_CODE (types->wanted_type) == TYPE_DECL) + { + if (types->wanted_type == TYPE_NAME(cur_type)) + continue; + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (strict match required [B])"); + continue; + } + else if (wanted_type == cur_type) + continue; + else if (cur_type_cousin) + { + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (strict match required [C])"); + } + + /* + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (ultra-pedantic mode)"); + */ + continue; + } + + /* If we want 'void *', allow any pointer type. + (Anything else would already have got a warning.) + With -Wpedantic, only allow pointers to void and to character + types. */ + if (wanted_type == void_type_node + && (!pedantic || (i == 1 && char_type_flag))) + continue; + /* Don't warn about differences merely in signedness, unless + -Wpedantic. With -Wpedantic, warn if the type is a pointer + target and not a character type, and for character types at + a second level of indirection. */ + if (TREE_CODE (wanted_type) == INTEGER_TYPE + && TREE_CODE (cur_type) == INTEGER_TYPE + && ((!pedantic && !warn_format_signedness) + || (i == 0 && !warn_format_signedness) + || (i == 1 && char_type_flag)) + && (TYPE_UNSIGNED (wanted_type) + ? wanted_type == c_common_unsigned_type (cur_type) + : wanted_type == c_common_signed_type (cur_type))) + { + if (cur_type_cousin) + { + if (TREE_CODE (types->wanted_type) == TYPE_DECL + && TREE_CODE (cur_type_cousin) == TYPE_DECL) + { + if (types->wanted_type == cur_type_cousin) + continue; + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (strict match required [X])"); + continue; + } + else if (TREE_CODE (types->wanted_type) == TYPE_DECL) + { + if (types->wanted_type == TYPE_NAME(cur_type_cousin)) + continue; + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (strict match required [Y])"); + continue; + } + else if (wanted_type == cur_type_cousin) + continue; + else + { + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char, + " (strict match required [Z])"); + } + } + + goto compat_inner; + } + /* Don't warn about differences merely in signedness if we know + that the current type is integer-promoted and its original type + was unsigned such as that it is in the range of WANTED_TYPE. 
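+     A typical instance, under -Wformat-signedness and the usual
+     integer promotions:
+
+       unsigned short us = 42;
+       printf ("%u", us);
+
+     Here "us" is promoted to int, but its original type is unsigned and
+     every value of it fits in unsigned int, so no warning is wanted.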
*/ + if (TREE_CODE (wanted_type) == INTEGER_TYPE + && TREE_CODE (cur_type) == INTEGER_TYPE + && warn_format_signedness + && TYPE_UNSIGNED (wanted_type) + && cur_param != NULL_TREE + && TREE_CODE (cur_param) == NOP_EXPR) + { + tree t = TREE_TYPE (TREE_OPERAND (cur_param, 0)); + if (TYPE_UNSIGNED (t) + && cur_type == lang_hooks.types.type_promotes_to (t)) + continue; + } + /* Likewise, "signed char", "unsigned char" and "char" are + equivalent but the above test won't consider them equivalent. */ + if (wanted_type == char_type_node + && (!pedantic || i < 2) + && char_type_flag) + continue; + if (types->scalar_identity_flag + && (TREE_CODE (cur_type) == TREE_CODE (wanted_type) + || (INTEGRAL_TYPE_P (cur_type) + && INTEGRAL_TYPE_P (wanted_type))) + && TYPE_PRECISION (cur_type) == TYPE_PRECISION (wanted_type)) + continue; + /* Now we have a type mismatch. */ + format_type_warning (fmt_loc, param_loc, types, + wanted_type, orig_cur_type, fki, + offset_to_type_start, conversion_char); + } +} + +static bool +check_kef_type (const substring_loc &fmt_loc, + const struct kernel_ext_fmt *kef, + unsigned arg_num, + tree cur_param, + tree wanted_type, + const format_kind_info *fki, + int offset_to_type_start, + char conversion_char, + vec<location_t> *arglocs) +{ + tree cur_type; + bool ok = true; + int i; + + /* The following should not occur here. */ + gcc_assert (wanted_type); + gcc_assert (wanted_type != void_type_node || kef->ptrlevel); + + if (TREE_CODE (wanted_type) == TYPE_DECL) + wanted_type = DECL_ORIGINAL_TYPE (wanted_type); + + if (!cur_param) + return false; + + cur_type = TREE_TYPE (cur_param); + if (cur_type == error_mark_node) + return false; + + location_t param_loc = UNKNOWN_LOCATION; + if (EXPR_HAS_LOCATION (cur_param)) + param_loc = EXPR_LOCATION (cur_param); + else if (arglocs) + { + /* arg_num is 1-based. */ + gcc_assert (arg_num > 0); + param_loc = (*arglocs)[arg_num - 1]; + } + (void)param_loc; + + STRIP_NOPS (cur_param); + + /* Check the types of any additional pointer arguments + that precede the "real" argument. */ + for (i = 0; i < kef->ptrlevel; ++i) + { + if (TREE_CODE (cur_type) == POINTER_TYPE) + { + cur_type = TREE_TYPE (cur_type); + if (cur_type == error_mark_node) + break; + + if (cur_param != 0 && TREE_CODE (cur_param) == ADDR_EXPR) + cur_param = TREE_OPERAND (cur_param, 0); + else + cur_param = 0; + + /* If there are extra type qualifiers beyond the first + indirection, then this makes the types technically + incompatible. */ + if (i > 0 + && pedantic + && (TYPE_READONLY (cur_type) + || TYPE_VOLATILE (cur_type) + || TYPE_ATOMIC (cur_type) + || TYPE_RESTRICT (cur_type))) + warning (OPT_Wformat_, "extra type qualifiers in format " + "argument (argument %d)", + arg_num); + + } + else + { + ok = false; + break; + } + } + + if (i < kef->ptrlevel) + return ok; + + int compat = lang_hooks.types_compatible_p (wanted_type, cur_type); + + if (!compat) + return false; + + tree cousin; + tree normal_type; + + normal_type = type_normalize (cur_type, &cousin, wanted_type); + + return normal_type == wanted_type; +} + + +/* Given type TYPE, attempt to dereference the type N times + (e.g. from ("int ***", 2) to "int *") + + Return the derefenced type, with any qualifiers + such as "const" stripped from the result, or + NULL if unsuccessful (e.g. TYPE is not a pointer type). 
*/ + +static tree +deref_n_times (tree type, int n) +{ + gcc_assert (type); + + for (int i = n; i > 0; i--) + { + if (TREE_CODE (type) != POINTER_TYPE) + return NULL_TREE; + type = TREE_TYPE (type); + } + /* Strip off any "const" etc. */ + return build_qualified_type (type, 0); +} + +/* Lookup the format code for FORMAT_LEN within FLI, + returning the string code for expressing it, or NULL + if it is not found. */ + +static const char * +get_modifier_for_format_len (const format_length_info *fli, + enum format_lengths format_len) +{ + for (; fli->name; fli++) + { + if (fli->index == format_len) + return fli->name; + if (fli->double_index == format_len) + return fli->double_name; + } + return NULL; +} + +#if CHECKING_P + +namespace selftest { + +static void +test_get_modifier_for_format_len () +{ + ASSERT_STREQ ("h", + get_modifier_for_format_len (printf_length_specs, FMT_LEN_h)); + ASSERT_STREQ ("hh", + get_modifier_for_format_len (printf_length_specs, FMT_LEN_hh)); + ASSERT_STREQ ("L", + get_modifier_for_format_len (printf_length_specs, FMT_LEN_L)); + ASSERT_EQ (NULL, + get_modifier_for_format_len (printf_length_specs, FMT_LEN_none)); +} + +} // namespace selftest + +#endif /* CHECKING_P */ + +/* Determine if SPEC_TYPE and ARG_TYPE are sufficiently similar for a + format_type_detail using SPEC_TYPE to be offered as a suggestion for + Wformat type errors where the argument has type ARG_TYPE. */ + +static bool +matching_type_p (tree spec_type, tree arg_type) +{ + gcc_assert (spec_type); + gcc_assert (arg_type); + + /* If any of the types requires structural equality, we can't compare + their canonical types. */ + if (TYPE_STRUCTURAL_EQUALITY_P (spec_type) + || TYPE_STRUCTURAL_EQUALITY_P (arg_type)) + return false; + + spec_type = TYPE_CANONICAL (spec_type); + arg_type = TYPE_CANONICAL (arg_type); + + if (TREE_CODE (spec_type) == INTEGER_TYPE + && TREE_CODE (arg_type) == INTEGER_TYPE + && (TYPE_UNSIGNED (spec_type) + ? spec_type == c_common_unsigned_type (arg_type) + : spec_type == c_common_signed_type (arg_type))) + return true; + + return spec_type == arg_type; +} + +/* Subroutine of get_format_for_type. + + Generate a string containing the length modifier and conversion specifier + that should be used to format arguments of type ARG_TYPE within FKI + (effectively the inverse of the checking code). + + If CONVERSION_CHAR is not zero (the first pass), the resulting suggestion + is required to use it, for correcting bogus length modifiers. + If CONVERSION_CHAR is zero (the second pass), then allow any suggestion + that matches ARG_TYPE. + + If successful, returns a non-NULL string which should be freed + by the caller. + Otherwise, returns NULL. 
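+
+   As a sketch of the two passes, using the gnu_printf table exercised by
+   the selftests below:
+
+     get_format_for_type_1 (fki, long_integer_type_node, 'd')  -> "ld"
+     get_format_for_type_1 (fki, double_type_node, 'i')        -> NULL
+
+   and for the second call the CONVERSION_CHAR == 0 pass performed by
+   get_format_for_type falls back to "f".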
*/ + +static char * +get_format_for_type_1 (const format_kind_info *fki, tree arg_type, + char conversion_char) +{ + gcc_assert (arg_type); + + const format_char_info *spec; + for (spec = &fki->conversion_specs[0]; + spec->format_chars; + spec++) + { + if (conversion_char) + if (!strchr (spec->format_chars, conversion_char)) + continue; + + tree effective_arg_type = deref_n_times (arg_type, + spec->pointer_count); + if (!effective_arg_type) + continue; + for (int i = 0; i < FMT_LEN_MAX; i++) + { + const format_type_detail *ftd = &spec->types[i]; + if (!ftd->type) + continue; + if (matching_type_p (*ftd->type, effective_arg_type)) + { + const char *len_modifier + = get_modifier_for_format_len (fki->length_char_specs, + (enum format_lengths)i); + if (!len_modifier) + len_modifier = ""; + + if (conversion_char) + /* We found a match, using the given conversion char - the + length modifier was incorrect (or absent). + Provide a suggestion using the conversion char with the + correct length modifier for the type. */ + return xasprintf ("%s%c", len_modifier, conversion_char); + else + /* 2nd pass: no match was possible using the user-provided + conversion char, but we do have a match without using it. + Provide a suggestion using the first conversion char + listed for the given type. */ + return xasprintf ("%s%c", len_modifier, spec->format_chars[0]); + } + } + } + + return NULL; +} + +/* Generate a string containing the length modifier and conversion specifier + that should be used to format arguments of type ARG_TYPE within FKI + (effectively the inverse of the checking code). + + If successful, returns a non-NULL string which should be freed + by the caller. + Otherwise, returns NULL. */ + +static char * +get_format_for_type (const format_kind_info *fki, tree arg_type, + char conversion_char) +{ + gcc_assert (arg_type); + gcc_assert (conversion_char); + + /* First pass: look for a format_char_info containing CONVERSION_CHAR + If we find one, then presumably the length modifier was incorrect + (or absent). */ + char *result = get_format_for_type_1 (fki, arg_type, conversion_char); + if (result) + return result; + + /* Second pass: we didn't find a match for CONVERSION_CHAR, so try + matching just on the type. */ + return get_format_for_type_1 (fki, arg_type, '\0'); +} + +/* Attempt to get a string for use as a replacement fix-it hint for the + source range in FMT_LOC. + + Preserve all of the text within the range of FMT_LOC up to + OFFSET_TO_TYPE_START, replacing the rest with an appropriate + length modifier and conversion specifier for ARG_TYPE, attempting + to keep the user-provided CONVERSION_CHAR if possible. + + For example, given a long vs long long mismatch for arg5 here: + + 000000000111111111122222222223333333333| + 123456789012345678901234567890123456789` column numbers + 0000000000111111111122| + 0123456789012345678901` string offsets + V~~~~~~~~ : range of FMT_LOC, from cols 23-31 + sprintf (d, "before %-+*.*lld after", arg3, arg4, arg5); + ^ ^ + | ` CONVERSION_CHAR: 'd' + type starts here + + where OFFSET_TO_TYPE_START is 13 (the offset to the "lld" within the + STRING_CST), where the user provided: + %-+*.*lld + the result (assuming "long" argument 5) should be: + %-+*.*ld + + If successful, returns a non-NULL string which should be freed + by the caller. + Otherwise, returns NULL. 
*/ + +static char * +get_corrected_substring (const substring_loc &fmt_loc, + format_wanted_type *type, tree arg_type, + const format_kind_info *fki, + int offset_to_type_start, char conversion_char) +{ + /* Attempt to provide hints for argument types, but not for field widths + and precisions. */ + if (!arg_type) + return NULL; + if (type->kind != CF_KIND_FORMAT) + return NULL; + + /* Locate the current code within the source range, rejecting + any awkward cases where the format string occupies more than + one line. + Lookup the place where the type starts (including any length + modifiers), getting it as the caret location. */ + substring_loc type_loc (fmt_loc); + type_loc.set_caret_index (offset_to_type_start); + + location_t fmt_substring_loc; + const char *err = type_loc.get_location (&fmt_substring_loc); + if (err) + return NULL; + + source_range fmt_substring_range + = get_range_from_loc (line_table, fmt_substring_loc); + + expanded_location caret + = expand_location_to_spelling_point (fmt_substring_loc); + expanded_location start + = expand_location_to_spelling_point (fmt_substring_range.m_start); + expanded_location finish + = expand_location_to_spelling_point (fmt_substring_range.m_finish); + if (caret.file != start.file) + return NULL; + if (start.file != finish.file) + return NULL; + if (caret.line != start.line) + return NULL; + if (start.line != finish.line) + return NULL; + if (start.column > caret.column) + return NULL; + if (start.column > finish.column) + return NULL; + if (caret.column > finish.column) + return NULL; + +#if BUILDING_GCC_VERSION >= 9000 + char_span line = location_get_source_line (start.file, start.line); + if (!line) + return NULL; + + /* If we got this far, then we have the line containing the + existing conversion specification. + + Generate a trimmed copy, containing the prefix part of the conversion + specification, up to the (but not including) the length modifier. + In the above example, this would be "%-+*.*". */ + int length_up_to_type = caret.column - start.column; + char_span prefix_span = line.subspan (start.column - 1, length_up_to_type); + char *prefix = prefix_span.xstrdup (); +#else + char *prefix = NULL; +#endif + + /* Now attempt to generate a suggestion for the rest of the specification + (length modifier and conversion char), based on ARG_TYPE and + CONVERSION_CHAR. + In the above example, this would be "ld". */ + char *format_for_type = get_format_for_type (fki, arg_type, conversion_char); + if (!format_for_type) + { + free (prefix); + return NULL; + } + + /* Success. Generate the resulting suggestion for the whole range of + FMT_LOC by concatenating the two strings. + In the above example, this would be "%-+*.*ld". */ + char *result = concat (prefix, format_for_type, NULL); + free (format_for_type); + free (prefix); + return result; +} + +/* Helper class for adding zero or more trailing '*' to types. + + The format type and name exclude any '*' for pointers, so those + must be formatted manually. For all the types we currently have, + this is adequate, but formats taking pointers to functions or + arrays would require the full type to be built up in order to + print it with %T. */ + +class indirection_suffix +{ + public: + indirection_suffix (int pointer_count) : m_pointer_count (pointer_count) {} + + /* Determine the size of the buffer (including NUL-terminator). */ + + size_t get_buffer_size () const + { + return m_pointer_count + 2; + } + + /* Write the '*' to DST and add a NUL-terminator. 
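+     For a pointer_count of 2 this yields "**" for C++ and " **" for C;
+     the leading space in the C case makes the printed type read
+     "int **" rather than "int**" once the suffix is appended to the
+     type name in the diagnostic.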
*/ + + void fill_buffer (char *dst) const + { + if (m_pointer_count == 0) + dst[0] = 0; + else if (c_dialect_cxx ()) + { + memset (dst, '*', m_pointer_count); + dst[m_pointer_count] = 0; + } + else + { + dst[0] = ' '; + memset (dst + 1, '*', m_pointer_count); + dst[m_pointer_count + 1] = 0; + } + } + + private: + int m_pointer_count; +}; + +#if BUILDING_GCC_VERSION >= 9000 +/* not exported by GCC... need a local copy :( */ +class frr_range_label_for_type_mismatch : public range_label +{ + public: + frr_range_label_for_type_mismatch (tree labelled_type, tree other_type) + : m_labelled_type (labelled_type), m_other_type (other_type) + { + } + + label_text get_text (unsigned range_idx) const OVERRIDE; + + protected: + tree m_labelled_type; + tree m_other_type; +}; + +/* Print T to CPP. */ + +static void +print_type (c_pretty_printer *cpp, tree t, bool *quoted) +{ + gcc_assert (TYPE_P (t)); + struct obstack *ob = pp_buffer (cpp)->obstack; + char *p = (char *) obstack_base (ob); + /* Remember the end of the initial dump. */ + int len = obstack_object_size (ob); + + tree name = TYPE_NAME (t); + if (name && TREE_CODE (name) == TYPE_DECL && DECL_NAME (name)) + pp_identifier (cpp, lang_hooks.decl_printable_name (name, 2)); + else + cpp->type_id (t); + + /* If we're printing a type that involves typedefs, also print the + stripped version. But sometimes the stripped version looks + exactly the same, so we don't want it after all. To avoid + printing it in that case, we play ugly obstack games. */ + if (TYPE_CANONICAL (t) && t != TYPE_CANONICAL (t)) + { + c_pretty_printer cpp2; + /* Print the stripped version into a temporary printer. */ + cpp2.type_id (TYPE_CANONICAL (t)); + struct obstack *ob2 = cpp2.buffer->obstack; + /* Get the stripped version from the temporary printer. */ + const char *aka = (char *) obstack_base (ob2); + int aka_len = obstack_object_size (ob2); + int type1_len = obstack_object_size (ob) - len; + + /* If they are identical, bail out. */ + if (aka_len == type1_len && memcmp (p + len, aka, aka_len) == 0) + return; + + /* They're not, print the stripped version now. */ + if (*quoted) + pp_end_quote (cpp, pp_show_color (cpp)); + pp_c_whitespace (cpp); + pp_left_brace (cpp); + pp_c_ws_string (cpp, _("aka")); + pp_c_whitespace (cpp); + if (*quoted) + pp_begin_quote (cpp, pp_show_color (cpp)); + cpp->type_id (TYPE_CANONICAL (t)); + if (*quoted) + pp_end_quote (cpp, pp_show_color (cpp)); + pp_right_brace (cpp); + /* No further closing quotes are needed. */ + *quoted = false; + } +} + +/* C-specific implementation of range_label::get_text () vfunc for + range_label_for_type_mismatch. */ + +label_text +frr_range_label_for_type_mismatch::get_text (unsigned /*range_idx*/) const +{ + if (m_labelled_type == NULL_TREE) + return label_text (NULL, false); + + c_pretty_printer cpp; + bool quoted = false; + print_type (&cpp, m_labelled_type, "ed); + return label_text (xstrdup (pp_formatted_text (&cpp)), true); +} + +#define range_label_for_type_mismatch frr_range_label_for_type_mismatch +#endif + +/* Subclass of range_label for labelling the range in the format string + with the type in question, adding trailing '*' for pointer_count. 
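+   For instance, in test_type_mismatch_range_labels below the format
+   directive is labelled "char *" (C) or "char*" (C++) while the
+   mismatched argument is labelled "int".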
*/ + +class range_label_for_format_type_mismatch + : public range_label_for_type_mismatch +{ + public: + range_label_for_format_type_mismatch (tree labelled_type, tree other_type, + int pointer_count) + : range_label_for_type_mismatch (labelled_type, other_type), + m_pointer_count (pointer_count) + { + } + + label_text get_text (unsigned range_idx) const FINAL OVERRIDE + { + label_text text = range_label_for_type_mismatch::get_text (range_idx); + if (text.m_buffer == NULL) + return text; + + indirection_suffix suffix (m_pointer_count); + char *p = (char *) alloca (suffix.get_buffer_size ()); + suffix.fill_buffer (p); + + char *result = concat (text.m_buffer, p, NULL); + text.maybe_free (); + return label_text (result, true); + } + + private: + int m_pointer_count; +}; + +/* Give a warning about a format argument of different type from that expected. + The range of the diagnostic is taken from WHOLE_FMT_LOC; the caret location + is based on the location of the char at TYPE->offset_loc. + PARAM_LOC is the location of the relevant argument, or UNKNOWN_LOCATION + if this is unavailable. + WANTED_TYPE is the type the argument should have, + possibly stripped of pointer dereferences. The description (such as "field + precision"), the placement in the format string, a possibly more + friendly name of WANTED_TYPE, and the number of pointer dereferences + are taken from TYPE. ARG_TYPE is the type of the actual argument, + or NULL if it is missing. + + OFFSET_TO_TYPE_START is the offset within the execution-charset encoded + format string to where type information begins for the conversion + (the length modifier and conversion specifier). + CONVERSION_CHAR is the user-provided conversion specifier. + + For example, given a type mismatch for argument 5 here: + + 00000000011111111112222222222333333333344444444445555555555| + 12345678901234567890123456789012345678901234567890123456789` column numbers + 0000000000111111111122| + 0123456789012345678901` offsets within STRING_CST + V~~~~~~~~ : range of WHOLE_FMT_LOC, from cols 23-31 + sprintf (d, "before %-+*.*lld after", int_expr, int_expr, long_expr); + ^ ^ ^~~~~~~~~ + | ` CONVERSION_CHAR: 'd' PARAM_LOC + type starts here + + OFFSET_TO_TYPE_START is 13, the offset to the "lld" within the + STRING_CST. */ + +static void +format_type_warning (const substring_loc &whole_fmt_loc, + location_t param_loc, + format_wanted_type *type, + tree wanted_type, tree arg_type, + const format_kind_info *fki, + int offset_to_type_start, + char conversion_char, + const char *extra) +{ + enum format_specifier_kind kind = type->kind; + const char *wanted_type_name = type->wanted_type_name; + const char *format_start = type->format_start; + int format_length = type->format_length; + int pointer_count = type->pointer_count; + int arg_num = type->arg_num; + + if (!extra) + extra = ""; + + /* If ARG_TYPE is a typedef with a misleading name (for example, + size_t but not the standard size_t expected by printf %zu), avoid + printing the typedef name. */ + if (wanted_type_name + && arg_type + && TYPE_NAME (arg_type) + && TREE_CODE (TYPE_NAME (arg_type)) == TYPE_DECL + && DECL_NAME (TYPE_NAME (arg_type)) + && !strcmp (wanted_type_name, + lang_hooks.decl_printable_name (TYPE_NAME (arg_type), 2))) + arg_type = TYPE_MAIN_VARIANT (arg_type); + + indirection_suffix suffix (pointer_count); + char *p = (char *) alloca (suffix.get_buffer_size ()); + suffix.fill_buffer (p); + + /* WHOLE_FMT_LOC has the caret at the end of the range. + Set the caret to be at the offset from TYPE. 
Subtract one + from the offset for the same reason as in format_warning_at_char. */ + substring_loc fmt_loc (whole_fmt_loc); + fmt_loc.set_caret_index (type->offset_loc - 1); + +#if BUILDING_GCC_VERSION >= 9000 + range_label_for_format_type_mismatch fmt_label (wanted_type, arg_type, + pointer_count); + range_label_for_type_mismatch param_label (arg_type, wanted_type); + + /* Get a string for use as a replacement fix-it hint for the range in + fmt_loc, or NULL. */ + char *corrected_substring + = get_corrected_substring (fmt_loc, type, arg_type, fki, + offset_to_type_start, conversion_char); + format_string_diagnostic_t diag (fmt_loc, &fmt_label, param_loc, ¶m_label, + corrected_substring); +# define format_warning_at_substring(a,b,c,d,e,...) \ + diag.emit_warning(__VA_ARGS__) +#else +# define format_warning_at_substring(a,b,c,d,...) \ + format_warning_at_substring(a,c,__VA_ARGS__) + /* Get a string for use as a replacement fix-it hint for the range in + fmt_loc, or NULL. */ + char *corrected_substring + = get_corrected_substring (fmt_loc, type, arg_type, fki, + offset_to_type_start, conversion_char); + +#endif + + if (wanted_type_name) + { + if (arg_type) + format_warning_at_substring + (fmt_loc, &fmt_label, param_loc, ¶m_label, + corrected_substring, OPT_Wformat_, + "%s %<%s%.*s%> expects argument of type %<%s%s%>, " + "but argument %d has type %qT%s", + gettext (kind_descriptions[kind]), + (kind == CF_KIND_FORMAT ? "%" : ""), + format_length, format_start, + wanted_type_name, p, arg_num, arg_type, extra); + else + format_warning_at_substring + (fmt_loc, &fmt_label, param_loc, ¶m_label, + corrected_substring, OPT_Wformat_, + "%s %<%s%.*s%> expects a matching %<%s%s%> argument%s", + gettext (kind_descriptions[kind]), + (kind == CF_KIND_FORMAT ? "%" : ""), + format_length, format_start, wanted_type_name, p, extra); + } + else + { + if (arg_type) + format_warning_at_substring + (fmt_loc, &fmt_label, param_loc, ¶m_label, + corrected_substring, OPT_Wformat_, + "%s %<%s%.*s%> expects argument of type %<%T%s%>, " + "but argument %d has type %qT%s", + gettext (kind_descriptions[kind]), + (kind == CF_KIND_FORMAT ? "%" : ""), + format_length, format_start, + wanted_type, p, arg_num, arg_type, extra); + else + format_warning_at_substring + (fmt_loc, &fmt_label, param_loc, ¶m_label, + corrected_substring, OPT_Wformat_, + "%s %<%s%.*s%> expects a matching %<%T%s%> argument%s", + gettext (kind_descriptions[kind]), + (kind == CF_KIND_FORMAT ? "%" : ""), + format_length, format_start, wanted_type, p, extra); + } + + free (corrected_substring); +} + + +#if 0 +/* Given a format_char_info array FCI, and a character C, this function + returns the index into the conversion_specs where that specifier's + data is located. The character must exist. */ +static unsigned int +find_char_info_specifier_index (const format_char_info *fci, int c) +{ + unsigned i; + + for (i = 0; fci->format_chars; i++, fci++) + if (strchr (fci->format_chars, c)) + return i; + + /* We shouldn't be looking for a non-existent specifier. */ + gcc_unreachable (); +} + +/* Given a format_length_info array FLI, and a character C, this + function returns the index into the conversion_specs where that + modifier's data is located. The character must exist. */ +static unsigned int +find_length_info_modifier_index (const format_length_info *fli, int c) +{ + unsigned i; + + for (i = 0; fli->name; i++, fli++) + if (strchr (fli->name, c)) + return i; + + /* We shouldn't be looking for a non-existent modifier. 
*/ + gcc_unreachable (); +} +#endif + +#ifdef TARGET_FORMAT_TYPES +extern const format_kind_info TARGET_FORMAT_TYPES[]; +#endif + +#ifdef TARGET_OVERRIDES_FORMAT_ATTRIBUTES +extern const target_ovr_attr TARGET_OVERRIDES_FORMAT_ATTRIBUTES[]; +#endif +#ifdef TARGET_OVERRIDES_FORMAT_INIT + extern void TARGET_OVERRIDES_FORMAT_INIT (void); +#endif + +/* Attributes such as "printf" are equivalent to those such as + "gnu_printf" unless this is overridden by a target. */ +static const target_ovr_attr gnu_target_overrides_format_attributes[] = +{ + { NULL, NULL } +}; + +/* Translate to unified attribute name. This is used in decode_format_type and + decode_format_attr. In attr_name the user specified argument is passed. It + returns the unified format name from TARGET_OVERRIDES_FORMAT_ATTRIBUTES + or the attr_name passed to this function, if there is no matching entry. */ +static const char * +convert_format_name_to_system_name (const char *attr_name) +{ + int i; + + if (attr_name == NULL || *attr_name == 0 + || strncmp (attr_name, "gcc_", 4) == 0) + return attr_name; +#ifdef TARGET_OVERRIDES_FORMAT_INIT + TARGET_OVERRIDES_FORMAT_INIT (); +#endif + +#ifdef TARGET_OVERRIDES_FORMAT_ATTRIBUTES + /* Check if format attribute is overridden by target. */ + if (TARGET_OVERRIDES_FORMAT_ATTRIBUTES != NULL + && TARGET_OVERRIDES_FORMAT_ATTRIBUTES_COUNT > 0) + { + for (i = 0; i < TARGET_OVERRIDES_FORMAT_ATTRIBUTES_COUNT; ++i) + { + if (cmp_attribs (TARGET_OVERRIDES_FORMAT_ATTRIBUTES[i].named_attr_src, + attr_name)) + return attr_name; + if (cmp_attribs (TARGET_OVERRIDES_FORMAT_ATTRIBUTES[i].named_attr_dst, + attr_name)) + return TARGET_OVERRIDES_FORMAT_ATTRIBUTES[i].named_attr_src; + } + } +#endif + /* Otherwise default to gnu format. */ + for (i = 0; + gnu_target_overrides_format_attributes[i].named_attr_src != NULL; + ++i) + { + if (cmp_attribs (gnu_target_overrides_format_attributes[i].named_attr_src, + attr_name)) + return attr_name; + if (cmp_attribs (gnu_target_overrides_format_attributes[i].named_attr_dst, + attr_name)) + return gnu_target_overrides_format_attributes[i].named_attr_src; + } + + return attr_name; +} + +/* Handle a "format" attribute; arguments as in + struct attribute_spec.handler. */ +tree +handle_frr_format_attribute (tree *node, tree ARG_UNUSED (name), tree args, + int flags, bool *no_add_attrs) +{ + tree type = *node; + function_format_info info; + + /* Canonicalize name of format function. */ + if (TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE) + TREE_VALUE (args) = canonicalize_attr_name (TREE_VALUE (args)); + + if (!decode_format_attr (args, &info, 0)) + { + *no_add_attrs = true; + return NULL_TREE; + } + + if (prototype_p (type)) + { + if (!check_format_string (type, info.format_num, flags, + no_add_attrs, info.format_type)) + return NULL_TREE; + + if (info.first_arg_num != 0) + { + unsigned HOST_WIDE_INT arg_num = 1; + function_args_iterator iter; + tree arg_type; + + /* Verify that first_arg_num points to the last arg, + the ... */ + FOREACH_FUNCTION_ARGS (type, arg_type, iter) + arg_num++; + + if (arg_num != info.first_arg_num) + { + if (!(flags & (int) ATTR_FLAG_BUILT_IN)) + error ("args to be formatted is not %<...%>"); + *no_add_attrs = true; + return NULL_TREE; + } + } + } + + /* Check if this is a strftime variant. Just for this variant + FMT_FLAG_ARG_CONVERT is not set. 
*/ + if ((format_types[info.format_type].flags & (int) FMT_FLAG_ARG_CONVERT) == 0 + && info.first_arg_num != 0) + { + error ("strftime formats cannot format arguments"); + *no_add_attrs = true; + return NULL_TREE; + } + + return NULL_TREE; +} + +#if CHECKING_P + +namespace selftest { + +/* Selftests of location handling. */ + +/* Get the format_kind_info with the given name. */ + +static const format_kind_info * +get_info (const char *name) +{ + int idx = decode_format_type (name); + const format_kind_info *fki = &format_types[idx]; + ASSERT_STREQ (fki->name, name); + return fki; +} + +/* Verify that get_format_for_type (FKI, TYPE, CONVERSION_CHAR) + is EXPECTED_FORMAT. */ + +static void +assert_format_for_type_streq (const location &loc, const format_kind_info *fki, + const char *expected_format, tree type, + char conversion_char) +{ + gcc_assert (fki); + gcc_assert (expected_format); + gcc_assert (type); + + char *actual_format = get_format_for_type (fki, type, conversion_char); + ASSERT_STREQ_AT (loc, expected_format, actual_format); + free (actual_format); +} + +/* Selftests for get_format_for_type. */ + +#define ASSERT_FORMAT_FOR_TYPE_STREQ(EXPECTED_FORMAT, TYPE, CONVERSION_CHAR) \ + assert_format_for_type_streq (SELFTEST_LOCATION, (fki), (EXPECTED_FORMAT), \ + (TYPE), (CONVERSION_CHAR)) + +/* Selftest for get_format_for_type for "printf"-style functions. */ + +static void +test_get_format_for_type_printf () +{ + const format_kind_info *fki = get_info ("gnu_printf"); + ASSERT_NE (fki, NULL); + + ASSERT_FORMAT_FOR_TYPE_STREQ ("f", double_type_node, 'i'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("Lf", long_double_type_node, 'i'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("f", double_type_node, 'o'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("Lf", long_double_type_node, 'o'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("f", double_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("Lf", long_double_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("f", double_type_node, 'X'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("Lf", long_double_type_node, 'X'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("d", integer_type_node, 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("i", integer_type_node, 'i'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("o", integer_type_node, 'o'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("x", integer_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("X", integer_type_node, 'X'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("d", unsigned_type_node, 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("i", unsigned_type_node, 'i'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("o", unsigned_type_node, 'o'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("x", unsigned_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("X", unsigned_type_node, 'X'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("ld", long_integer_type_node, 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("li", long_integer_type_node, 'i'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("lx", long_integer_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("lo", long_unsigned_type_node, 'o'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("lx", long_unsigned_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("lld", long_long_integer_type_node, 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("lli", long_long_integer_type_node, 'i'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("llo", long_long_unsigned_type_node, 'o'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("llx", long_long_unsigned_type_node, 'x'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("s", build_pointer_type (char_type_node), 'i'); +} + +/* Selftest for get_format_for_type for "scanf"-style functions. 
*/ + +static void +test_get_format_for_type_scanf () +{ + const format_kind_info *fki = get_info ("gnu_scanf"); + ASSERT_NE (fki, NULL); + ASSERT_FORMAT_FOR_TYPE_STREQ ("d", build_pointer_type (integer_type_node), 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("u", build_pointer_type (unsigned_type_node), 'u'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("ld", + build_pointer_type (long_integer_type_node), 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("lu", + build_pointer_type (long_unsigned_type_node), 'u'); + ASSERT_FORMAT_FOR_TYPE_STREQ + ("lld", build_pointer_type (long_long_integer_type_node), 'd'); + ASSERT_FORMAT_FOR_TYPE_STREQ + ("llu", build_pointer_type (long_long_unsigned_type_node), 'u'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("e", build_pointer_type (float_type_node), 'e'); + ASSERT_FORMAT_FOR_TYPE_STREQ ("le", build_pointer_type (double_type_node), 'e'); +} + +#undef ASSERT_FORMAT_FOR_TYPE_STREQ + +/* Exercise the type-printing label code, to give some coverage + under "make selftest-valgrind" (in particular, to ensure that + the label-printing machinery doesn't leak). */ + +static void +test_type_mismatch_range_labels () +{ + /* Create a tempfile and write some text to it. + ....................0000000001 11111111 12 22222222 + ....................1234567890 12345678 90 12345678. */ + const char *content = " printf (\"msg: %i\\n\", msg);\n"; + temp_source_file tmp (SELFTEST_LOCATION, ".c", content); + line_table_test ltt; + + linemap_add (line_table, LC_ENTER, false, tmp.get_filename (), 1); + + location_t c17 = linemap_position_for_column (line_table, 17); + ASSERT_EQ (LOCATION_COLUMN (c17), 17); + location_t c18 = linemap_position_for_column (line_table, 18); + location_t c24 = linemap_position_for_column (line_table, 24); + location_t c26 = linemap_position_for_column (line_table, 26); + + /* Don't attempt to run the tests if column data might be unavailable. */ + if (c26 > LINE_MAP_MAX_LOCATION_WITH_COLS) + return; + + location_t fmt = make_location (c18, c17, c18); + ASSERT_EQ (LOCATION_COLUMN (fmt), 18); + + location_t param = make_location (c24, c24, c26); + ASSERT_EQ (LOCATION_COLUMN (param), 24); + + range_label_for_format_type_mismatch fmt_label (char_type_node, + integer_type_node, 1); + range_label_for_type_mismatch param_label (integer_type_node, + char_type_node); + gcc_rich_location richloc (fmt, &fmt_label); + richloc.add_range (param, SHOW_RANGE_WITHOUT_CARET, ¶m_label); + + test_diagnostic_context dc; + diagnostic_show_locus (&dc, &richloc, DK_ERROR); + if (c_dialect_cxx ()) + /* "char*", without a space. */ + ASSERT_STREQ ("\n" + " printf (\"msg: %i\\n\", msg);\n" + " ~^ ~~~\n" + " | |\n" + " char* int\n", + pp_formatted_text (dc.printer)); + else + /* "char *", with a space. */ + ASSERT_STREQ ("\n" + " printf (\"msg: %i\\n\", msg);\n" + " ~^ ~~~\n" + " | |\n" + " | int\n" + " char *\n", + pp_formatted_text (dc.printer)); +} + +/* Run all of the selftests within this file. 
*/ + +void +c_format_c_tests () +{ + test_get_modifier_for_format_len (); + test_get_format_for_type_printf (); + test_get_format_for_type_scanf (); + test_type_mismatch_range_labels (); +} + +} // namespace selftest + +#endif /* CHECKING_P */ + +// include "gt-c-family-c-format.h" + +static const struct attribute_spec frr_format_attribute_table[] = +{ + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, + affects_type_identity, handler, exclude } */ + { "frr_format", 3, 3, false, true, true, false, + handle_frr_format_attribute, NULL }, + { "frr_format_arg", 1, 1, false, true, true, false, + handle_frr_format_arg_attribute, NULL }, + { NULL, 0, 0, false, false, false, false, NULL, NULL } +}; + +static void +register_attributes (void *event_data, void *data) +{ + // warning (0, G_("Callback to register attributes")); + register_attribute (frr_format_attribute_table); +} + +tree +cb_walk_tree_fn (tree * tp, int * walk_subtrees, void * data ATTRIBUTE_UNUSED) +{ + if (TREE_CODE (*tp) != CALL_EXPR) + return NULL_TREE; + + tree call_expr = *tp; + + int nargs = call_expr_nargs(call_expr); + tree fn = CALL_EXPR_FN(call_expr); + + if (!fn || TREE_CODE (fn) != ADDR_EXPR) + return NULL_TREE; + + tree fndecl = TREE_OPERAND (fn, 0); + if (TREE_CODE (fndecl) != FUNCTION_DECL) + return NULL_TREE; + +#if 0 + warning (0, G_("function call to %s, %d args"), + IDENTIFIER_POINTER (DECL_NAME (fndecl)), + nargs); +#endif + + tree *fargs = (tree *) alloca (nargs * sizeof (tree)); + + for (int j = 0; j < nargs; j++) + { + tree arg = CALL_EXPR_ARG(call_expr, j); + + /* For -Wformat undo the implicit passing by hidden reference + done by convert_arg_to_ellipsis. */ + if (TREE_CODE (arg) == ADDR_EXPR + && TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE) + fargs[j] = TREE_OPERAND (arg, 0); + else + fargs[j] = arg; + } + + check_function_format (TYPE_ATTRIBUTES (TREE_TYPE (fndecl)), nargs, fargs, NULL); + return NULL_TREE; +} + +static void +setup_type (const char *name, tree *dst) +{ + tree tmp; + + if (*dst && *dst != void_type_node) + return; + + *dst = maybe_get_identifier (name); + if (!*dst) + return; + + tmp = identifier_global_value (*dst); + if (tmp && TREE_CODE (tmp) != TYPE_DECL) + { + warning (0, "%<%s%> is not defined as a type", name); + *dst = NULL; + return; + } + if (tmp && TREE_CODE (tmp) == TYPE_DECL) + *dst = tmp; + else + *dst = NULL; +} + +static void +handle_finish_parse (void *event_data, void *data) +{ + tree fndecl = (tree) event_data; + gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL); + + setup_type ("uint64_t", &local_uint64_t_node); + setup_type ("int64_t", &local_int64_t_node); + + setup_type ("size_t", &local_size_t_node); + setup_type ("ssize_t", &local_ssize_t_node); + setup_type ("atomic_size_t", &local_atomic_size_t_node); + setup_type ("atomic_ssize_t", &local_atomic_ssize_t_node); + setup_type ("ptrdiff_t", &local_ptrdiff_t_node); + + setup_type ("pid_t", &local_pid_t_node); + setup_type ("uid_t", &local_uid_t_node); + setup_type ("gid_t", &local_gid_t_node); + setup_type ("time_t", &local_time_t_node); + + setup_type ("socklen_t", &local_socklen_t_node); + setup_type ("in_addr_t", &local_in_addr_t_node); + + const format_char_info *fci; + + for (fci = print_char_table; fci->format_chars; fci++) + { + if (!fci->kernel_ext) + continue; + + struct kernel_ext_fmt *etab = fci->kernel_ext; + struct kernel_ext_fmt *etab_end = etab + ETAB_SZ; + + for (; etab->suffix && etab < etab_end; etab++) + { + tree identifier, node; + + if (etab->type && etab->type != void_type_node) + 
continue; + + identifier = maybe_get_identifier (etab->type_str); + + if (!identifier || identifier == error_mark_node) + continue; + + if (etab->type_code) + { + node = identifier_global_tag (identifier); + if (!node) + continue; + + if (node->base.code != etab->type_code) + { + if (!etab->warned) + { + warning (0, "%qs tag category (struct/union/enum) mismatch", etab->type_str); + etab->warned = true; + } + continue; + } + } + else + { + node = identifier_global_value (identifier); + if (!node) + continue; + + if (TREE_CODE (node) != TYPE_DECL) + { + if (!etab->warned) + { + warning (0, "%qs is defined as a non-type", etab->type_str); + etab->warned = true; + } + continue; + } + node = TREE_TYPE (node); + } + + etab->type = node; + } + } + + walk_tree (&DECL_SAVED_TREE (fndecl), cb_walk_tree_fn, NULL, NULL); +} + +static void +handle_pragma_printfrr_ext (cpp_reader *dummy) +{ + tree token = 0; + location_t loc; + enum cpp_ttype ttype; + + ttype = pragma_lex (&token, &loc); + if (ttype != CPP_STRING) + { + error_at (loc, "%<#pragma FRR printfrr_ext%> requires string argument"); + return; + } + + const char *s = TREE_STRING_POINTER (token); + + if (s[0] != '%') + { + error_at (loc, "%<#pragma FRR printfrr_ext%>: invalid format string, needs to start with '%%'"); + return; + } + + switch (s[1]) + { + case 'p': + case 'd': + case 'i': + break; + default: + error_at (loc, "%<#pragma FRR printfrr_ext%>: invalid format string, needs to be %%p, %%d or %%i"); + return; + } + + const format_char_info *fci; + + for (fci = print_char_table; fci->format_chars; fci++) + if (strchr (fci->format_chars, s[1])) + break; + + gcc_assert (fci->format_chars); + gcc_assert (fci->kernel_ext); + + struct kernel_ext_fmt *etab = fci->kernel_ext; + struct kernel_ext_fmt *etab_end = etab + ETAB_SZ; + + switch (s[2]) + { + case 'A' ... 
'Z': + break; + + default: + error_at (loc, "%<#pragma FRR printfrr_ext%>: invalid format string, suffix must start with an uppercase letter"); + return; + } + + /* -2 -- need to keep the sentinel at the end */ + if (etab[ETAB_SZ - 2].suffix) + { + error_at (loc, "%<#pragma FRR printfrr_ext%>: out of space for format suffixes"); + return; + } + + for (; etab->suffix && etab < etab_end; etab++) + { + if (!strcmp(s + 2, etab->suffix)) + { + memmove (etab + 1, etab, (etab_end - etab - 1) * sizeof (*etab)); + + if (0) + { + warning_at (loc, OPT_Wformat_, + "%<#pragma FRR printfrr_ext%>: duplicate printf format suffix \"%s\"", s); + warning_at (etab->origin_loc, OPT_Wformat_, + "%<#pragma FRR printfrr_ext%>: previous definition was here"); + return; + } + + break; + } + + if (!strncmp(s + 2, etab->suffix, MIN(strlen(s + 2), strlen(etab->suffix)))) + { + warning_at (loc, OPT_Wformat_, + "%<#pragma FRR printfrr_ext%>: overlapping printf format suffix \"%s\"", s); + warning_at (etab->origin_loc, OPT_Wformat_, + "%<#pragma FRR printfrr_ext%>: previous definition for \"%%%c%s\" was here", s[1], etab->suffix); + return; + } + } + + gcc_assert (etab < etab_end); + + memset (etab, 0, sizeof (*etab)); + etab->suffix = xstrdup(s + 2); + etab->origin_loc = loc; + etab->type = void_type_node; + + ttype = pragma_lex (&token, &loc); + if (ttype != CPP_OPEN_PAREN) + { + error_at (loc, "%<#pragma FRR printfrr_ext%> expected %<(%>"); + goto out_drop; + } + + ttype = pragma_lex (&token, &loc); + + /* qualifiers */ + if (ttype == CPP_NAME && !strcmp (IDENTIFIER_POINTER (token), "const")) + { + etab->t_const = true; + ttype = pragma_lex (&token, &loc); + } + + /* tagged types */ + if (ttype == CPP_NAME && !strcmp (IDENTIFIER_POINTER (token), "struct")) + { + etab->type_code = RECORD_TYPE; + ttype = pragma_lex (&token, &loc); + } + else if (ttype == CPP_NAME && !strcmp (IDENTIFIER_POINTER (token), "union")) + { + etab->type_code = UNION_TYPE; + ttype = pragma_lex (&token, &loc); + } + else if (ttype == CPP_NAME && !strcmp (IDENTIFIER_POINTER (token), "enum")) + { + etab->type_code = ENUMERAL_TYPE; + ttype = pragma_lex (&token, &loc); + } + + /* type name */ + if (ttype != CPP_NAME) + { + error_at (loc, "%<#pragma FRR printfrr_ext%>: expected typename identifier"); + goto out_drop; + } + + etab->type_str = xstrdup (IDENTIFIER_POINTER (token)); + + while ((ttype = pragma_lex (&token, &loc)) != CPP_CLOSE_PAREN) + { + switch (ttype) { + case CPP_NAME: + error_at (loc, "%<#pragma FRR printfrr_ext%>: unexpected identifier. 
Note the only supported qualifier is \"const\"."); + goto out_drop; + + case CPP_MULT: + etab->ptrlevel++; + break; + + case CPP_EOF: + error_at (loc, "%<#pragma FRR printfrr_ext%>: premature end of line, missing %<)%>"); + goto out_drop; + + default: + error_at (loc, "%<#pragma FRR printfrr_ext%>: unsupported token"); + goto out_drop; + } + } + + ttype = pragma_lex (&token, &loc); + if (ttype != CPP_EOF) + warning_at (loc, OPT_Wformat_, + "%<#pragma FRR printfrr_ext%>: garbage at end of line"); + + return; + +out_drop: + memset (etab, 0, sizeof (*etab)); +} + +static void +register_pragma_printfrr_ext (void *event_data, void *data) +{ + c_register_pragma_with_expansion ("FRR", "printfrr_ext", handle_pragma_printfrr_ext); +} + +static void +define_vars (void *gcc_data, void *user_data) +{ + cpp_define (parse_in, "_FRR_ATTRIBUTE_PRINTFRR=0x10000"); +} + +#ifndef __visible +#define __visible __attribute__((visibility("default"))) +#endif + +__visible int plugin_is_GPL_compatible; + +__visible int +plugin_init (struct plugin_name_args *plugin_info, + struct plugin_gcc_version *version) +{ + const char *plugin_name = plugin_info->base_name; + + if (!plugin_default_version_check(version, &gcc_version)) + { + error(G_("incompatible gcc/plugin versions")); + return 1; + } + + memset (ext_p, 0, sizeof (ext_p)); + memset (ext_d, 0, sizeof (ext_d)); + + register_callback (plugin_name, PLUGIN_FINISH_PARSE_FUNCTION, handle_finish_parse, NULL); + register_callback (plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); + register_callback (plugin_name, PLUGIN_START_UNIT, define_vars, NULL); + register_callback (plugin_name, PLUGIN_PRAGMAS, register_pragma_printfrr_ext, NULL); + return 0; +} diff --git a/tools/gcc-plugins/frr-format.h b/tools/gcc-plugins/frr-format.h new file mode 100644 index 0000000000..87d2049ed4 --- /dev/null +++ b/tools/gcc-plugins/frr-format.h @@ -0,0 +1,364 @@ +/* Check calls to formatted I/O functions (-Wformat). + Copyright (C) 1992-2018 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef GCC_C_FORMAT_H +#define GCC_C_FORMAT_H + +/* The meaningfully distinct length modifiers for format checking recognized + by GCC. */ +enum format_lengths +{ + FMT_LEN_none, + FMT_LEN_hh, + FMT_LEN_h, + FMT_LEN_l, + FMT_LEN_ll, + FMT_LEN_L, + FMT_LEN_z, + FMT_LEN_t, + FMT_LEN_j, + FMT_LEN_H, + FMT_LEN_D, + FMT_LEN_DD, + FMT_LEN_MAX +}; + + +/* The standard versions in which various format features appeared. */ +enum format_std_version +{ + STD_C89, + STD_C94, + STD_C9L, /* C99, but treat as C89 if -Wno-long-long. */ + STD_C99, + STD_EXT +}; + +/* Flags that may apply to a particular kind of format checked by GCC. */ +enum +{ + /* This format converts arguments of types determined by the + format string. */ + FMT_FLAG_ARG_CONVERT = 1, + /* The scanf allocation 'a' kludge applies to this format kind. 
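To make the pragma syntax parsed by handle_pragma_printfrr_ext above concrete, here is a minimal sketch of how a consumer could register and use a printfrr extension once the plugin is loaded. The %pI4/%pFX suffixes, the argument types, and the zlog_debug prototype are assumptions for illustration, not definitions made by this patch.

// Sketch only; the suffixes, types and prototype below are assumed.
#pragma FRR printfrr_ext "%pI4" (struct in_addr *)
#pragma FRR printfrr_ext "%pFX" (const struct prefix *)

extern void zlog_debug(const char *fmt, ...)
	__attribute__((frr_format(printfrr, 1, 2)));

static void log_route(const struct prefix *p, struct in_addr *gw)
{
	// Both pointer arguments are now checked against the types
	// registered above; passing e.g. an int for %pFX would warn.
	zlog_debug("route %pFX via %pI4", p, gw);
}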
*/
+  FMT_FLAG_SCANF_A_KLUDGE = 2,
+  /* A % during parsing a specifier is allowed to be a modified % rather
+     than indicating the format is broken and we are out-of-sync. */
+  FMT_FLAG_FANCY_PERCENT_OK = 4,
+  /* With $ operand numbers, it is OK to reference the same argument more
+     than once. */
+  FMT_FLAG_DOLLAR_MULTIPLE = 8,
+  /* This format type uses $ operand numbers (strfmon doesn't). */
+  FMT_FLAG_USE_DOLLAR = 16,
+  /* Zero width is bad in this type of format (scanf). */
+  FMT_FLAG_ZERO_WIDTH_BAD = 32,
+  /* Empty precision specification is OK in this type of format (printf). */
+  FMT_FLAG_EMPTY_PREC_OK = 64,
+  /* Gaps are allowed in the arguments with $ operand numbers if all
+     arguments are pointers (scanf). */
+  FMT_FLAG_DOLLAR_GAP_POINTER_OK = 128,
+  /* The format arg is an opaque object that will be parsed by an external
+     facility. */
+  FMT_FLAG_PARSE_ARG_CONVERT_EXTERNAL = 256
+  /* Not included here: details of whether width or precision may occur
+     (controlled by width_char and precision_char); details of whether
+     '*' can be used for these (width_type and precision_type); details
+     of whether length modifiers can occur (length_char_specs). */
+};
+
+/* Structure describing a length modifier supported in format checking, and
+   possibly a doubled version such as "hh". */
+struct format_length_info
+{
+  /* Name of the single-character length modifier. If prefixed by
+     a zero character, it describes a multi character length
+     modifier, like I64, I32, etc. */
+  const char *name;
+  /* Index into a format_char_info.types array. */
+  enum format_lengths index;
+  /* Standard version this length appears in. */
+  enum format_std_version std;
+  /* Same, if the modifier can be repeated, or NULL if it can't. */
+  const char *double_name;
+  enum format_lengths double_index;
+  enum format_std_version double_std;
+
+  /* If this flag is set, just scalar width identity is checked, and
+     not the type identity itself. */
+  int scalar_identity_flag;
+};
+
+
+struct kernel_ext_fmt
+{
+  const char *suffix;
+
+  /* RECORD_TYPE, UNION_TYPE, ENUMERAL_TYPE, or NULL for typedef */
+  tree_code type_code;
+  int ptrlevel;
+  bool t_const;
+  bool warned;
+
+  const char *type_str;
+  GTY(()) tree type;
+
+  location_t origin_loc;
+};
+
+
+/* Structure describing the combination of a conversion specifier
+   (or a set of specifiers which act identically) and a length modifier. */
+struct format_type_detail
+{
+  /* The standard version this combination of length and type appeared in.
+     This is only relevant if greater than those for length and type
+     individually; otherwise it is ignored. */
+  enum format_std_version std;
+  /* The name to use for the type, if different from that generated internally
+     (e.g., "signed size_t"). */
+  const char *name;
+  /* The type itself. */
+  tree *type;
+};
+
+
+/* Macros to fill out tables of these. */
+#define NOARGUMENTS { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }
+#define BADLEN { STD_C89, NULL, NULL }
+#define NOLENGTHS { BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }
+
+
+/* Structure describing a format conversion specifier (or a set of specifiers
+   which act identically), and the length modifiers used with it. */
+struct format_char_info
+{
+  const char *format_chars;
+  int pointer_count;
+  enum format_std_version std;
+  /* Types accepted for each length modifier. */
+  format_type_detail types[FMT_LEN_MAX];
+  /* List of other modifier characters allowed with these specifiers.
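As a rough illustration of how format_length_info rows are meant to be filled in, the sketch below mirrors the usual GNU printf length table; the rows are assumptions for illustration and are not copied from the plugin's own tables.

// Assumed example rows, not the plugin's actual table.
static const format_length_info example_length_specs[] =
{
  { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C9L, 0 },
  { "z", FMT_LEN_z, STD_C99, NULL, FMT_LEN_none, STD_C89, 0 },
  { "j", FMT_LEN_j, STD_C99, NULL, FMT_LEN_none, STD_C89, 0 },
  { NULL, FMT_LEN_none, STD_C89, NULL, FMT_LEN_none, STD_C89, 0 }
};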
+ This lists flags, and additionally "w" for width, "p" for precision + (right precision, for strfmon), "#" for left precision (strfmon), + "a" for scanf "a" allocation extension (not applicable in C99 mode), + "*" for scanf suppression, and "E" and "O" for those strftime + modifiers. */ + const char *flag_chars; + /* List of additional flags describing these conversion specifiers. + "c" for generic character pointers being allowed, "2" for strftime + two digit year formats, "3" for strftime formats giving two digit + years in some locales, "4" for "2" which becomes "3" with an "E" modifier, + "o" if use of strftime "O" is a GNU extension beyond C99, + "W" if the argument is a pointer which is dereferenced and written into, + "R" if the argument is a pointer which is dereferenced and read from, + "i" for printf integer formats where the '0' flag is ignored with + precision, and "[" for the starting character of a scanf scanset, + "<" if the specifier introduces a quoted sequence (such as "%<"), + ">" if the specifier terminates a quoted sequence (such as "%>"), + "[" if the specifier introduces a color sequence (such as "%r"), + "]" if the specifier terminates a color sequence (such as "%R"), + "'" (single quote) if the specifier is expected to be quoted when + it appears outside a quoted sequence and unquoted otherwise (such + as the GCC internal printf format directive "%T"), and + "\"" (double quote) if the specifier is not expected to appear in + a quoted sequence (such as the GCC internal format directive "%K". */ + const char *flags2; + /* If this format conversion character consumes more than one argument, + CHAIN points to information about the next argument. For later + arguments, only POINTER_COUNT, TYPES, and the "c", "R", and "W" flags + in FLAGS2 are used. */ + const struct format_char_info *chain; + + struct kernel_ext_fmt *kernel_ext; +}; + + +/* Structure describing a flag accepted by some kind of format. */ +struct format_flag_spec +{ + /* The flag character in question (0 for end of array). */ + int flag_char; + /* Zero if this entry describes the flag character in general, or a + nonzero character that may be found in flags2 if it describes the + flag when used with certain formats only. If the latter, only + the first such entry found that applies to the current conversion + specifier is used; the values of 'name' and 'long_name' it supplies + will be used, if non-NULL and the standard version is higher than + the unpredicated one, for any pedantic warning. For example, 'o' + for strftime formats (meaning 'O' is an extension over C99). */ + int predicate; + /* Nonzero if the next character after this flag in the format should + be skipped ('=' in strfmon), zero otherwise. */ + int skip_next_char; + /* True if the flag introduces quoting (as in GCC's %qE). */ + bool quoting; + /* The name to use for this flag in diagnostic messages. For example, + N_("'0' flag"), N_("field width"). */ + const char *name; + /* Long name for this flag in diagnostic messages; currently only used for + "ISO C does not support ...". For example, N_("the 'I' printf flag"). */ + const char *long_name; + /* The standard version in which it appeared. */ + enum format_std_version std; +}; + + +/* Structure describing a combination of flags that is bad for some kind + of format. */ +struct format_flag_pair +{ + /* The first flag character in question (0 for end of array). */ + int flag_char1; + /* The second flag character. 
*/ + int flag_char2; + /* Nonzero if the message should say that the first flag is ignored with + the second, zero if the combination should simply be objected to. */ + int ignored; + /* Zero if this entry applies whenever this flag combination occurs, + a nonzero character from flags2 if it only applies in some + circumstances (e.g. 'i' for printf formats ignoring 0 with precision). */ + int predicate; +}; + + +/* Structure describing a particular kind of format processed by GCC. */ +struct format_kind_info +{ + /* The name of this kind of format, for use in diagnostics. Also + the name of the attribute (without preceding and following __). */ + const char *name; + /* Specifications of the length modifiers accepted; possibly NULL. */ + const format_length_info *length_char_specs; + /* Details of the conversion specification characters accepted. */ + const format_char_info *conversion_specs; + /* String listing the flag characters that are accepted. */ + const char *flag_chars; + /* String listing modifier characters (strftime) accepted. May be NULL. */ + const char *modifier_chars; + /* Details of the flag characters, including pseudo-flags. */ + const format_flag_spec *flag_specs; + /* Details of bad combinations of flags. */ + const format_flag_pair *bad_flag_pairs; + /* Flags applicable to this kind of format. */ + int flags; + /* Flag character to treat a width as, or 0 if width not used. */ + int width_char; + /* Flag character to treat a left precision (strfmon) as, + or 0 if left precision not used. */ + int left_precision_char; + /* Flag character to treat a precision (for strfmon, right precision) as, + or 0 if precision not used. */ + int precision_char; + /* If a flag character has the effect of suppressing the conversion of + an argument ('*' in scanf), that flag character, otherwise 0. */ + int suppression_char; + /* Flag character to treat a length modifier as (ignored if length + modifiers not used). Need not be placed in flag_chars for conversion + specifiers, but is used to check for bad combinations such as length + modifier with assignment suppression in scanf. */ + int length_code_char; + /* Assignment-allocation flag character ('m' in scanf), otherwise 0. */ + int alloc_char; + /* Pointer to type of argument expected if '*' is used for a width, + or NULL if '*' not used for widths. */ + tree *width_type; + /* Pointer to type of argument expected if '*' is used for a precision, + or NULL if '*' not used for precisions. 
*/ + tree *precision_type; +}; + +#define T_I &integer_type_node +#define T89_I { STD_C89, NULL, T_I } +#define T_L &long_integer_type_node +#define T89_L { STD_C89, NULL, T_L } +#define T_LL &long_long_integer_type_node +#define T9L_LL { STD_C9L, NULL, T_LL } +#define TEX_LL { STD_EXT, NULL, T_LL } +#define T_U64 &local_uint64_t_node +#define TEX_U64 { STD_EXT, "uint64_t", T_U64 } +#define T_S64 &local_int64_t_node +#define TEX_S64 { STD_EXT, "int64_t", T_S64 } +#define T_S &short_integer_type_node +#define T89_S { STD_C89, NULL, T_S } +#define T_UI &unsigned_type_node +#define T89_UI { STD_C89, NULL, T_UI } +#define T_UL &long_unsigned_type_node +#define T89_UL { STD_C89, NULL, T_UL } +#define T_ULL &long_long_unsigned_type_node +#define T9L_ULL { STD_C9L, NULL, T_ULL } +#define TEX_ULL { STD_EXT, NULL, T_ULL } +#define T_US &short_unsigned_type_node +#define T89_US { STD_C89, NULL, T_US } +#define T_F &float_type_node +#define T89_F { STD_C89, NULL, T_F } +#define T99_F { STD_C99, NULL, T_F } +#define T_D &double_type_node +#define T89_D { STD_C89, NULL, T_D } +#define T99_D { STD_C99, NULL, T_D } +#define T_LD &long_double_type_node +#define T89_LD { STD_C89, NULL, T_LD } +#define T99_LD { STD_C99, NULL, T_LD } +#define T_C &char_type_node +#define T89_C { STD_C89, NULL, T_C } +#define T_SC &signed_char_type_node +#define T99_SC { STD_C99, NULL, T_SC } +#define T_UC &unsigned_char_type_node +#define T99_UC { STD_C99, NULL, T_UC } +#define T_V &void_type_node +#define T89_G { STD_C89, NULL, &local_gimple_ptr_node } +#define T89_T { STD_C89, NULL, &local_tree_type_node } +#define T89_V { STD_C89, NULL, T_V } +#define T_W &wchar_type_node +#define T94_W { STD_C94, "wchar_t", T_W } +#define TEX_W { STD_EXT, "wchar_t", T_W } +#define T_WI &wint_type_node +#define T94_WI { STD_C94, "wint_t", T_WI } +#define TEX_WI { STD_EXT, "wint_t", T_WI } +#define T_ST &local_size_t_node +#define T99_ST { STD_C99, "size_t", T_ST } +#define T_SST &local_ssize_t_node +#define T99_SST { STD_C99, "ssize_t", T_SST } +#define T_PD &ptrdiff_type_node +#define T99_PD { STD_C99, "ptrdiff_t", T_PD } +#define T_UPD &unsigned_ptrdiff_type_node +#define T99_UPD { STD_C99, "unsigned ptrdiff_t", T_UPD } +#define T_IM &intmax_type_node +#define T99_IM { STD_C99, "intmax_t", T_IM } +#define T_UIM &uintmax_type_node +#define T99_UIM { STD_C99, "uintmax_t", T_UIM } +#define T_D32 &dfloat32_type_node +#define TEX_D32 { STD_EXT, "_Decimal32", T_D32 } +#define T_D64 &dfloat64_type_node +#define TEX_D64 { STD_EXT, "_Decimal64", T_D64 } +#define T_D128 &dfloat128_type_node +#define TEX_D128 { STD_EXT, "_Decimal128", T_D128 } + +/* Structure describing how format attributes such as "printf" are + interpreted as "gnu_printf" or "ms_printf" on a particular system. + TARGET_OVERRIDES_FORMAT_ATTRIBUTES is used to specify target-specific + defaults. */ +struct target_ovr_attr +{ + /* The name of the to be copied format attribute. */ + const char *named_attr_src; + /* The name of the to be overridden format attribute. 
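Putting the T* macros and struct format_char_info together, a printf-style row for "%d"/"%i" could look roughly like the sketch below. The values are assumed for illustration and the plugin's real print_char_table may differ; the trailing NULL fills the kernel_ext slot this patch adds to the struct.

// Hedged sketch of one conversion-specifier row; values are assumed.
static const format_char_info example_print_table[] =
{
  { "di", 0, STD_C89,
    { T89_I, T99_SC, T89_S, T89_L, T9L_LL, TEX_LL, T99_SST, T99_PD, T99_IM,
      BADLEN, BADLEN, BADLEN },
    "-wp0 +'I", "i", NULL, NULL },
  { NULL, 0, STD_C89, NOLENGTHS, NULL, NULL, NULL, NULL }
};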
*/ + const char *named_attr_dst; +}; + +#endif /* GCC_C_FORMAT_H */ diff --git a/tools/gcc-plugins/gcc-common.h b/tools/gcc-plugins/gcc-common.h new file mode 100644 index 0000000000..6b6c17231a --- /dev/null +++ b/tools/gcc-plugins/gcc-common.h @@ -0,0 +1,981 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* FRR: imported from Linux kernel on 2019-07-29 */ + +#ifndef GCC_COMMON_H_INCLUDED +#define GCC_COMMON_H_INCLUDED + +#include "bversion.h" +#if BUILDING_GCC_VERSION >= 6000 +#include "gcc-plugin.h" +#else +#include "plugin.h" +#endif +#include "plugin-version.h" +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "line-map.h" +#include "input.h" +#include "tree.h" + +#include "tree-inline.h" +#include "version.h" +#include "rtl.h" +#include "tm_p.h" +#include "flags.h" +#include "hard-reg-set.h" +#include "output.h" +#include "except.h" +#include "function.h" +#include "toplev.h" +#if BUILDING_GCC_VERSION >= 5000 +#include "expr.h" +#endif +#include "basic-block.h" +#include "intl.h" +#include "ggc.h" +#include "timevar.h" + +#include "params.h" + +#if BUILDING_GCC_VERSION <= 4009 +#include "pointer-set.h" +#else +#include "hash-map.h" +#endif + +#if BUILDING_GCC_VERSION >= 7000 +#include "memmodel.h" +#endif +#include "emit-rtl.h" +#include "debug.h" +#include "target.h" +#include "langhooks.h" +#include "cfgloop.h" +#include "cgraph.h" +#include "opts.h" + +#if BUILDING_GCC_VERSION == 4005 +#include <sys/mman.h> +#endif + +#if BUILDING_GCC_VERSION >= 4007 +#include "tree-pretty-print.h" +#include "gimple-pretty-print.h" +#endif + +#if BUILDING_GCC_VERSION >= 4006 +/* + * The c-family headers were moved into a subdirectory in GCC version + * 4.7, but most plugin-building users of GCC 4.6 are using the Debian + * or Ubuntu package, which has an out-of-tree patch to move this to the + * same location as found in 4.7 and later: + * https://sources.debian.net/src/gcc-4.6/4.6.3-14/debian/patches/pr45078.diff/ + */ +#include "c-family/c-common.h" +#else +#include "c-common.h" +#endif + +#if BUILDING_GCC_VERSION <= 4008 +#include "tree-flow.h" +#else +#include "tree-cfgcleanup.h" +#include "tree-ssa-operands.h" +#include "tree-into-ssa.h" +#endif + +#if BUILDING_GCC_VERSION >= 4008 +#include "is-a.h" +#endif + +#include "diagnostic.h" +#include "tree-dump.h" +#include "tree-pass.h" +#if BUILDING_GCC_VERSION >= 4009 +#include "pass_manager.h" +#endif +#include "predict.h" +#include "ipa-utils.h" + +#if BUILDING_GCC_VERSION >= 8000 +#include "stringpool.h" +#endif + +#if BUILDING_GCC_VERSION >= 4009 +#include "attribs.h" +#include "varasm.h" +#include "stor-layout.h" +#include "internal-fn.h" +#include "gimple-expr.h" +#include "gimple-fold.h" +#include "context.h" +#include "tree-ssa-alias.h" +#include "tree-ssa.h" +#include "stringpool.h" +#if BUILDING_GCC_VERSION >= 7000 +#include "tree-vrp.h" +#endif +#include "tree-ssanames.h" +#include "print-tree.h" +#include "tree-eh.h" +#include "stmt.h" +#include "gimplify.h" +#endif + +#include "gimple.h" + +#if BUILDING_GCC_VERSION >= 4009 +#include "tree-ssa-operands.h" +#include "tree-phinodes.h" +#include "tree-cfg.h" +#include "gimple-iterator.h" +#include "gimple-ssa.h" +#include "ssa-iterators.h" +#endif + +#if BUILDING_GCC_VERSION >= 5000 +#include "builtins.h" +#endif + +/* missing from basic_block.h... 
*/ +void debug_dominance_info(enum cdi_direction dir); +void debug_dominance_tree(enum cdi_direction dir, basic_block root); + +#if BUILDING_GCC_VERSION == 4006 +void debug_gimple_stmt(gimple); +void debug_gimple_seq(gimple_seq); +void print_gimple_seq(FILE *, gimple_seq, int, int); +void print_gimple_stmt(FILE *, gimple, int, int); +void print_gimple_expr(FILE *, gimple, int, int); +void dump_gimple_stmt(pretty_printer *, gimple, int, int); +#endif + +#ifndef __unused +#define __unused __attribute__((__unused__)) +#endif +#ifndef __visible +#define __visible __attribute__((visibility("default"))) +#endif + +#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) +#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) +#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node)) +#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node)) + +#if BUILDING_GCC_VERSION < 9000 +/* should come from c-tree.h if only it were installed for gcc 4.5... */ +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) +#endif + +static inline tree build_const_char_string(int len, const char *str) +{ + tree cstr, elem, index, type; + + cstr = build_string(len, str); + elem = build_type_variant(char_type_node, 1, 0); + index = build_index_type(size_int(len - 1)); + type = build_array_type(elem, index); + TREE_TYPE(cstr) = type; + TREE_CONSTANT(cstr) = 1; + TREE_READONLY(cstr) = 1; + TREE_STATIC(cstr) = 1; + return cstr; +} + +#define PASS_INFO(NAME, REF, ID, POS) \ +struct register_pass_info NAME##_pass_info = { \ + .pass = make_##NAME##_pass(), \ + .reference_pass_name = REF, \ + .ref_pass_instance_number = ID, \ + .pos_op = POS, \ +} + +#if BUILDING_GCC_VERSION == 4005 +#define FOR_EACH_LOCAL_DECL(FUN, I, D) \ + for (tree vars = (FUN)->local_decls, (I) = 0; \ + vars && ((D) = TREE_VALUE(vars)); \ + vars = TREE_CHAIN(vars), (I)++) +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE))) +#define FOR_EACH_VEC_ELT(T, V, I, P) \ + for (I = 0; VEC_iterate(T, (V), (I), (P)); ++(I)) +#define TODO_rebuild_cgraph_edges 0 +#define SCOPE_FILE_SCOPE_P(EXP) (!(EXP)) + +#ifndef O_BINARY +#define O_BINARY 0 +#endif + +typedef struct varpool_node *varpool_node_ptr; + +static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) +{ + tree fndecl; + + if (!is_gimple_call(stmt)) + return false; + fndecl = gimple_call_fndecl(stmt); + if (!fndecl || DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) + return false; + return DECL_FUNCTION_CODE(fndecl) == code; +} + +static inline bool is_simple_builtin(tree decl) +{ + if (decl && DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL) + return false; + + switch (DECL_FUNCTION_CODE(decl)) { + /* Builtins that expand to constants. */ + case BUILT_IN_CONSTANT_P: + case BUILT_IN_EXPECT: + case BUILT_IN_OBJECT_SIZE: + case BUILT_IN_UNREACHABLE: + /* Simple register moves or loads from stack. */ + case BUILT_IN_RETURN_ADDRESS: + case BUILT_IN_EXTRACT_RETURN_ADDR: + case BUILT_IN_FROB_RETURN_ADDR: + case BUILT_IN_RETURN: + case BUILT_IN_AGGREGATE_INCOMING_ADDRESS: + case BUILT_IN_FRAME_ADDRESS: + case BUILT_IN_VA_END: + case BUILT_IN_STACK_SAVE: + case BUILT_IN_STACK_RESTORE: + /* Exception state returns or moves registers around. 
*/ + case BUILT_IN_EH_FILTER: + case BUILT_IN_EH_POINTER: + case BUILT_IN_EH_COPY_VALUES: + return true; + + default: + return false; + } +} + +static inline void add_local_decl(struct function *fun, tree d) +{ + gcc_assert(TREE_CODE(d) == VAR_DECL); + fun->local_decls = tree_cons(NULL_TREE, d, fun->local_decls); +} +#endif + +#if BUILDING_GCC_VERSION <= 4006 +#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN) +#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP) +#define EDGE_PRESERVE 0ULL +#define HOST_WIDE_INT_PRINT_HEX_PURE "%" HOST_WIDE_INT_PRINT "x" +#define flag_fat_lto_objects true + +#define get_random_seed(noinit) ({ \ + unsigned HOST_WIDE_INT seed; \ + sscanf(get_random_seed(noinit), "%" HOST_WIDE_INT_PRINT "x", &seed); \ + seed * seed; }) + +#define int_const_binop(code, arg1, arg2) \ + int_const_binop((code), (arg1), (arg2), 0) + +static inline bool gimple_clobber_p(gimple s __unused) +{ + return false; +} + +static inline bool gimple_asm_clobbers_memory_p(const_gimple stmt) +{ + unsigned i; + + for (i = 0; i < gimple_asm_nclobbers(stmt); i++) { + tree op = gimple_asm_clobber_op(stmt, i); + + if (!strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "memory")) + return true; + } + + return false; +} + +static inline tree builtin_decl_implicit(enum built_in_function fncode) +{ + return implicit_built_in_decls[fncode]; +} + +static inline int ipa_reverse_postorder(struct cgraph_node **order) +{ + return cgraph_postorder(order); +} + +static inline struct cgraph_node *cgraph_create_node(tree decl) +{ + return cgraph_node(decl); +} + +static inline struct cgraph_node *cgraph_get_create_node(tree decl) +{ + struct cgraph_node *node = cgraph_get_node(decl); + + return node ? node : cgraph_node(decl); +} + +static inline bool cgraph_function_with_gimple_body_p(struct cgraph_node *node) +{ + return node->analyzed && !node->thunk.thunk_p && !node->alias; +} + +static inline struct cgraph_node *cgraph_first_function_with_gimple_body(void) +{ + struct cgraph_node *node; + + for (node = cgraph_nodes; node; node = node->next) + if (cgraph_function_with_gimple_body_p(node)) + return node; + return NULL; +} + +static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node) +{ + for (node = node->next; node; node = node->next) + if (cgraph_function_with_gimple_body_p(node)) + return node; + return NULL; +} + +static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable) +{ + cgraph_node_ptr alias; + + if (callback(node, data)) + return true; + + for (alias = node->same_body; alias; alias = alias->next) { + if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE) + if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable)) + return true; + } + + return false; +} + +#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \ + for ((node) = cgraph_first_function_with_gimple_body(); (node); \ + (node) = cgraph_next_function_with_gimple_body(node)) + +static inline void varpool_add_new_variable(tree decl) +{ + varpool_finalize_decl(decl); +} +#endif + +#if BUILDING_GCC_VERSION <= 4007 +#define FOR_EACH_FUNCTION(node) \ + for (node = cgraph_nodes; node; node = node->next) +#define FOR_EACH_VARIABLE(node) \ + for (node = varpool_nodes; node; node = node->next) +#define PROP_loops 0 +#define NODE_SYMBOL(node) (node) +#define NODE_DECL(node) (node)->decl +#define INSN_LOCATION(INSN) RTL_LOCATION(INSN) +#define vNULL NULL + +static inline int 
bb_loop_depth(const_basic_block bb) +{ + return bb->loop_father ? loop_depth(bb->loop_father) : 0; +} + +static inline bool gimple_store_p(gimple gs) +{ + tree lhs = gimple_get_lhs(gs); + + return lhs && !is_gimple_reg(lhs); +} + +static inline void gimple_init_singleton(gimple g __unused) +{ +} +#endif + +#if BUILDING_GCC_VERSION == 4007 || BUILDING_GCC_VERSION == 4008 +static inline struct cgraph_node *cgraph_alias_target(struct cgraph_node *n) +{ + return cgraph_alias_aliased_node(n); +} +#endif + +#if BUILDING_GCC_VERSION <= 4008 +#define ENTRY_BLOCK_PTR_FOR_FN(FN) ENTRY_BLOCK_PTR_FOR_FUNCTION(FN) +#define EXIT_BLOCK_PTR_FOR_FN(FN) EXIT_BLOCK_PTR_FOR_FUNCTION(FN) +#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info) +#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks) +#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges) +#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block) +#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map) +#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status) +#define BASIC_BLOCK_FOR_FN(FN, N) BASIC_BLOCK_FOR_FUNCTION((FN), (N)) +#define NODE_IMPLICIT_ALIAS(node) (node)->same_body_alias +#define VAR_P(NODE) (TREE_CODE(NODE) == VAR_DECL) + +static inline bool tree_fits_shwi_p(const_tree t) +{ + if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST) + return false; + + if (TREE_INT_CST_HIGH(t) == 0 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) >= 0) + return true; + + if (TREE_INT_CST_HIGH(t) == -1 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) < 0 && !TYPE_UNSIGNED(TREE_TYPE(t))) + return true; + + return false; +} + +static inline bool tree_fits_uhwi_p(const_tree t) +{ + if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST) + return false; + + return TREE_INT_CST_HIGH(t) == 0; +} + +static inline HOST_WIDE_INT tree_to_shwi(const_tree t) +{ + gcc_assert(tree_fits_shwi_p(t)); + return TREE_INT_CST_LOW(t); +} + +static inline unsigned HOST_WIDE_INT tree_to_uhwi(const_tree t) +{ + gcc_assert(tree_fits_uhwi_p(t)); + return TREE_INT_CST_LOW(t); +} + +static inline const char *get_tree_code_name(enum tree_code code) +{ + gcc_assert(code < MAX_TREE_CODES); + return tree_code_name[code]; +} + +#define ipa_remove_stmt_references(cnode, stmt) + +typedef union gimple_statement_d gasm; +typedef union gimple_statement_d gassign; +typedef union gimple_statement_d gcall; +typedef union gimple_statement_d gcond; +typedef union gimple_statement_d gdebug; +typedef union gimple_statement_d ggoto; +typedef union gimple_statement_d gphi; +typedef union gimple_statement_d greturn; + +static inline gasm *as_a_gasm(gimple stmt) +{ + return stmt; +} + +static inline const gasm *as_a_const_gasm(const_gimple stmt) +{ + return stmt; +} + +static inline gassign *as_a_gassign(gimple stmt) +{ + return stmt; +} + +static inline const gassign *as_a_const_gassign(const_gimple stmt) +{ + return stmt; +} + +static inline gcall *as_a_gcall(gimple stmt) +{ + return stmt; +} + +static inline const gcall *as_a_const_gcall(const_gimple stmt) +{ + return stmt; +} + +static inline gcond *as_a_gcond(gimple stmt) +{ + return stmt; +} + +static inline const gcond *as_a_const_gcond(const_gimple stmt) +{ + return stmt; +} + +static inline gdebug *as_a_gdebug(gimple stmt) +{ + return stmt; +} + +static inline const gdebug *as_a_const_gdebug(const_gimple stmt) +{ + return stmt; +} + +static inline ggoto *as_a_ggoto(gimple stmt) +{ + return stmt; +} + +static inline const ggoto *as_a_const_ggoto(const_gimple stmt) +{ + return stmt; +} + +static inline gphi 
*as_a_gphi(gimple stmt) +{ + return stmt; +} + +static inline const gphi *as_a_const_gphi(const_gimple stmt) +{ + return stmt; +} + +static inline greturn *as_a_greturn(gimple stmt) +{ + return stmt; +} + +static inline const greturn *as_a_const_greturn(const_gimple stmt) +{ + return stmt; +} +#endif + +#if BUILDING_GCC_VERSION == 4008 +#define NODE_SYMBOL(node) (&(node)->symbol) +#define NODE_DECL(node) (node)->symbol.decl +#endif + +#if BUILDING_GCC_VERSION >= 4008 +#define add_referenced_var(var) +#define mark_sym_for_renaming(var) +#define varpool_mark_needed_node(node) +#define create_var_ann(var) +#define TODO_dump_func 0 +#define TODO_dump_cgraph 0 +#endif + +#if BUILDING_GCC_VERSION <= 4009 +#define TODO_verify_il 0 +#define AVAIL_INTERPOSABLE AVAIL_OVERWRITABLE + +#define section_name_prefix LTO_SECTION_NAME_PREFIX +#define fatal_error(loc, gmsgid, ...) fatal_error((gmsgid), __VA_ARGS__) + +rtx emit_move_insn(rtx x, rtx y); + +typedef struct rtx_def rtx_insn; + +static inline const char *get_decl_section_name(const_tree decl) +{ + if (DECL_SECTION_NAME(decl) == NULL_TREE) + return NULL; + + return TREE_STRING_POINTER(DECL_SECTION_NAME(decl)); +} + +static inline void set_decl_section_name(tree node, const char *value) +{ + if (value) + DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value); + else + DECL_SECTION_NAME(node) = NULL; +} +#endif + +#if BUILDING_GCC_VERSION == 4009 +typedef struct gimple_statement_asm gasm; +typedef struct gimple_statement_base gassign; +typedef struct gimple_statement_call gcall; +typedef struct gimple_statement_base gcond; +typedef struct gimple_statement_base gdebug; +typedef struct gimple_statement_base ggoto; +typedef struct gimple_statement_phi gphi; +typedef struct gimple_statement_base greturn; + +static inline gasm *as_a_gasm(gimple stmt) +{ + return as_a<gasm>(stmt); +} + +static inline const gasm *as_a_const_gasm(const_gimple stmt) +{ + return as_a<const gasm>(stmt); +} + +static inline gassign *as_a_gassign(gimple stmt) +{ + return stmt; +} + +static inline const gassign *as_a_const_gassign(const_gimple stmt) +{ + return stmt; +} + +static inline gcall *as_a_gcall(gimple stmt) +{ + return as_a<gcall>(stmt); +} + +static inline const gcall *as_a_const_gcall(const_gimple stmt) +{ + return as_a<const gcall>(stmt); +} + +static inline gcond *as_a_gcond(gimple stmt) +{ + return stmt; +} + +static inline const gcond *as_a_const_gcond(const_gimple stmt) +{ + return stmt; +} + +static inline gdebug *as_a_gdebug(gimple stmt) +{ + return stmt; +} + +static inline const gdebug *as_a_const_gdebug(const_gimple stmt) +{ + return stmt; +} + +static inline ggoto *as_a_ggoto(gimple stmt) +{ + return stmt; +} + +static inline const ggoto *as_a_const_ggoto(const_gimple stmt) +{ + return stmt; +} + +static inline gphi *as_a_gphi(gimple stmt) +{ + return as_a<gphi>(stmt); +} + +static inline const gphi *as_a_const_gphi(const_gimple stmt) +{ + return as_a<const gphi>(stmt); +} + +static inline greturn *as_a_greturn(gimple stmt) +{ + return stmt; +} + +static inline const greturn *as_a_const_greturn(const_gimple stmt) +{ + return stmt; +} +#endif + +#if BUILDING_GCC_VERSION >= 4009 +#define TODO_ggc_collect 0 +#define NODE_SYMBOL(node) (node) +#define NODE_DECL(node) (node)->decl +#define cgraph_node_name(node) (node)->name() +#define NODE_IMPLICIT_ALIAS(node) (node)->cpp_implicit_alias + +static inline opt_pass *get_pass_for_id(int id) +{ + return g->get_passes()->get_pass_for_id(id); +} +#endif + +#if BUILDING_GCC_VERSION >= 5000 && 
BUILDING_GCC_VERSION < 6000 +/* gimple related */ +template <> +template <> +inline bool is_a_helper<const gassign *>::test(const_gimple gs) +{ + return gs->code == GIMPLE_ASSIGN; +} +#endif + +#if BUILDING_GCC_VERSION >= 5000 +#define TODO_verify_ssa TODO_verify_il +#define TODO_verify_flow TODO_verify_il +#define TODO_verify_stmts TODO_verify_il +#define TODO_verify_rtl_sharing TODO_verify_il + +#define INSN_DELETED_P(insn) (insn)->deleted() + +static inline const char *get_decl_section_name(const_tree decl) +{ + return DECL_SECTION_NAME(decl); +} + +/* symtab/cgraph related */ +#define debug_cgraph_node(node) (node)->debug() +#define cgraph_get_node(decl) cgraph_node::get(decl) +#define cgraph_get_create_node(decl) cgraph_node::get_create(decl) +#define cgraph_create_node(decl) cgraph_node::create(decl) +#define cgraph_n_nodes symtab->cgraph_count +#define cgraph_max_uid symtab->cgraph_max_uid +#define varpool_get_node(decl) varpool_node::get(decl) +#define dump_varpool_node(file, node) (node)->dump(file) + +#if BUILDING_GCC_VERSION >= 8000 +#define cgraph_create_edge(caller, callee, call_stmt, count, freq) \ + (caller)->create_edge((callee), (call_stmt), (count)) + +#define cgraph_create_edge_including_clones(caller, callee, \ + old_call_stmt, call_stmt, count, freq, reason) \ + (caller)->create_edge_including_clones((callee), \ + (old_call_stmt), (call_stmt), (count), (reason)) +#else +#define cgraph_create_edge(caller, callee, call_stmt, count, freq) \ + (caller)->create_edge((callee), (call_stmt), (count), (freq)) + +#define cgraph_create_edge_including_clones(caller, callee, \ + old_call_stmt, call_stmt, count, freq, reason) \ + (caller)->create_edge_including_clones((callee), \ + (old_call_stmt), (call_stmt), (count), (freq), (reason)) +#endif + +typedef struct cgraph_node *cgraph_node_ptr; +typedef struct cgraph_edge *cgraph_edge_p; +typedef struct varpool_node *varpool_node_ptr; + +static inline void change_decl_assembler_name(tree decl, tree name) +{ + symtab->change_decl_assembler_name(decl, name); +} + +static inline void varpool_finalize_decl(tree decl) +{ + varpool_node::finalize_decl(decl); +} + +static inline void varpool_add_new_variable(tree decl) +{ + varpool_node::add(decl); +} + +static inline unsigned int rebuild_cgraph_edges(void) +{ + return cgraph_edge::rebuild_edges(); +} + +static inline cgraph_node_ptr cgraph_function_node(cgraph_node_ptr node, enum availability *availability) +{ + return node->function_symbol(availability); +} + +static inline cgraph_node_ptr cgraph_function_or_thunk_node(cgraph_node_ptr node, enum availability *availability = NULL) +{ + return node->ultimate_alias_target(availability); +} + +static inline bool cgraph_only_called_directly_p(cgraph_node_ptr node) +{ + return node->only_called_directly_p(); +} + +static inline enum availability cgraph_function_body_availability(cgraph_node_ptr node) +{ + return node->get_availability(); +} + +static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node) +{ + return node->get_alias_target(); +} + +static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable) +{ + return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable); +} + +static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data) +{ + return symtab->add_cgraph_insertion_hook(hook, data); +} + +static inline void cgraph_remove_function_insertion_hook(struct 
cgraph_node_hook_list *entry) +{ + symtab->remove_cgraph_insertion_hook(entry); +} + +static inline struct cgraph_node_hook_list *cgraph_add_node_removal_hook(cgraph_node_hook hook, void *data) +{ + return symtab->add_cgraph_removal_hook(hook, data); +} + +static inline void cgraph_remove_node_removal_hook(struct cgraph_node_hook_list *entry) +{ + symtab->remove_cgraph_removal_hook(entry); +} + +static inline struct cgraph_2node_hook_list *cgraph_add_node_duplication_hook(cgraph_2node_hook hook, void *data) +{ + return symtab->add_cgraph_duplication_hook(hook, data); +} + +static inline void cgraph_remove_node_duplication_hook(struct cgraph_2node_hook_list *entry) +{ + symtab->remove_cgraph_duplication_hook(entry); +} + +static inline void cgraph_call_node_duplication_hooks(cgraph_node_ptr node, cgraph_node_ptr node2) +{ + symtab->call_cgraph_duplication_hooks(node, node2); +} + +static inline void cgraph_call_edge_duplication_hooks(cgraph_edge *cs1, cgraph_edge *cs2) +{ + symtab->call_edge_duplication_hooks(cs1, cs2); +} + +#if BUILDING_GCC_VERSION >= 6000 +typedef gimple *gimple_ptr; +typedef const gimple *const_gimple_ptr; +#define gimple gimple_ptr +#define const_gimple const_gimple_ptr +#undef CONST_CAST_GIMPLE +#define CONST_CAST_GIMPLE(X) CONST_CAST(gimple, (X)) +#endif + +/* gimple related */ +static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree lhs, tree op1, tree op2 MEM_STAT_DECL) +{ + return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); +} + +template <> +template <> +inline bool is_a_helper<const ggoto *>::test(const_gimple gs) +{ + return gs->code == GIMPLE_GOTO; +} + +template <> +template <> +inline bool is_a_helper<const greturn *>::test(const_gimple gs) +{ + return gs->code == GIMPLE_RETURN; +} + +static inline gasm *as_a_gasm(gimple stmt) +{ + return as_a<gasm *>(stmt); +} + +static inline const gasm *as_a_const_gasm(const_gimple stmt) +{ + return as_a<const gasm *>(stmt); +} + +static inline gassign *as_a_gassign(gimple stmt) +{ + return as_a<gassign *>(stmt); +} + +static inline const gassign *as_a_const_gassign(const_gimple stmt) +{ + return as_a<const gassign *>(stmt); +} + +static inline gcall *as_a_gcall(gimple stmt) +{ + return as_a<gcall *>(stmt); +} + +static inline const gcall *as_a_const_gcall(const_gimple stmt) +{ + return as_a<const gcall *>(stmt); +} + +static inline ggoto *as_a_ggoto(gimple stmt) +{ + return as_a<ggoto *>(stmt); +} + +static inline const ggoto *as_a_const_ggoto(const_gimple stmt) +{ + return as_a<const ggoto *>(stmt); +} + +static inline gphi *as_a_gphi(gimple stmt) +{ + return as_a<gphi *>(stmt); +} + +static inline const gphi *as_a_const_gphi(const_gimple stmt) +{ + return as_a<const gphi *>(stmt); +} + +static inline greturn *as_a_greturn(gimple stmt) +{ + return as_a<greturn *>(stmt); +} + +static inline const greturn *as_a_const_greturn(const_gimple stmt) +{ + return as_a<const greturn *>(stmt); +} + +/* IPA/LTO related */ +#define ipa_ref_list_referring_iterate(L, I, P) \ + (L)->referring.iterate((I), &(P)) +#define ipa_ref_list_reference_iterate(L, I, P) \ + (L)->reference.iterate((I), &(P)) + +static inline cgraph_node_ptr ipa_ref_referring_node(struct ipa_ref *ref) +{ + return dyn_cast<cgraph_node_ptr>(ref->referring); +} + +static inline void ipa_remove_stmt_references(symtab_node *referring_node, gimple stmt) +{ + referring_node->remove_stmt_references(stmt); +} +#endif + +#if BUILDING_GCC_VERSION < 6000 +#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, 
preversep, pvolatilep, keep_aligning) \ + get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, pvolatilep, keep_aligning) +#define gen_rtx_set(ARG0, ARG1) gen_rtx_SET(VOIDmode, (ARG0), (ARG1)) +#endif + +#if BUILDING_GCC_VERSION >= 6000 +#define gen_rtx_set(ARG0, ARG1) gen_rtx_SET((ARG0), (ARG1)) +#endif + +#ifdef __cplusplus +static inline void debug_tree(const_tree t) +{ + debug_tree(CONST_CAST_TREE(t)); +} + +static inline void debug_gimple_stmt(const_gimple s) +{ + debug_gimple_stmt(CONST_CAST_GIMPLE(s)); +} +#else +#define debug_tree(t) debug_tree(CONST_CAST_TREE(t)) +#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s)) +#endif + +#if BUILDING_GCC_VERSION >= 7000 +#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning) \ + get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep) +#endif + +#if BUILDING_GCC_VERSION < 7000 +#define SET_DECL_ALIGN(decl, align) DECL_ALIGN(decl) = (align) +#define SET_DECL_MODE(decl, mode) DECL_MODE(decl) = (mode) +#endif + +#endif diff --git a/tools/gcc-plugins/gcc-retain-typeinfo.patch b/tools/gcc-plugins/gcc-retain-typeinfo.patch new file mode 100644 index 0000000000..ec51f0be6f --- /dev/null +++ b/tools/gcc-plugins/gcc-retain-typeinfo.patch @@ -0,0 +1,11 @@ +--- a/src/gcc/c/c-typeck.c ++++ b/src/gcc/c/c-typeck.c +@@ -5716,8 +5716,6 @@ build_c_cast (location_t loc, tree type, tree expr) + if (objc_is_object_ptr (type) && objc_is_object_ptr (TREE_TYPE (expr))) + return build1 (NOP_EXPR, type, expr); + +- type = TYPE_MAIN_VARIANT (type); +- + if (TREE_CODE (type) == ARRAY_TYPE) + { + error_at (loc, "cast specifies array type"); diff --git a/tools/gen_northbound_callbacks.c b/tools/gen_northbound_callbacks.c index cbdf01e7b8..7118986854 100644 --- a/tools/gen_northbound_callbacks.c +++ b/tools/gen_northbound_callbacks.c @@ -358,7 +358,7 @@ int main(int argc, char *argv[]) if (argc != 1) usage(EXIT_FAILURE); - yang_init(); + yang_init(false); if (search_path) ly_ctx_set_searchdir(ly_native_ctx, search_path); diff --git a/tools/gen_yang_deviations.c b/tools/gen_yang_deviations.c index f611f1c57e..f908e1fc69 100644 --- a/tools/gen_yang_deviations.c +++ b/tools/gen_yang_deviations.c @@ -65,7 +65,7 @@ int main(int argc, char *argv[]) if (argc != 1) usage(EXIT_FAILURE); - yang_init(); + yang_init(false); /* Load YANG module. 
*/ module = yang_module_load(argv[0]); diff --git a/tools/start-stop-daemon.c b/tools/start-stop-daemon.c index c75306a959..13118a2769 100644 --- a/tools/start-stop-daemon.c +++ b/tools/start-stop-daemon.c @@ -235,7 +235,7 @@ static const char *next_dirname(const char *s) { const char *cur; - cur = (const char *)s; + cur = s; if (*cur != '\0') { for (; *cur != '/'; ++cur) @@ -255,7 +255,7 @@ static void add_namespace(const char *path) const char *nsdirname, *nsname, *cur; struct namespace *namespace; - cur = (const char *)path; + cur = path; nsdirname = nsname = ""; while ((cur = next_dirname(cur))[0] != '\0') { @@ -273,7 +273,7 @@ static void add_namespace(const char *path) badusage("invalid namepspace path"); namespace = xmalloc(sizeof(*namespace)); - namespace->path = (const char *)path; + namespace->path = path; namespace->nstype = nstype; LIST_INSERT_HEAD(&namespace_head, namespace, list); } diff --git a/vrrpd/vrrp_ndisc.c b/vrrpd/vrrp_ndisc.c index dc546b09a2..b989e66f60 100644 --- a/vrrpd/vrrp_ndisc.c +++ b/vrrpd/vrrp_ndisc.c @@ -83,8 +83,7 @@ static int vrrp_ndisc_una_build(struct interface *ifp, struct ipaddr *ip, struct nd_opt_hdr *nd_opt_h = (struct nd_opt_hdr *)((char *)ndh + sizeof(struct nd_neighbor_advert)); - char *nd_opt_lladdr = - (char *)((char *)nd_opt_h + sizeof(struct nd_opt_hdr)); + char *nd_opt_lladdr = ((char *)nd_opt_h + sizeof(struct nd_opt_hdr)); char *lladdr = (char *)ifp->hw_addr; /* diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index 310acdf37f..a5fa686eb5 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -2877,13 +2877,12 @@ static void backup_config_file(const char *fbackup) strlcat(integrate_sav, CONF_BACKUP_EXT, integrate_sav_sz); /* Move current configuration file to backup config file. */ - if (unlink(integrate_sav) != 0) { - vty_out(vty, "Warning: %s unlink failed\n", integrate_sav); - } - if (rename(fbackup, integrate_sav) != 0) { - vty_out(vty, "Error renaming %s to %s\n", fbackup, - integrate_sav); - } + if (unlink(integrate_sav) != 0 && errno != ENOENT) + vty_out(vty, "Unlink failed for %s: %s\n", integrate_sav, + strerror(errno)); + if (rename(fbackup, integrate_sav) != 0 && errno != ENOENT) + vty_out(vty, "Error renaming %s to %s: %s\n", fbackup, + integrate_sav, strerror(errno)); free(integrate_sav); } diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c index ed9616963d..05e6651229 100644 --- a/watchfrr/watchfrr.c +++ b/watchfrr/watchfrr.c @@ -318,9 +318,8 @@ static pid_t run_background(char *shell_cmd) } default: /* Parent process: we will reap the child later. */ - flog_err_sys(EC_LIB_SYSTEM_CALL, - "Forked background command [pid %d]: %s", - (int)child, shell_cmd); + zlog_info("Forked background command [pid %d]: %s", (int)child, + shell_cmd); return child; } } @@ -559,9 +558,9 @@ static int wakeup_init(struct thread *t_wakeup) dmn->t_wakeup = NULL; if (try_connect(dmn) < 0) { - flog_err(EC_WATCHFRR_CONNECTION, - "%s state -> down : initial connection attempt failed", - dmn->name); + zlog_info( + "%s state -> down : initial connection attempt failed", + dmn->name); dmn->state = DAEMON_DOWN; } phase_check(); diff --git a/yang/embedmodel.py b/yang/embedmodel.py index 52671f99a8..624a11da9d 100644 --- a/yang/embedmodel.py +++ b/yang/embedmodel.py @@ -3,11 +3,18 @@ # YANG module to C wrapper # written 2018 by David Lamparter, placed in Public Domain. 
-import sys, string, re +import sys +import os +import string +import re inname = sys.argv[1] outname = sys.argv[2] +outdir = os.path.dirname(os.path.abspath(outname)) +if not os.path.isdir(outdir): + os.makedirs(outdir) + # these are regexes to avoid a compile-time/host dependency on yang-tools # or python-yang. Cross-compiling FRR is already somewhat involved, no need # to make it even harder. diff --git a/yang/frr-bfdd.yang b/yang/frr-bfdd.yang index f9ac2e43b0..b870bfd0c8 100644 --- a/yang/frr-bfdd.yang +++ b/yang/frr-bfdd.yang @@ -16,7 +16,7 @@ module frr-bfdd { prefix frr-route-types; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-eigrpd.yang b/yang/frr-eigrpd.yang index 0c62954570..092b714045 100644 --- a/yang/frr-eigrpd.yang +++ b/yang/frr-eigrpd.yang @@ -16,7 +16,7 @@ module frr-eigrpd { prefix frr-route-types; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -191,7 +191,7 @@ module frr-eigrpd { leaf-list neighbor { description "Specific EIGRP neighbor"; - type inet:ipv4-prefix; + type inet:ipv4-address; } list redistribute { diff --git a/yang/frr-filter.yang b/yang/frr-filter.yang index e79ede87b7..61ffa51552 100644 --- a/yang/frr-filter.yang +++ b/yang/frr-filter.yang @@ -10,7 +10,7 @@ module frr-filter { prefix yang; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-interface.yang b/yang/frr-interface.yang index 4f7f3beebd..1f3eebb2ab 100644 --- a/yang/frr-interface.yang +++ b/yang/frr-interface.yang @@ -4,7 +4,7 @@ module frr-interface { prefix frr-interface; organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang index faab1e55b2..57f81892e0 100644 --- a/yang/frr-isisd.yang +++ b/yang/frr-isisd.yang @@ -20,13 +20,18 @@ module frr-isisd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; description "This module defines a model for managing FRR isisd daemon."; + revision 2020-04-06 { + description + "Group LSP timers in a container so that they can be displayed and + configured together"; + } revision 2019-12-17 { description "Changed default area is-type to level-1-2"; @@ -34,7 +39,7 @@ module frr-isisd { revision 2019-09-09 { description "Changed interface references to use - frr-interface:interface-ref typedef"; + frr-interface:interface-ref typedef"; } revision 2018-07-26 { description @@ -301,8 +306,8 @@ module frr-isisd { } grouping interface-config { - description "Interface configuration grouping"; - + description + "Interface configuration grouping"; leaf area-tag { type string; mandatory true; @@ -333,8 +338,9 @@ module frr-isisd { leaf bfd-monitoring { type boolean; - default false; - description "Monitor IS-IS peers on this circuit."; + default "false"; + description + "Monitor IS-IS peers on this circuit."; } container csnp-interval { @@ -490,8 +496,8 @@ module frr-isisd { leaf network-type { type network-type; - default "broadcast"; must 
"(. = \"point-to-point\") or (. = \"broadcast\")"; + default "broadcast"; description "Explicitly configured type of IS-IS circuit (broadcast or point-to-point)."; } @@ -570,38 +576,50 @@ module frr-isisd { } grouping adjacency-state { + description + "Adjacency state"; container adjacencies { config false; + description + "This container lists the adjacencies of + the local node."; list adjacency { + description + "List of operational adjacencies."; leaf neighbor-sys-type { type level; description "Level capability of neighboring system"; } + leaf neighbor-sysid { type system-id; description "The system-id of the neighbor"; } + leaf neighbor-extended-circuit-id { type extended-circuit-id; description "Circuit ID of the neighbor"; } + leaf neighbor-snpa { type snpa; description "SNPA of the neighbor"; } + leaf hold-timer { type uint16; - units seconds; + units "seconds"; description "The holding time in seconds for this adjacency. This value is based on received hello PDUs and the elapsed time since receipt."; } + leaf neighbor-priority { type uint8 { range "0 .. 127"; @@ -610,37 +628,36 @@ module frr-isisd { "Priority of the neighboring IS for becoming the DIS."; } + leaf state { type adj-state-type; description "This leaf describes the state of the interface."; } - - description - "List of operational adjacencies."; } - description - "This container lists the adjacencies of - the local node."; } - description - "Adjacency state"; } grouping event-counters { + description + "Grouping for IS-IS interface event counters"; container event-counters { config false; + description + "IS-IS interface event counters."; leaf adjacency-changes { type uint32; description "The number of times an adjacency state change has occurred on this interface."; } + leaf adjacency-number { type uint32; description "The number of adjacencies on this interface."; } + leaf init-fails { type uint32; description @@ -649,12 +666,14 @@ module frr-isisd { as PPP NCP failures. 
Failures to form an adjacency are counted by adjacency-rejects."; } + leaf adjacency-rejects { type uint32; description "The number of times an adjacency has been rejected on this interface."; } + leaf id-len-mismatch { type uint32; description @@ -662,6 +681,7 @@ module frr-isisd { field length different from that for this system has been received on this interface."; } + leaf max-area-addresses-mismatch { type uint32; description @@ -670,26 +690,26 @@ module frr-isisd { max area address field differing from that of this system."; } + leaf authentication-type-fails { type uint32; description "Number of authentication type mismatches."; } + leaf authentication-fails { type uint32; description "Number of authentication key failures."; } - description "IS-IS interface event counters."; } - description - "Grouping for IS-IS interface event counters"; } grouping interface-state { description "IS-IS interface operational state."; uses adjacency-state; + uses event-counters; } @@ -814,75 +834,75 @@ module frr-isisd { "MTU of an LSP."; } - container refresh-interval { + container timers { description - ""; - leaf level-1 { - type uint16; - units "seconds"; - default "900"; + "LSP-related timers"; + container level-1 { description - "LSP refresh interval for level-1."; - } + "Level-1 LSP-related timers"; + leaf refresh-interval { + type uint16; + units "seconds"; + default "900"; + description + "LSP refresh interval for level-1."; + } - leaf level-2 { - type uint16; - units "seconds"; - default "900"; - description - "LSP refresh interval for level-2."; - } - } + leaf maximum-lifetime { + type uint16 { + range "350..65535"; + } + units "seconds"; + must ". >= ../refresh-interval + 300"; + default "1200"; + description + "Maximum LSP lifetime for level-1."; + } - container maximum-lifetime { - description - "Maximum LSP lifetime."; - leaf level-1 { - type uint16 { - range "350..65535"; + leaf generation-interval { + type uint16 { + range "1..120"; + } + units "seconds"; + must ". < ../refresh-interval"; + default "30"; + description + "Minimum time allowed before level-1 LSP retransmissions."; } - units "seconds"; - must ". >= ../../refresh-interval/level-1 + 300"; - default "1200"; - description - "Maximum LSP lifetime for level-1."; } - leaf level-2 { - type uint16 { - range "350..65535"; - } - units "seconds"; - must ". >= ../../refresh-interval/level-2 + 300"; - default "1200"; + container level-2 { description - "Maximum LSP lifetime for level-2."; - } - } + "Level-2 LSP-related timers"; + leaf refresh-interval { + type uint16; + units "seconds"; + default "900"; + description + "LSP refresh interval for level-2."; + } - container generation-interval { - description - "Minimum LSP regeneration interval."; - leaf level-1 { - type uint16 { - range "1..120"; + leaf maximum-lifetime { + type uint16 { + range "350..65535"; + } + units "seconds"; + must ". >= ../refresh-interval + 300"; + default "1200"; + description + "Maximum LSP lifetime for level-2."; } - units "seconds"; - must ". < ../../refresh-interval/level-1"; - default "30"; - description - "Minimum time allowed before level-1 LSP retransmissions."; - } - leaf level-2 { - type uint16 { - range "1..120"; + leaf generation-interval { + type uint16 { + range "1..120"; + } + units "seconds"; + must ". < ../refresh-interval"; + default "30"; + description + "Minimum time allowed before level-2 LSP retransmissions."; } - units "seconds"; - must ". 
< ../../refresh-interval/level-2"; - default "30"; - description - "Minimum time allowed before level-2 LSP retransmissions."; } } } @@ -1152,6 +1172,7 @@ module frr-isisd { description "IS-IS interface parameters."; uses interface-config; + uses interface-state; } } diff --git a/yang/frr-module-translator.yang b/yang/frr-module-translator.yang index 3d64ec5399..6713eae76e 100644 --- a/yang/frr-module-translator.yang +++ b/yang/frr-module-translator.yang @@ -4,7 +4,7 @@ module frr-module-translator { prefix frr-module-translator; organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-nexthop.yang b/yang/frr-nexthop.yang index 7d8ce1b8ed..3657ecbd75 100644 --- a/yang/frr-nexthop.yang +++ b/yang/frr-nexthop.yang @@ -15,7 +15,7 @@ module frr-nexthop { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-ripd.yang b/yang/frr-ripd.yang index 94a9ebf3e1..12c72b39b5 100644 --- a/yang/frr-ripd.yang +++ b/yang/frr-ripd.yang @@ -17,7 +17,7 @@ module frr-ripd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -118,7 +118,7 @@ module frr-ripd { "Enable RIP on the specified IP network."; } leaf-list interface { - type frr-interface:interface-ref; + type string; description "Enable RIP on the specified interface."; } @@ -179,14 +179,14 @@ module frr-ripd { } leaf-list passive-interface { when "../passive-default = 'false'"; - type frr-interface:interface-ref; + type string; description "A list of interfaces where the sending of RIP packets is disabled."; } leaf-list non-passive-interface { when "../passive-default = 'true'"; - type frr-interface:interface-ref; + type string; description "A list of interfaces where the sending of RIP packets is enabled."; diff --git a/yang/frr-ripngd.yang b/yang/frr-ripngd.yang index 831758af86..c58962f5cd 100644 --- a/yang/frr-ripngd.yang +++ b/yang/frr-ripngd.yang @@ -17,7 +17,7 @@ module frr-ripngd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -76,7 +76,7 @@ module frr-ripngd { "Enable RIPng on the specified IPv6 network."; } leaf-list interface { - type frr-interface:interface-ref; + type string; description "Enable RIPng on the specified interface."; } diff --git a/yang/frr-route-map.yang b/yang/frr-route-map.yang index 34a7e28a77..106593d9d3 100644 --- a/yang/frr-route-map.yang +++ b/yang/frr-route-map.yang @@ -13,7 +13,7 @@ module frr-route-map { prefix frr-interface; } - organization "Free Range Routing"; + organization "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -179,17 +179,27 @@ module frr-route-map { description "Match a route tag"; value 10; } - - /* - * Protocol YANG models should augment the parent node to - * contain the routing protocol specific value. The protocol - * must also augment `condition-value` to include its specific - * values or expand the `when` statement on the existing cases. - */ - enum routing-protocol-specific { - description "Match a routing protocol specific type"; + /* zebra specific conditions. 
*/ + enum ipv4-prefix-length { + description "Match IPv4 prefix length"; value 100; } + enum ipv6-prefix-length { + description "Match IPv6 prefix length"; + value 101; + } + enum ipv4-next-hop-prefix-length { + description "Match next-hop prefix length"; + value 102; + } + enum source-protocol { + description "Match source protocol"; + value 103; + } + enum source-instance { + description "Match source protocol instance"; + value 104; + } } } @@ -200,7 +210,7 @@ module frr-route-map { case interface { when "./condition = 'interface'"; leaf interface { - type frr-interface:interface-ref; + type string; } } case access-list-num { @@ -291,15 +301,9 @@ module frr-route-map { description "Set tag"; value 3; } - - /* - * Protocol YANG models should augment the parent node to - * contain the routing protocol specific value. The protocol - * must also augment `action-value` to include its specific - * values or expand the `when` statement on the existing cases. - */ - enum routing-protocol-specific { - description "Set a routing protocol specific action"; + /* zebra specific conditions. */ + enum source { + description "Set source address for route"; value 100; } } diff --git a/yang/frr-route-types.yang b/yang/frr-route-types.yang index f22c5ef890..8fdd10121e 100644 --- a/yang/frr-route-types.yang +++ b/yang/frr-route-types.yang @@ -4,7 +4,7 @@ module frr-route-types { prefix frr-route-types; organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -106,4 +106,12 @@ module frr-route-types { } } } + + typedef frr-route-types { + description "Route types as enumerated in `lib/route_types.txt`"; + type union { + type frr-route-types-v4; + type frr-route-types-v6; + } + } } diff --git a/yang/frr-vrrpd.yang b/yang/frr-vrrpd.yang index 3d3a4138fa..145387c4b4 100644 --- a/yang/frr-vrrpd.yang +++ b/yang/frr-vrrpd.yang @@ -16,7 +16,7 @@ module frr-vrrpd { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang index 74922a22f7..736bbc2c81 100644 --- a/yang/frr-zebra.yang +++ b/yang/frr-zebra.yang @@ -11,6 +11,10 @@ module frr-zebra { prefix inet; } + import frr-route-map { + prefix frr-route-map; + } + import frr-route-types { prefix frr-route-types; } @@ -28,7 +32,7 @@ module frr-zebra { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> FRR Development List: <mailto:dev@lists.frrouting.org>"; @@ -1985,4 +1989,63 @@ module frr-zebra { } // End interface model augmentation + + augment "/frr-route-map:lib" + + "/frr-route-map:route-map" + + "/frr-route-map:entry" + + "/frr-route-map:match-condition" + + "/frr-route-map:condition-value" { + case ipv4-prefix-length { + when "./condition = 'ipv4-prefix-length' or + ./condition = 'ipv4-next-hop-prefix-length'"; + leaf ipv4-prefix-length { + type uint8 { + range "0..32"; + } + } + } + case ipv6-prefix-length { + when "./condition = 'ipv6-prefix-length'"; + leaf ipv6-prefix-length { + type uint8 { + range "0..128"; + } + } + } + case source-protocol { + when "./condition = 'source-protocol'"; + leaf source-protocol { + type frr-route-types:frr-route-types; + } + } + case source-instance { + when "./condition = 'source-instance'"; + leaf source-instance { + type uint8 { + range "0..255"; + } + } + } + } + + augment 
"/frr-route-map:lib" + + "/frr-route-map:route-map" + + "/frr-route-map:entry" + + "/frr-route-map:set-action" + + "/frr-route-map:action-value" { + case source-v4 { + when "./action = 'source'"; + leaf source-v4 { + description "IPv4 address"; + type inet:ipv4-address; + } + } + case source-v6 { + when "./action = 'source'"; + leaf source-v6 { + description "IPv6 address"; + type inet:ipv6-address; + } + } + } } diff --git a/yang/ietf/frr-deviations-ietf-interfaces.yang b/yang/ietf/frr-deviations-ietf-interfaces.yang index 6528d66d22..704839fb60 100644 --- a/yang/ietf/frr-deviations-ietf-interfaces.yang +++ b/yang/ietf/frr-deviations-ietf-interfaces.yang @@ -8,7 +8,7 @@ module frr-deviations-ietf-interfaces { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> diff --git a/yang/ietf/frr-deviations-ietf-rip.yang b/yang/ietf/frr-deviations-ietf-rip.yang index 42ed8e3c09..39a1d7e71d 100644 --- a/yang/ietf/frr-deviations-ietf-rip.yang +++ b/yang/ietf/frr-deviations-ietf-rip.yang @@ -12,7 +12,7 @@ module frr-deviations-ietf-rip { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> diff --git a/yang/ietf/frr-deviations-ietf-routing.yang b/yang/ietf/frr-deviations-ietf-routing.yang index 62787e782c..15ceb6b929 100644 --- a/yang/ietf/frr-deviations-ietf-routing.yang +++ b/yang/ietf/frr-deviations-ietf-routing.yang @@ -8,7 +8,7 @@ module frr-deviations-ietf-routing { } organization - "Free Range Routing"; + "FRRouting"; contact "FRR Users List: <mailto:frog@lists.frrouting.org> diff --git a/yang/subdir.am b/yang/subdir.am index c1297dafd5..0e124c5ab0 100644 --- a/yang/subdir.am +++ b/yang/subdir.am @@ -21,10 +21,12 @@ EXTRA_DIST += yang/embedmodel.py dist_yangmodels_DATA += yang/frr-filter.yang dist_yangmodels_DATA += yang/frr-module-translator.yang +dist_yangmodels_DATA += yang/frr-nexthop.yang dist_yangmodels_DATA += yang/frr-test-module.yang dist_yangmodels_DATA += yang/frr-interface.yang dist_yangmodels_DATA += yang/frr-route-map.yang dist_yangmodels_DATA += yang/frr-route-types.yang +dist_yangmodels_DATA += yang/frr-zebra.yang dist_yangmodels_DATA += yang/ietf/ietf-routing-types.yang if BFDD diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c index 5ab5210664..950690b943 100644 --- a/zebra/kernel_socket.c +++ b/zebra/kernel_socket.c @@ -1393,7 +1393,7 @@ static int kernel_read(struct thread *thread) */ if (rtm->rtm_msglen != nbytes) { zlog_debug( - "kernel_read: rtm->rtm_msglen %d, nbytes %d, type %d\n", + "kernel_read: rtm->rtm_msglen %d, nbytes %d, type %d", rtm->rtm_msglen, nbytes, rtm->rtm_type); return -1; } diff --git a/zebra/label_manager.c b/zebra/label_manager.c index caebdc0f08..5f2128a09c 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -4,7 +4,7 @@ * Copyright (C) 2017 by Bingen Eguzkitza, * Volta Networks Inc. 
* - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -446,6 +446,15 @@ int lm_client_connect_response(uint8_t proto, uint16_t instance, int lm_get_chunk_response(struct label_manager_chunk *lmc, uint8_t proto, uint16_t instance, vrf_id_t vrf_id) { + if (!lmc) + flog_err(EC_ZEBRA_LM_CANNOT_ASSIGN_CHUNK, + "Unable to assign Label Chunk to %s instance %u", + zebra_route_string(proto), instance); + else if (IS_ZEBRA_DEBUG_PACKET) + zlog_debug("Assigned Label Chunk %u - %u to %s instance %u", + lmc->start, lmc->end, zebra_route_string(proto), + instance); + struct zserv *client = zserv_find_client(proto, instance); if (!client) { zlog_err("%s: could not find client for daemon %s instance %u", diff --git a/zebra/label_manager.h b/zebra/label_manager.h index 74e283e85e..4fee34d301 100644 --- a/zebra/label_manager.h +++ b/zebra/label_manager.h @@ -4,7 +4,7 @@ * Copyright (C) 2017 by Bingen Eguzkitza, * Volta Networks Inc. * - * This file is part of FreeRangeRouting (FRR) + * This file is part of FRRouting (FRR) * * FRR is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/zebra/main.c b/zebra/main.c index 5951c7e280..fb7e926c5c 100644 --- a/zebra/main.c +++ b/zebra/main.c @@ -53,6 +53,7 @@ #include "zebra/zebra_rnh.h" #include "zebra/zebra_pbr.h" #include "zebra/zebra_vxlan.h" +#include "zebra/zebra_routemap.h" #if defined(HANDLE_NETLINK_FUZZING) #include "zebra/kernel_netlink.h" @@ -173,13 +174,20 @@ static void sigint(void) work_queue_free_and_null(&zrouter.lsp_process_q); vrf_terminate(); + rtadv_terminate(); ns_walk_func(zebra_ns_early_shutdown); zebra_ns_notify_close(); access_list_reset(); prefix_list_reset(); - route_map_finish(); + /* + * zebra_routemap_finish will + * 1 set rmap upd timer to 0 so that rmap update wont be scheduled again + * 2 Put off the rmap update thread + * 3 route_map_finish + */ + zebra_routemap_finish(); list_delete(&zrouter.client_list); @@ -238,6 +246,7 @@ struct quagga_signal_t zebra_signals[] = { static const struct frr_yang_module_info *const zebra_yang_modules[] = { &frr_interface_info, &frr_route_map_info, + &frr_zebra_info, }; FRR_DAEMON_INFO( @@ -322,17 +331,21 @@ int main(int argc, char **argv) case 'a': allow_delete = 1; break; - case 'e': - zrouter.multipath_num = atoi(optarg); - if (zrouter.multipath_num > MULTIPATH_NUM - || zrouter.multipath_num <= 0) { + case 'e': { + unsigned long int parsed_multipath = + strtoul(optarg, NULL, 10); + if (parsed_multipath == 0 + || parsed_multipath > MULTIPATH_NUM + || parsed_multipath > UINT32_MAX) { flog_err( EC_ZEBRA_BAD_MULTIPATH_NUM, - "Multipath Number specified must be less than %d and greater than 0", + "Multipath Number specified must be less than %u and greater than 0", MULTIPATH_NUM); return 1; } + zrouter.multipath_num = parsed_multipath; break; + } case 'o': vrf_default_name_configured = optarg; break; diff --git a/zebra/redistribute.c b/zebra/redistribute.c index d1148061b9..4d6346151a 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -150,6 +150,43 @@ static void zebra_redistribute(struct zserv *client, int type, } } +/* + * Function to check if prefix is candidate for + * redistribute. 
+ */ +static bool zebra_redistribute_check(const struct route_entry *re, + struct zserv *client, + const struct prefix *p, int afi) +{ + /* Process only if there is valid re */ + if (!re) + return false; + + /* If default route and redistributed */ + if (is_default_prefix(p) + && vrf_bitmap_check(client->redist_default[afi], re->vrf_id)) + return true; + + /* If redistribute in enabled for zebra route all */ + if (vrf_bitmap_check(client->redist[afi][ZEBRA_ROUTE_ALL], re->vrf_id)) + return true; + + /* + * If multi-instance then check for route + * redistribution for given instance. + */ + if (re->instance + && redist_check_instance(&client->mi_redist[afi][re->type], + re->instance)) + return true; + + /* If redistribution is enabled for give route type. */ + if (vrf_bitmap_check(client->redist[afi][re->type], re->vrf_id)) + return true; + + return false; +} + /* Either advertise a route for redistribution to registered clients or */ /* withdraw redistribution if add cannot be done for client */ void redistribute_update(const struct prefix *p, const struct prefix *src_p, @@ -158,7 +195,6 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, { struct listnode *node, *nnode; struct zserv *client; - int send_redistribute; int afi; char buf[PREFIX_STRLEN]; @@ -173,8 +209,7 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, afi = family2afi(p->family); if (!afi) { flog_warn(EC_ZEBRA_REDISTRIBUTE_UNKNOWN_AF, - "%s: Unknown AFI/SAFI prefix received\n", - __FUNCTION__); + "%s: Unknown AFI/SAFI prefix received\n", __func__); return; } if (!zebra_check_addr(p)) { @@ -186,25 +221,7 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { - send_redistribute = 0; - - if (is_default_prefix(p) - && vrf_bitmap_check(client->redist_default[afi], - re->vrf_id)) - send_redistribute = 1; - else if (vrf_bitmap_check(client->redist[afi][ZEBRA_ROUTE_ALL], - re->vrf_id)) - send_redistribute = 1; - else if (re->instance - && redist_check_instance( - &client->mi_redist[afi][re->type], - re->instance)) - send_redistribute = 1; - else if (vrf_bitmap_check(client->redist[afi][re->type], - re->vrf_id)) - send_redistribute = 1; - - if (send_redistribute) { + if (zebra_redistribute_check(re, client, p, afi)) { if (IS_ZEBRA_DEBUG_RIB) { zlog_debug( "%s: client %s %s(%u), type=%d, distance=%d, metric=%d", @@ -216,18 +233,9 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, } zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, p, src_p, re); - } else if (prev_re - && ((re->instance - && redist_check_instance( - &client->mi_redist[afi] - [prev_re->type], - re->instance)) - || vrf_bitmap_check( - client->redist[afi][prev_re->type], - re->vrf_id))) { + } else if (zebra_redistribute_check(prev_re, client, p, afi)) zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, p, src_p, prev_re); - } } } @@ -269,7 +277,7 @@ void redistribute_delete(const struct prefix *p, const struct prefix *src_p, /* Add DISTANCE_INFINITY check. 
*/ if (old_re && (old_re->distance == DISTANCE_INFINITY)) { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("\tSkipping due to Infinite Distance"); + zlog_debug(" Skipping due to Infinite Distance"); return; } @@ -292,45 +300,24 @@ void redistribute_delete(const struct prefix *p, const struct prefix *src_p, } for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { - if (new_re) { - /* Skip this client if it will receive an update for the - * 'new' re - */ - if (is_default_prefix(p) - && vrf_bitmap_check(client->redist_default[afi], - new_re->vrf_id)) - continue; - else if (vrf_bitmap_check( - client->redist[afi][ZEBRA_ROUTE_ALL], - new_re->vrf_id)) - continue; - else if (new_re->instance - && redist_check_instance( - &client->mi_redist[afi][new_re->type], - new_re->instance)) - continue; - else if (vrf_bitmap_check( - client->redist[afi][new_re->type], - new_re->vrf_id)) - continue; - } + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + /* + * Skip this client if it will receive an update for the + * 'new' re + */ + if (zebra_redistribute_check(new_re, client, p, afi)) + continue; /* Send a delete for the 'old' re to any subscribed client. */ - if (old_re - && (vrf_bitmap_check(client->redist[afi][ZEBRA_ROUTE_ALL], - old_re->vrf_id) - || (old_re->instance - && redist_check_instance( - &client->mi_redist[afi][old_re->type], - old_re->instance)) - || vrf_bitmap_check(client->redist[afi][old_re->type], - old_re->vrf_id))) { + if (zebra_redistribute_check(old_re, client, p, afi)) zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, p, src_p, old_re); - } } } + void zebra_redistribute_add(ZAPI_HANDLER_ARGS) { afi_t afi = 0; @@ -473,6 +460,12 @@ void zebra_interface_up_update(struct interface *ifp) if (ifp->ptm_status || !ifp->ptm_enable) { for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous + * clients. + */ + if (client->synchronous) + continue; + zsend_interface_update(ZEBRA_INTERFACE_UP, client, ifp); zsend_interface_link_params(client, ifp); @@ -491,6 +484,10 @@ void zebra_interface_down_update(struct interface *ifp) ifp->name, ifp->vrf_id); for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + zsend_interface_update(ZEBRA_INTERFACE_DOWN, client, ifp); } } @@ -506,6 +503,10 @@ void zebra_interface_add_update(struct interface *ifp) ifp->vrf_id); for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + client->ifadd_cnt++; zsend_interface_add(client, ifp); zsend_interface_link_params(client, ifp); @@ -522,6 +523,10 @@ void zebra_interface_delete_update(struct interface *ifp) ifp->name, ifp->vrf_id); for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + client->ifdel_cnt++; zsend_interface_delete(client, ifp); } @@ -553,12 +558,17 @@ void zebra_interface_address_add_update(struct interface *ifp, router_id_add_address(ifc); - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. 
*/ + if (client->synchronous) + continue; + if (CHECK_FLAG(ifc->conf, ZEBRA_IFC_REAL)) { client->connected_rt_add_cnt++; zsend_interface_address(ZEBRA_INTERFACE_ADDRESS_ADD, client, ifp, ifc); } + } } /* Interface address deletion. */ @@ -582,12 +592,17 @@ void zebra_interface_address_delete_update(struct interface *ifp, router_id_del_address(ifc); - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + if (CHECK_FLAG(ifc->conf, ZEBRA_IFC_REAL)) { client->connected_rt_del_cnt++; zsend_interface_address(ZEBRA_INTERFACE_ADDRESS_DELETE, client, ifp, ifc); } + } } /* Interface VRF change. May need to delete from clients not interested in @@ -604,6 +619,10 @@ void zebra_interface_vrf_update_del(struct interface *ifp, vrf_id_t new_vrf_id) ifp->name, ifp->vrf_id, new_vrf_id); for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + /* Need to delete if the client is not interested in the new * VRF. */ zsend_interface_update(ZEBRA_INTERFACE_DOWN, client, ifp); @@ -627,6 +646,10 @@ void zebra_interface_vrf_update_add(struct interface *ifp, vrf_id_t old_vrf_id) ifp->name, old_vrf_id, ifp->vrf_id); for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + /* Need to add if the client is interested in the new VRF. */ client->ifadd_cnt++; zsend_interface_add(client, ifp); @@ -914,6 +937,11 @@ void zebra_interface_parameters_update(struct interface *ifp) zlog_debug("MESSAGE: ZEBRA_INTERFACE_LINK_PARAMS %s(%u)", ifp->name, ifp->vrf_id); - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + zsend_interface_link_params(client, ifp); + } } diff --git a/zebra/rib.h b/zebra/rib.h index 931c97638e..3717a12814 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -107,7 +107,7 @@ struct route_entry { /* Uptime. */ time_t uptime; - /* Type fo this route. */ + /* Type of this route. */ int type; /* VRF identifier. */ @@ -347,10 +347,16 @@ extern int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, struct prefix_ipv6 *src_p, const struct nexthop *nh, uint32_t nhe_id, uint32_t table_id, uint32_t metric, uint32_t mtu, uint8_t distance, route_tag_t tag); - +/* + * Multipath route apis. 
+ */ extern int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, struct prefix_ipv6 *src_p, struct route_entry *re, struct nexthop_group *ng); +extern int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, + struct prefix_ipv6 *src_p, + struct route_entry *re, + struct nhg_hash_entry *nhe); extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, unsigned short instance, int flags, struct prefix *p, diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index e40bf45f59..b6224b3da9 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -718,14 +718,15 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, if (IS_ZEBRA_DEBUG_KERNEL) { char buf[PREFIX_STRLEN]; char buf2[PREFIX_STRLEN]; - zlog_debug("%s %s%s%s vrf %u(%u) metric: %d Admin Distance: %d", - nl_msg_type_to_str(h->nlmsg_type), - prefix2str(&p, buf, sizeof(buf)), - src_p.prefixlen ? " from " : "", - src_p.prefixlen - ? prefix2str(&src_p, buf2, sizeof(buf2)) - : "", - vrf_id, table, metric, distance); + zlog_debug( + "%s %s%s%s vrf %s(%u) table_id: %u metric: %d Admin Distance: %d", + nl_msg_type_to_str(h->nlmsg_type), + prefix2str(&p, buf, sizeof(buf)), + src_p.prefixlen ? " from " : "", + src_p.prefixlen ? prefix2str(&src_p, buf2, sizeof(buf2)) + : "", + vrf_id_to_name(vrf_id), vrf_id, table, metric, + distance); } afi_t afi = AFI_IP; @@ -911,9 +912,8 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h, ifp = if_lookup_by_index(iif, vrf); zlog_debug( "MCAST VRF: %s(%d) %s (%s,%s) IIF: %s(%d) OIF: %s jiffies: %lld", - (zvrf ? zvrf->vrf->name : "Unknown"), vrf, - nl_msg_type_to_str(h->nlmsg_type), sbuf, gbuf, - ifp ? ifp->name : "Unknown", iif, oif_list, + zvrf_name(zvrf), vrf, nl_msg_type_to_str(h->nlmsg_type), + sbuf, gbuf, ifp ? ifp->name : "Unknown", iif, oif_list, m->lastused); } return 0; @@ -1111,7 +1111,8 @@ static int build_label_stack(struct mpls_label_stack *nh_label, * @param nlmsg: nlmsghdr structure to fill in. * @param req_size: The size allocated for the message. */ -static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, +static void _netlink_route_build_singlepath(const struct prefix *p, + const char *routedesc, int bytelen, const struct nexthop *nexthop, struct nlmsghdr *nlmsg, struct rtmsg *rtmsg, @@ -1121,9 +1122,12 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, mpls_lse_t out_lse[MPLS_MAX_LABELS]; char label_buf[256]; int num_labels = 0; + struct vrf *vrf; assert(nexthop); + vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* * label_buf is *only* currently used within debugging. 
* As such when we assign it we are guarding it inside @@ -1176,10 +1180,10 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - " 5549: _netlink_route_build_singlepath() (%s): " - "nexthop via %s %s if %u(%u)", - routedesc, ipv4_ll_buf, label_buf, - nexthop->ifindex, nexthop->vrf_id); + " 5549: _netlink_route_build_singlepath() (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + routedesc, p, ipv4_ll_buf, label_buf, + nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); return; } @@ -1202,10 +1206,10 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "netlink_route_multipath() (%s): " - "nexthop via %s %s if %u(%u)", - routedesc, inet_ntoa(nexthop->gate.ipv4), - label_buf, nexthop->ifindex, nexthop->vrf_id); + "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + routedesc, p, inet_ntoa(nexthop->gate.ipv4), + label_buf, nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); } if (nexthop->type == NEXTHOP_TYPE_IPV6 @@ -1225,10 +1229,10 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "netlink_route_multipath() (%s): " - "nexthop via %s %s if %u(%u)", - routedesc, inet6_ntoa(nexthop->gate.ipv6), - label_buf, nexthop->ifindex, nexthop->vrf_id); + "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + routedesc, p, inet6_ntoa(nexthop->gate.ipv6), + label_buf, nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); } /* @@ -1251,9 +1255,9 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "netlink_route_multipath() (%s): " - "nexthop via if %u(%u)", - routedesc, nexthop->ifindex, nexthop->vrf_id); + "netlink_route_multipath() (%s): %pFX nexthop via if %u vrf %s(%u)", + routedesc, p, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); } } @@ -1273,16 +1277,16 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen, * @param src: pointer pointing to a location where * the prefsrc should be stored. */ -static void _netlink_route_build_multipath(const char *routedesc, int bytelen, - const struct nexthop *nexthop, - struct rtattr *rta, - struct rtnexthop *rtnh, - struct rtmsg *rtmsg, - const union g_addr **src) +static void +_netlink_route_build_multipath(const struct prefix *p, const char *routedesc, + int bytelen, const struct nexthop *nexthop, + struct rtattr *rta, struct rtnexthop *rtnh, + struct rtmsg *rtmsg, const union g_addr **src) { mpls_lse_t out_lse[MPLS_MAX_LABELS]; char label_buf[256]; int num_labels = 0; + struct vrf *vrf; rtnh->rtnh_len = sizeof(*rtnh); rtnh->rtnh_flags = 0; @@ -1291,6 +1295,8 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen, assert(nexthop); + vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* * label_buf is *only* currently used within debugging. 
* As such when we assign it we are guarding it inside @@ -1340,6 +1346,8 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen, bytelen); rtnh->rtnh_len += sizeof(struct rtattr) + bytelen; rtnh->rtnh_ifindex = nexthop->ifindex; + if (nexthop->weight) + rtnh->rtnh_hops = nexthop->weight - 1; if (nexthop->rmap_src.ipv4.s_addr != INADDR_ANY) *src = &nexthop->rmap_src; @@ -1348,10 +1356,10 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - " 5549: netlink_route_build_multipath() (%s): " - "nexthop via %s %s if %u", - routedesc, ipv4_ll_buf, label_buf, - nexthop->ifindex); + " 5549: netlink_route_build_multipath() (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + routedesc, p, ipv4_ll_buf, label_buf, + nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); return; } @@ -1367,10 +1375,10 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "netlink_route_multipath() (%s): " - "nexthop via %s %s if %u", - routedesc, inet_ntoa(nexthop->gate.ipv4), - label_buf, nexthop->ifindex); + "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + routedesc, p, inet_ntoa(nexthop->gate.ipv4), + label_buf, nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); } if (nexthop->type == NEXTHOP_TYPE_IPV6 || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) { @@ -1385,10 +1393,10 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "netlink_route_multipath() (%s): " - "nexthop via %s %s if %u", - routedesc, inet6_ntoa(nexthop->gate.ipv6), - label_buf, nexthop->ifindex); + "netlink_route_multipath() (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + routedesc, p, inet6_ntoa(nexthop->gate.ipv6), + label_buf, nexthop->ifindex, VRF_LOGNAME(vrf), + nexthop->vrf_id); } /* @@ -1408,16 +1416,17 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "netlink_route_multipath() (%s): " - "nexthop via if %u", - routedesc, nexthop->ifindex); + "netlink_route_multipath() (%s): %pFX nexthop via if %u vrf %s(%u)", + routedesc, p, nexthop->ifindex, + VRF_LOGNAME(vrf), nexthop->vrf_id); } if (nexthop->weight) rtnh->rtnh_hops = nexthop->weight - 1; } -static inline void _netlink_mpls_build_singlepath(const char *routedesc, +static inline void _netlink_mpls_build_singlepath(const struct prefix *p, + const char *routedesc, const zebra_nhlfe_t *nhlfe, struct nlmsghdr *nlmsg, struct rtmsg *rtmsg, @@ -1428,23 +1437,24 @@ static inline void _netlink_mpls_build_singlepath(const char *routedesc, family = NHLFE_FAMILY(nhlfe); bytelen = (family == AF_INET ? 4 : 16); - _netlink_route_build_singlepath(routedesc, bytelen, nhlfe->nexthop, + _netlink_route_build_singlepath(p, routedesc, bytelen, nhlfe->nexthop, nlmsg, rtmsg, req_size, cmd); } static inline void -_netlink_mpls_build_multipath(const char *routedesc, const zebra_nhlfe_t *nhlfe, - struct rtattr *rta, struct rtnexthop *rtnh, - struct rtmsg *rtmsg, const union g_addr **src) +_netlink_mpls_build_multipath(const struct prefix *p, const char *routedesc, + const zebra_nhlfe_t *nhlfe, struct rtattr *rta, + struct rtnexthop *rtnh, struct rtmsg *rtmsg, + const union g_addr **src) { int bytelen; uint8_t family; family = NHLFE_FAMILY(nhlfe); bytelen = (family == AF_INET ? 
4 : 16); - _netlink_route_build_multipath(routedesc, bytelen, nhlfe->nexthop, rta, - rtnh, rtmsg, src); + _netlink_route_build_multipath(p, routedesc, bytelen, nhlfe->nexthop, + rta, rtnh, rtmsg, src); } @@ -1464,9 +1474,9 @@ static void _netlink_route_debug(int cmd, const struct prefix *p, if (IS_ZEBRA_DEBUG_KERNEL) { char buf[PREFIX_STRLEN]; zlog_debug( - "netlink_route_multipath(): %s %s vrf %u(%u)", + "netlink_route_multipath(): %s %s vrf %s(%u) table_id: %u", nl_msg_type_to_str(cmd), - prefix2str(p, buf, sizeof(buf)), + prefix2str(p, buf, sizeof(buf)), vrf_id_to_name(vrfid), vrfid, tableid); } } @@ -1518,6 +1528,30 @@ static int netlink_neigh_update(int cmd, int ifindex, uint32_t addr, char *lla, 0); } +static bool nexthop_set_src(const struct nexthop *nexthop, int family, + union g_addr *src) +{ + if (family == AF_INET) { + if (nexthop->rmap_src.ipv4.s_addr != INADDR_ANY) { + src->ipv4 = nexthop->rmap_src.ipv4; + return true; + } else if (nexthop->src.ipv4.s_addr != INADDR_ANY) { + src->ipv4 = nexthop->src.ipv4; + return true; + } + } else if (family == AF_INET6) { + if (!IN6_IS_ADDR_UNSPECIFIED(&nexthop->rmap_src.ipv6)) { + src->ipv6 = nexthop->rmap_src.ipv6; + return true; + } else if (!IN6_IS_ADDR_UNSPECIFIED(&nexthop->src.ipv6)) { + src->ipv6 = nexthop->src.ipv6; + return true; + } + } + + return false; +} + /* * Routing table change via netlink interface, using a dataplane context object */ @@ -1528,7 +1562,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) unsigned int nexthop_num; int family; const char *routedesc; - int setsrc = 0; + bool setsrc = false; union g_addr src; const struct prefix *p, *src_p; uint32_t table_id; @@ -1645,8 +1679,29 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (kernel_nexthops_supported()) { /* Kernel supports nexthop objects */ + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug( + "netlink_route_multipath(): %pFX nhg_id is %u", + p, dplane_ctx_get_nhe_id(ctx)); addattr32(&req.n, sizeof(req), RTA_NH_ID, dplane_ctx_get_nhe_id(ctx)); + + /* Have to determine src still */ + for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx), nexthop)) { + if (setsrc) + break; + + setsrc = nexthop_set_src(nexthop, family, &src); + } + + if (setsrc) { + if (family == AF_INET) + addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + &src.ipv4, bytelen); + else if (family == AF_INET6) + addattr_l(&req.n, sizeof(req), RTA_PREFSRC, + &src.ipv6, bytelen); + } goto skip; } @@ -1694,32 +1749,8 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (setsrc) continue; - if (family == AF_INET) { - if (nexthop->rmap_src.ipv4.s_addr - != 0) { - src.ipv4 = - nexthop->rmap_src.ipv4; - setsrc = 1; - } else if (nexthop->src.ipv4.s_addr - != 0) { - src.ipv4 = - nexthop->src.ipv4; - setsrc = 1; - } - } else if (family == AF_INET6) { - if (!IN6_IS_ADDR_UNSPECIFIED( - &nexthop->rmap_src.ipv6)) { - src.ipv6 = - nexthop->rmap_src.ipv6; - setsrc = 1; - } else if ( - !IN6_IS_ADDR_UNSPECIFIED( - &nexthop->src.ipv6)) { - src.ipv6 = - nexthop->src.ipv6; - setsrc = 1; - } - } + setsrc = nexthop_set_src(nexthop, family, &src); + continue; } @@ -1730,13 +1761,13 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) : "single-path"; _netlink_route_build_singlepath( - routedesc, bytelen, nexthop, &req.n, + p, routedesc, bytelen, nexthop, &req.n, &req.r, sizeof(req), cmd); nexthop_num++; break; } } - if (setsrc && (cmd == RTM_NEWROUTE)) { + if (setsrc) { if (family == AF_INET) addattr_l(&req.n, sizeof(req), 
RTA_PREFSRC, &src.ipv4, bytelen); @@ -1762,32 +1793,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) if (setsrc) continue; - if (family == AF_INET) { - if (nexthop->rmap_src.ipv4.s_addr - != 0) { - src.ipv4 = - nexthop->rmap_src.ipv4; - setsrc = 1; - } else if (nexthop->src.ipv4.s_addr - != 0) { - src.ipv4 = - nexthop->src.ipv4; - setsrc = 1; - } - } else if (family == AF_INET6) { - if (!IN6_IS_ADDR_UNSPECIFIED( - &nexthop->rmap_src.ipv6)) { - src.ipv6 = - nexthop->rmap_src.ipv6; - setsrc = 1; - } else if ( - !IN6_IS_ADDR_UNSPECIFIED( - &nexthop->src.ipv6)) { - src.ipv6 = - nexthop->src.ipv6; - setsrc = 1; - } - } + setsrc = nexthop_set_src(nexthop, family, &src); continue; } @@ -1800,8 +1806,8 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) nexthop_num++; _netlink_route_build_multipath( - routedesc, bytelen, nexthop, rta, rtnh, - &req.r, &src1); + p, routedesc, bytelen, nexthop, rta, + rtnh, &req.r, &src1); rtnh = RTNH_NEXT(rtnh); if (!setsrc && src1) { @@ -1814,7 +1820,7 @@ static int netlink_route_multipath(int cmd, struct zebra_dplane_ctx *ctx) } } } - if (setsrc && (cmd == RTM_NEWROUTE)) { + if (setsrc) { if (family == AF_INET) addattr_l(&req.n, sizeof(req), RTA_PREFSRC, &src.ipv4, bytelen); @@ -1989,6 +1995,12 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) addattr32(&req.n, req_size, NHA_ID, id); if (cmd == RTM_NEWNEXTHOP) { + /* + * We distinguish between a "group", which is a collection + * of ids, and a singleton nexthop with an id. The + * group is installed as an id that just refers to a list of + * other ids. + */ if (dplane_ctx_get_nhe_nh_grp_count(ctx)) _netlink_nexthop_build_group( &req.n, req_size, id, @@ -2075,14 +2087,13 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) } } - nexthop_done: - if (IS_ZEBRA_DEBUG_KERNEL) { - char buf[NEXTHOP_STRLEN]; +nexthop_done: - snprintfrr(buf, sizeof(buf), "%pNHv", nh); - zlog_debug("%s: ID (%u): %s (%u) %s ", __func__, - id, buf, nh->vrf_id, label_buf); - } + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s: ID (%u): %pNHv vrf %s(%u) %s ", + __func__, id, nh, + vrf_id_to_name(nh->vrf_id), + nh->vrf_id, label_buf); } req.nhm.nh_protocol = zebra2proto(dplane_ctx_get_nhe_type(ctx)); @@ -2110,43 +2121,19 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx) */ enum zebra_dplane_result kernel_nexthop_update(struct zebra_dplane_ctx *ctx) { + enum dplane_op_e op; int cmd = 0; int ret = 0; - switch (dplane_ctx_get_op(ctx)) { - case DPLANE_OP_NH_DELETE: - cmd = RTM_DELNEXTHOP; - break; - case DPLANE_OP_NH_INSTALL: - case DPLANE_OP_NH_UPDATE: + op = dplane_ctx_get_op(ctx); + if (op == DPLANE_OP_NH_INSTALL || op == DPLANE_OP_NH_UPDATE) cmd = RTM_NEWNEXTHOP; - break; - case DPLANE_OP_ROUTE_INSTALL: - case DPLANE_OP_ROUTE_UPDATE: - case DPLANE_OP_ROUTE_DELETE: - case DPLANE_OP_ROUTE_NOTIFY: - case DPLANE_OP_LSP_INSTALL: - case DPLANE_OP_LSP_UPDATE: - case DPLANE_OP_LSP_DELETE: - case DPLANE_OP_LSP_NOTIFY: - case DPLANE_OP_PW_INSTALL: - case DPLANE_OP_PW_UNINSTALL: - case DPLANE_OP_SYS_ROUTE_ADD: - case DPLANE_OP_SYS_ROUTE_DELETE: - case DPLANE_OP_ADDR_INSTALL: - case DPLANE_OP_ADDR_UNINSTALL: - case DPLANE_OP_MAC_INSTALL: - case DPLANE_OP_MAC_DELETE: - case DPLANE_OP_NEIGH_INSTALL: - case DPLANE_OP_NEIGH_UPDATE: - case DPLANE_OP_NEIGH_DELETE: - case DPLANE_OP_VTEP_ADD: - case DPLANE_OP_VTEP_DELETE: - case DPLANE_OP_NONE: - flog_err( - EC_ZEBRA_NHG_FIB_UPDATE, - "Context received for kernel nexthop update with incorrect OP code (%u)", - 
dplane_ctx_get_op(ctx)); + else if (op == DPLANE_OP_NH_DELETE) + cmd = RTM_DELNEXTHOP; + else { + flog_err(EC_ZEBRA_NHG_FIB_UPDATE, + "Context received for kernel nexthop update with incorrect OP code (%u)", + op); return ZEBRA_DPLANE_REQUEST_FAILURE; } @@ -2535,12 +2522,28 @@ int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla, addr, lla, llalen, ns_id); } -/* - * Add remote VTEP to the flood list for this VxLAN interface (VNI). This - * is done by adding an FDB entry with a MAC of 00:00:00:00:00:00. +/** + * netlink_update_neigh_ctx_internal() - Common helper api for evpn + * neighbor updates using dataplane context object. + * @ctx: Dataplane context + * @cmd: Netlink command (RTM_NEWNEIGH or RTM_DELNEIGH) + * @mac: A neighbor cache link layer address + * @ip: A neighbor cache n/w layer destination address + * @replace_obj: Whether NEW request should replace existing object or + * add to the end of the list + * @family: AF_* netlink family + * @type: RTN_* route type + * @flags: NTF_* flags + * @state: NUD_* states + * + * Return: Result status */ -static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, - int cmd) +static int netlink_update_neigh_ctx_internal(const struct zebra_dplane_ctx *ctx, + int cmd, const struct ethaddr *mac, + const struct ipaddr *ip, + bool replace_obj, uint8_t family, + uint8_t type, uint8_t flags, + uint16_t state) { uint8_t protocol = RTPROT_ZEBRA; struct { @@ -2548,34 +2551,62 @@ static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, struct ndmsg ndm; char buf[256]; } req; - uint8_t dst_mac[6] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; - const struct ipaddr *addr; + int ipa_len; + enum dplane_op_e op; memset(&req, 0, sizeof(req)); + op = dplane_ctx_get_op(ctx); + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); req.n.nlmsg_flags = NLM_F_REQUEST; if (cmd == RTM_NEWNEIGH) - req.n.nlmsg_flags |= (NLM_F_CREATE | NLM_F_APPEND); + req.n.nlmsg_flags |= + NLM_F_CREATE + | (replace_obj ? NLM_F_REPLACE : NLM_F_APPEND); req.n.nlmsg_type = cmd; - req.ndm.ndm_family = PF_BRIDGE; - req.ndm.ndm_state = NUD_NOARP | NUD_PERMANENT; - req.ndm.ndm_flags |= NTF_SELF; /* Handle by "self", not "master" */ - + req.ndm.ndm_family = family; + req.ndm.ndm_type = type; + req.ndm.ndm_state = state; + req.ndm.ndm_flags = flags; + req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); addattr_l(&req.n, sizeof(req), NDA_PROTOCOL, &protocol, sizeof(protocol)); - addattr_l(&req.n, sizeof(req), NDA_LLADDR, &dst_mac, 6); - req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); + if (mac) + addattr_l(&req.n, sizeof(req), NDA_LLADDR, mac, 6); - addr = dplane_ctx_neigh_get_ipaddr(ctx); + ipa_len = IS_IPADDR_V4(ip) ? IPV4_MAX_BYTELEN : IPV6_MAX_BYTELEN; + addattr_l(&req.n, sizeof(req), NDA_DST, &ip->ip.addr, ipa_len); + + if (op == DPLANE_OP_MAC_INSTALL || op == DPLANE_OP_MAC_DELETE) { + vlanid_t vid = dplane_ctx_mac_get_vlan(ctx); - addattr_l(&req.n, sizeof(req), NDA_DST, &(addr->ipaddr_v4), 4); + if (vid > 0) + addattr16(&req.n, sizeof(req), NDA_VLAN, vid); + + addattr32(&req.n, sizeof(req), NDA_MASTER, + dplane_ctx_mac_get_br_ifindex(ctx)); + } return netlink_talk_info(netlink_talk_filter, &req.n, dplane_ctx_get_ns(ctx), 0); } +/* + * Add remote VTEP to the flood list for this VxLAN interface (VNI). This + * is done by adding an FDB entry with a MAC of 00:00:00:00:00:00. 
+ */ +static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, + int cmd) +{ + struct ethaddr dst_mac = {.octet = {0}}; + + return netlink_update_neigh_ctx_internal( + ctx, cmd, &dst_mac, dplane_ctx_neigh_get_ipaddr(ctx), false, + PF_BRIDGE, 0, NTF_SELF, (NUD_NOARP | NUD_PERMANENT)); +} + #ifndef NDA_RTA #define NDA_RTA(r) \ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg)))) @@ -2675,7 +2706,7 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (filter_vlan && vid != filter_vlan) { if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("\tFiltered due to filter vlan: %d", + zlog_debug(" Filtered due to filter vlan: %d", filter_vlan); return 0; } @@ -2689,8 +2720,9 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id) /* Drop "permanent" entries. */ if (ndm->ndm_state & NUD_PERMANENT) { if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("\tDropping entry because of NUD_PERMANENT"); - return 0; + zlog_debug( + " Dropping entry because of NUD_PERMANENT"); + return 0; } if (IS_ZEBRA_IF_VXLAN(ifp)) @@ -2862,10 +2894,12 @@ static int netlink_request_specific_mac_in_bridge(struct zebra_ns *zns, addattr32(&req.n, sizeof(req), NDA_MASTER, br_if->ifindex); if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: Tx family %s IF %s(%u) MAC %s vid %u", __func__, - nl_family_to_str(req.ndm.ndm_family), br_if->name, - br_if->ifindex, - prefix_mac2str(mac, buf, sizeof(buf)), vid); + zlog_debug( + "%s: Tx family %s IF %s(%u) vrf %s(%u) MAC %s vid %u", + __func__, nl_family_to_str(req.ndm.ndm_family), + br_if->name, br_if->ifindex, + vrf_id_to_name(br_if->vrf_id), br_if->vrf_id, + prefix_mac2str(mac, buf, sizeof(buf)), vid); return netlink_request(&zns->netlink_cmd, &req.n); } @@ -2896,92 +2930,50 @@ int netlink_macfdb_read_specific_mac(struct zebra_ns *zns, /* * Netlink-specific handler for MAC updates using dataplane context object. 
*/ -static enum zebra_dplane_result -netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx) +static int netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx, int cmd) { - uint8_t protocol = RTPROT_ZEBRA; - struct { - struct nlmsghdr n; - struct ndmsg ndm; - char buf[256]; - } req; - int ret; - int dst_alen; - int vid_present = 0; - int cmd; - struct in_addr vtep_ip; + struct ipaddr vtep_ip; vlanid_t vid; + uint8_t flags; + uint16_t state; - if (dplane_ctx_get_op(ctx) == DPLANE_OP_MAC_INSTALL) - cmd = RTM_NEWNEIGH; - else - cmd = RTM_DELNEIGH; - - memset(&req, 0, sizeof(req)); - - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); - req.n.nlmsg_flags = NLM_F_REQUEST; - if (cmd == RTM_NEWNEIGH) - req.n.nlmsg_flags |= (NLM_F_CREATE | NLM_F_REPLACE); - req.n.nlmsg_type = cmd; - req.ndm.ndm_family = AF_BRIDGE; - req.ndm.ndm_flags |= NTF_SELF | NTF_MASTER; - req.ndm.ndm_state = NUD_REACHABLE; + flags = (NTF_SELF | NTF_MASTER); + state = NUD_REACHABLE; if (dplane_ctx_mac_is_sticky(ctx)) - req.ndm.ndm_state |= NUD_NOARP; + state |= NUD_NOARP; else - req.ndm.ndm_flags |= NTF_EXT_LEARNED; - - addattr_l(&req.n, sizeof(req), - NDA_PROTOCOL, &protocol, sizeof(protocol)); - addattr_l(&req.n, sizeof(req), NDA_LLADDR, - dplane_ctx_mac_get_addr(ctx), 6); - req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); - - dst_alen = 4; // TODO: hardcoded - vtep_ip = *(dplane_ctx_mac_get_vtep_ip(ctx)); - addattr_l(&req.n, sizeof(req), NDA_DST, &vtep_ip, dst_alen); + flags |= NTF_EXT_LEARNED; - vid = dplane_ctx_mac_get_vlan(ctx); - - if (vid > 0) { - addattr16(&req.n, sizeof(req), NDA_VLAN, vid); - vid_present = 1; - } - addattr32(&req.n, sizeof(req), NDA_MASTER, - dplane_ctx_mac_get_br_ifindex(ctx)); + vtep_ip.ipaddr_v4 = *(dplane_ctx_mac_get_vtep_ip(ctx)); + SET_IPADDR_V4(&vtep_ip); if (IS_ZEBRA_DEBUG_KERNEL) { char ipbuf[PREFIX_STRLEN]; char buf[ETHER_ADDR_STRLEN]; - char dst_buf[PREFIX_STRLEN + 10]; char vid_buf[20]; - if (vid_present) + vid = dplane_ctx_mac_get_vlan(ctx); + if (vid > 0) snprintf(vid_buf, sizeof(vid_buf), " VLAN %u", vid); else vid_buf[0] = '\0'; - inet_ntop(AF_INET, &vtep_ip, ipbuf, sizeof(ipbuf)); - snprintf(dst_buf, sizeof(dst_buf), " dst %s", ipbuf); - prefix_mac2str(dplane_ctx_mac_get_addr(ctx), buf, sizeof(buf)); + const struct ethaddr *mac = dplane_ctx_mac_get_addr(ctx); - zlog_debug("Tx %s family %s IF %s(%u)%s %sMAC %s%s", - nl_msg_type_to_str(cmd), - nl_family_to_str(req.ndm.ndm_family), + zlog_debug("Tx %s family %s IF %s(%u)%s %sMAC %s dst %s", + nl_msg_type_to_str(cmd), nl_family_to_str(AF_BRIDGE), dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx), vid_buf, dplane_ctx_mac_is_sticky(ctx) ? 
"sticky " : "", - buf, dst_buf); + prefix_mac2str(mac, buf, sizeof(buf)), + ipaddr2str(&vtep_ip, ipbuf, sizeof(ipbuf))); } - ret = netlink_talk_info(netlink_talk_filter, &req.n, - dplane_ctx_get_ns(ctx), 0); - if (ret == 0) - return ZEBRA_DPLANE_REQUEST_SUCCESS; - else - return ZEBRA_DPLANE_REQUEST_FAILURE; + return netlink_update_neigh_ctx_internal( + ctx, cmd, dplane_ctx_mac_get_addr(ctx), + dplane_ctx_neigh_get_ipaddr(ctx), true, AF_BRIDGE, 0, flags, + state); } /* @@ -3023,6 +3015,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) struct interface *link_if; struct ethaddr mac; struct ipaddr ip; + struct vrf *vrf; char buf[ETHER_ADDR_STRLEN]; char buf2[INET6_ADDRSTRLEN]; int mac_present = 0; @@ -3037,6 +3030,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (!ifp || !ifp->info) return 0; + vrf = vrf_lookup_by_id(ifp->vrf_id); zif = (struct zebra_if *)ifp->info; /* Parse attributes and extract fields of interest. */ @@ -3044,10 +3038,10 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) netlink_parse_rtattr(tb, NDA_MAX, NDA_RTA(ndm), len); if (!tb[NDA_DST]) { - zlog_debug("%s family %s IF %s(%u) - no DST", + zlog_debug("%s family %s IF %s(%u) vrf %s(%u) - no DST", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, - ndm->ndm_ifindex); + ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id); return 0; } @@ -3099,12 +3093,13 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (RTA_PAYLOAD(tb[NDA_LLADDR]) != ETH_ALEN) { if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "%s family %s IF %s(%u) - LLADDR is not MAC, len %lu", + "%s family %s IF %s(%u) vrf %s(%u) - LLADDR is not MAC, len %lu", nl_msg_type_to_str( h->nlmsg_type), nl_family_to_str( ndm->ndm_family), ifp->name, ndm->ndm_ifindex, + VRF_LOGNAME(vrf), ifp->vrf_id, (unsigned long)RTA_PAYLOAD( tb[NDA_LLADDR])); return 0; @@ -3119,10 +3114,10 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "Rx %s family %s IF %s(%u) IP %s MAC %s state 0x%x flags 0x%x", + "Rx %s family %s IF %s(%u) vrf %s(%u) IP %s MAC %s state 0x%x flags 0x%x", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, - ndm->ndm_ifindex, + ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id, ipaddr2str(&ip, buf2, sizeof(buf2)), mac_present ? 
prefix_mac2str(&mac, buf, sizeof(buf)) @@ -3144,10 +3139,10 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) } if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("Rx %s family %s IF %s(%u) IP %s", + zlog_debug("Rx %s family %s IF %s(%u) vrf %s(%u) IP %s", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, - ndm->ndm_ifindex, + ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id, ipaddr2str(&ip, buf2, sizeof(buf2))); /* Process the delete - it may result in re-adding the neighbor if it is @@ -3301,9 +3296,10 @@ int netlink_neigh_read_specific_ip(struct ipaddr *ip, zebra_dplane_info_from_zns(&dp_info, zns, true /*is_cmd*/); if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: neigh request IF %s(%u) IP %s vrf_id %u", + zlog_debug("%s: neigh request IF %s(%u) IP %s vrf %s(%u)", __func__, vlan_if->name, vlan_if->ifindex, - ipaddr2str(ip, buf, sizeof(buf)), vlan_if->vrf_id); + ipaddr2str(ip, buf, sizeof(buf)), + vrf_id_to_name(vlan_if->vrf_id), vlan_if->vrf_id); ret = netlink_request_specific_neigh_in_vlan(zns, RTM_GETNEIGH, ip, vlan_if->ifindex); @@ -3361,21 +3357,11 @@ int netlink_neigh_change(struct nlmsghdr *h, ns_id_t ns_id) static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, int cmd) { - uint8_t protocol = RTPROT_ZEBRA; - struct { - struct nlmsghdr n; - struct ndmsg ndm; - char buf[256]; - } req; - int ipa_len; - char buf[INET6_ADDRSTRLEN]; - char buf2[ETHER_ADDR_STRLEN]; const struct ipaddr *ip; const struct ethaddr *mac; uint8_t flags; uint16_t state; - - memset(&req, 0, sizeof(req)); + uint8_t family; ip = dplane_ctx_neigh_get_ipaddr(ctx); mac = dplane_ctx_neigh_get_mac(ctx); @@ -3385,37 +3371,23 @@ static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, flags = neigh_flags_to_netlink(dplane_ctx_neigh_get_flags(ctx)); state = neigh_state_to_netlink(dplane_ctx_neigh_get_state(ctx)); - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)); - req.n.nlmsg_flags = NLM_F_REQUEST; - if (cmd == RTM_NEWNEIGH) - req.n.nlmsg_flags |= (NLM_F_CREATE | NLM_F_REPLACE); - req.n.nlmsg_type = cmd; // RTM_NEWNEIGH or RTM_DELNEIGH - req.ndm.ndm_family = IS_IPADDR_V4(ip) ? AF_INET : AF_INET6; - req.ndm.ndm_state = state; - req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx); - req.ndm.ndm_type = RTN_UNICAST; - req.ndm.ndm_flags = flags; + family = IS_IPADDR_V4(ip) ? AF_INET : AF_INET6; - addattr_l(&req.n, sizeof(req), - NDA_PROTOCOL, &protocol, sizeof(protocol)); - ipa_len = IS_IPADDR_V4(ip) ? IPV4_MAX_BYTELEN : IPV6_MAX_BYTELEN; - addattr_l(&req.n, sizeof(req), NDA_DST, &ip->ip.addr, ipa_len); - if (mac) - addattr_l(&req.n, sizeof(req), NDA_LLADDR, mac, 6); + if (IS_ZEBRA_DEBUG_KERNEL) { + char buf[INET6_ADDRSTRLEN]; + char buf2[ETHER_ADDR_STRLEN]; - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x", - nl_msg_type_to_str(cmd), - nl_family_to_str(req.ndm.ndm_family), - dplane_ctx_get_ifname(ctx), - dplane_ctx_get_ifindex(ctx), - ipaddr2str(ip, buf, sizeof(buf)), - mac ? prefix_mac2str(mac, buf2, sizeof(buf2)) - : "null", - flags, state); + zlog_debug( + "Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x", + nl_msg_type_to_str(cmd), nl_family_to_str(family), + dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx), + ipaddr2str(ip, buf, sizeof(buf)), + mac ? 
prefix_mac2str(mac, buf2, sizeof(buf2)) : "null", + flags, state); + } - return netlink_talk_info(netlink_talk_filter, &req.n, - dplane_ctx_get_ns(ctx), 0); + return netlink_update_neigh_ctx_internal( + ctx, cmd, mac, ip, true, family, RTN_UNICAST, flags, state); } /* @@ -3423,7 +3395,13 @@ static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, */ enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx) { - return netlink_macfdb_update_ctx(ctx); + int cmd = dplane_ctx_get_op(ctx) == DPLANE_OP_MAC_INSTALL + ? RTM_NEWNEIGH + : RTM_DELNEIGH; + int ret = netlink_macfdb_update_ctx(ctx, cmd); + + return (ret == 0 ? ZEBRA_DPLANE_REQUEST_SUCCESS + : ZEBRA_DPLANE_REQUEST_FAILURE); } enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx) @@ -3464,6 +3442,7 @@ int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx) unsigned int nexthop_num; const char *routedesc; int route_type; + struct prefix p = {0}; struct { struct nlmsghdr n; @@ -3550,8 +3529,7 @@ int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx) NEXTHOP_FLAG_FIB)))) { /* Add the gateway */ _netlink_mpls_build_singlepath( - routedesc, nhlfe, - &req.n, &req.r, + &p, routedesc, nhlfe, &req.n, &req.r, sizeof(req), cmd); nexthop_num++; @@ -3591,9 +3569,9 @@ int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx) nexthop_num++; /* Build the multipath */ - _netlink_mpls_build_multipath(routedesc, nhlfe, - rta, rtnh, &req.r, - &src1); + _netlink_mpls_build_multipath(&p, routedesc, + nhlfe, rta, rtnh, + &req.r, &src1); rtnh = RTNH_NEXT(rtnh); } } diff --git a/zebra/rtadv.c b/zebra/rtadv.c index 60ac471b5a..a22e39dc48 100644 --- a/zebra/rtadv.c +++ b/zebra/rtadv.c @@ -204,9 +204,12 @@ static void rtadv_send_packet(int sock, struct interface *ifp, } /* Logging of packet. */ - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Tx RA, socket %u", ifp->name, ifp->ifindex, - sock); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Tx RA, socket %u", ifp->name, + VRF_LOGNAME(vrf), ifp->ifindex, sock); + } /* Fill in sockaddr_in6. 
*/ memset(&addr, 0, sizeof(struct sockaddr_in6)); @@ -333,16 +336,6 @@ static void rtadv_send_packet(int sock, struct interface *ifp, IPV6_ADDR_COPY(&pinfo->nd_opt_pi_prefix, &rprefix->prefix.prefix); -#ifdef DEBUG - { - uint8_t buf[INET6_ADDRSTRLEN]; - - zlog_debug("DEBUG %s", - inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, - buf, INET6_ADDRSTRLEN)); - } -#endif /* DEBUG */ - len += sizeof(struct nd_opt_prefix_info); } @@ -388,9 +381,11 @@ static void rtadv_send_packet(int sock, struct interface *ifp, sizeof(struct nd_opt_rdnss) + sizeof(struct in6_addr); if (len + opt_len > max_len) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + zlog_warn( - "%s(%u): Tx RA: RDNSS option would exceed MTU, omitting it", - ifp->name, ifp->ifindex); + "%s(%s:%u): Tx RA: RDNSS option would exceed MTU, omitting it", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex); goto no_more_opts; } struct nd_opt_rdnss *opt = (struct nd_opt_rdnss *)(buf + len); @@ -510,10 +505,17 @@ static int rtadv_timer(struct thread *thread) <= 0) zif->rtadv.inFastRexmit = 0; - if (IS_ZEBRA_DEBUG_SEND) + if (IS_ZEBRA_DEBUG_SEND) { + struct vrf *vrf = + vrf_lookup_by_id( + ifp->vrf_id); + zlog_debug( - "Fast RA Rexmit on interface %s", - ifp->name); + "Fast RA Rexmit on interface %s(%s:%u)", + ifp->name, + VRF_LOGNAME(vrf), + ifp->ifindex); + } rtadv_send_packet(rtadv_get_socket(zvrf), ifp, RA_ENABLE); @@ -612,9 +614,14 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len, inet_ntop(AF_INET6, &addr->sin6_addr, addr_str, INET6_ADDRSTRLEN); if (len < sizeof(struct nd_router_advert)) { - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Rx RA with invalid length %d from %s", - ifp->name, ifp->ifindex, len, addr_str); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug( + "%s(%s:%u): Rx RA with invalid length %d from %s", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, len, + addr_str); + } return; } @@ -622,9 +629,14 @@ static void rtadv_process_advert(uint8_t *msg, unsigned int len, rtadv_process_optional(msg + sizeof(struct nd_router_advert), len - sizeof(struct nd_router_advert), ifp, addr); - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Rx RA with non-linklocal source address from %s", - ifp->name, ifp->ifindex, addr_str); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug( + "%s(%s:%u): Rx RA with non-linklocal source address from %s", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, + addr_str); + } return; } @@ -703,9 +715,12 @@ static void rtadv_process_packet(uint8_t *buf, unsigned int len, return; } - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("%s(%u): Rx RA/RS len %d from %s", ifp->name, - ifp->ifindex, len, addr_str); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Rx RA/RS len %d from %s", ifp->name, + VRF_LOGNAME(vrf), ifp->ifindex, len, addr_str); + } if (if_is_loopback(ifp) || CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK)) @@ -718,8 +733,11 @@ static void rtadv_process_packet(uint8_t *buf, unsigned int len, /* ICMP message length check. 
*/ if (len < sizeof(struct icmp6_hdr)) { - zlog_debug("%s(%u): Rx RA with Invalid ICMPV6 packet length %d", - ifp->name, ifp->ifindex, len); + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug( + "%s(%s:%u): Rx RA with Invalid ICMPV6 packet length %d", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, len); return; } @@ -728,15 +746,20 @@ static void rtadv_process_packet(uint8_t *buf, unsigned int len, /* ICMP message type check. */ if (icmph->icmp6_type != ND_ROUTER_SOLICIT && icmph->icmp6_type != ND_ROUTER_ADVERT) { - zlog_debug("%s(%u): Rx RA - Unwanted ICMPV6 message type %d", - ifp->name, ifp->ifindex, icmph->icmp6_type); + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Rx RA - Unwanted ICMPV6 message type %d", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, + icmph->icmp6_type); return; } /* Hoplimit check. */ if (hoplimit >= 0 && hoplimit != 255) { - zlog_debug("%s(%u): Rx RA - Invalid hoplimit %d", ifp->name, - ifp->ifindex, hoplimit); + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + + zlog_debug("%s(%s:%u): Rx RA - Invalid hoplimit %d", ifp->name, + VRF_LOGNAME(vrf), ifp->ifindex, hoplimit); return; } @@ -1055,25 +1078,34 @@ static void zebra_interface_radv_set(ZAPI_HANDLER_ARGS, int enable) unsigned int ra_interval = ra_interval_rxd; - if (IS_ZEBRA_DEBUG_EVENT) - zlog_debug("%u: IF %u RA %s from client %s, interval %ums", - zvrf_id(zvrf), ifindex, + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = zvrf->vrf; + + zlog_debug("%s:%u: IF %u RA %s from client %s, interval %ums", + VRF_LOGNAME(vrf), zvrf_id(zvrf), ifindex, enable ? "enable" : "disable", zebra_route_string(client->proto), ra_interval); + } /* Locate interface and check VRF match. */ ifp = if_lookup_by_index(ifindex, zvrf->vrf->vrf_id); if (!ifp) { + struct vrf *vrf = zvrf->vrf; + flog_warn(EC_ZEBRA_UNKNOWN_INTERFACE, - "%u: IF %u RA %s client %s - interface unknown", - zvrf_id(zvrf), ifindex, enable ? "enable" : "disable", + "%s:%u: IF %u RA %s client %s - interface unknown", + VRF_LOGNAME(vrf), zvrf_id(zvrf), ifindex, + enable ? "enable" : "disable", zebra_route_string(client->proto)); return; } if (ifp->vrf_id != zvrf_id(zvrf)) { + struct vrf *vrf = zvrf->vrf; + zlog_debug( - "%u: IF %u RA %s client %s - VRF mismatch, IF VRF %u", - zvrf_id(zvrf), ifindex, enable ? "enable" : "disable", + "%s:%u: IF %u RA %s client %s - VRF mismatch, IF VRF %u", + VRF_LOGNAME(vrf), zvrf_id(zvrf), ifindex, + enable ? 
"enable" : "disable", zebra_route_string(client->proto), ifp->vrf_id); return; } @@ -2329,6 +2361,13 @@ static void rtadv_event(struct zebra_vrf *zvrf, enum rtadv_event event, int val) { struct rtadv *rtadv = &zvrf->rtadv; + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = zvrf->vrf; + + zlog_debug("%s(%s) with event: %d and val: %d", __func__, + VRF_LOGNAME(vrf), event, val); + } + switch (event) { case RTADV_START: thread_add_read(zrouter.master, rtadv_read, zvrf, val, @@ -2371,20 +2410,26 @@ void rtadv_init(struct zebra_vrf *zvrf) } } -void rtadv_terminate(struct zebra_vrf *zvrf) +void rtadv_vrf_terminate(struct zebra_vrf *zvrf) { rtadv_event(zvrf, RTADV_STOP, 0); if (zvrf->rtadv.sock >= 0) { close(zvrf->rtadv.sock); zvrf->rtadv.sock = -1; - } else if (zrouter.rtadv_sock >= 0) { - close(zrouter.rtadv_sock); - zrouter.rtadv_sock = -1; } + zvrf->rtadv.adv_if_count = 0; zvrf->rtadv.adv_msec_if_count = 0; } +void rtadv_terminate(void) +{ + if (zrouter.rtadv_sock >= 0) { + close(zrouter.rtadv_sock); + zrouter.rtadv_sock = -1; + } +} + void rtadv_cmd_init(void) { hook_register(zebra_if_extra_info, nd_dump_vty); @@ -2445,10 +2490,13 @@ static int if_join_all_router(int sock, struct interface *ifp) ifp->name, ifp->ifindex, sock, safe_strerror(errno)); - if (IS_ZEBRA_DEBUG_EVENT) + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + zlog_debug( - "%s(%u): Join All-Routers multicast group, socket %u", - ifp->name, ifp->ifindex, sock); + "%s(%s:%u): Join All-Routers multicast group, socket %u", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, sock); + } return 0; } @@ -2465,17 +2513,22 @@ static int if_leave_all_router(int sock, struct interface *ifp) ret = setsockopt(sock, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (char *)&mreq, sizeof(mreq)); - if (ret < 0) + if (ret < 0) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); + flog_err_sys( EC_LIB_SOCKET, - "%s(%u): Failed to leave group, socket %u error %s", - ifp->name, ifp->ifindex, sock, safe_strerror(errno)); + "%s(%s:%u): Failed to leave group, socket %u error %s", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, sock, + safe_strerror(errno)); + } + if (IS_ZEBRA_DEBUG_EVENT) { + struct vrf *vrf = vrf_lookup_by_id(ifp->vrf_id); - if (IS_ZEBRA_DEBUG_EVENT) zlog_debug( - "%s(%u): Leave All-Routers multicast group, socket %u", - ifp->name, ifp->ifindex, sock); - + "%s(%s:%u): Leave All-Routers multicast group, socket %u", + ifp->name, VRF_LOGNAME(vrf), ifp->ifindex, sock); + } return 0; } diff --git a/zebra/rtadv.h b/zebra/rtadv.h index 64b28cbfd6..68a5bbcdbe 100644 --- a/zebra/rtadv.h +++ b/zebra/rtadv.h @@ -153,7 +153,8 @@ typedef enum { } ipv6_nd_suppress_ra_status; extern void rtadv_init(struct zebra_vrf *zvrf); -extern void rtadv_terminate(struct zebra_vrf *zvrf); +extern void rtadv_vrf_terminate(struct zebra_vrf *zvrf); +extern void rtadv_terminate(void); extern void rtadv_stop_ra(struct interface *ifp); extern void rtadv_stop_ra_all(void); extern void rtadv_cmd_init(void); diff --git a/zebra/subdir.am b/zebra/subdir.am index 1d49de5410..f281afce94 100644 --- a/zebra/subdir.am +++ b/zebra/subdir.am @@ -75,6 +75,7 @@ zebra_zebra_SOURCES = \ zebra/zebra_mlag.c \ zebra/zebra_mlag_vty.c \ zebra/zebra_l2.c \ + zebra/zebra_northbound.c \ zebra/zebra_memory.c \ zebra/zebra_dplane.c \ zebra/zebra_mpls.c \ @@ -191,5 +192,10 @@ zebra_zebra_fpm_la_SOURCES += zebra/zebra_fpm_dt.c endif endif +nodist_zebra_zebra_SOURCES = \ + yang/frr-nexthop.yang.c \ + yang/frr-zebra.yang.c \ + # end + zebra_zebra_cumulus_mlag_la_SOURCES = 
zebra/zebra_mlag_private.c zebra_zebra_cumulus_mlag_la_LDFLAGS = -avoid-version -module -shared -export-dynamic diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 88e3ce68d3..a58df82698 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -383,9 +383,14 @@ static void zebra_interface_nbr_address_add_update(struct interface *ifp, p->prefixlen, ifc->ifp->name); } - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_ADD, client, ifp, ifc); + } } /* Interface address deletion. */ @@ -407,9 +412,14 @@ static void zebra_interface_nbr_address_delete_update(struct interface *ifp, p->prefixlen, ifc->ifp->name); } - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_DELETE, client, ifp, ifc); + } } /* Send addresses on interface to client */ @@ -1063,7 +1073,7 @@ static void zread_rnh_register(ZAPI_HANDLER_ARGS) if (IS_ZEBRA_DEBUG_NHT) zlog_debug( - "rnh_register msg from client %s: hdr->length=%d, type=%s vrf=%u\n", + "rnh_register msg from client %s: hdr->length=%d, type=%s vrf=%u", zebra_route_string(client->proto), hdr->length, (type == RNH_NEXTHOP_TYPE) ? "nexthop" : "route", zvrf->vrf->vrf_id); @@ -1152,7 +1162,7 @@ static void zread_rnh_unregister(ZAPI_HANDLER_ARGS) if (IS_ZEBRA_DEBUG_NHT) zlog_debug( - "rnh_unregister msg from client %s: hdr->length=%d vrf: %u\n", + "rnh_unregister msg from client %s: hdr->length=%d vrf: %u", zebra_route_string(client->proto), hdr->length, zvrf->vrf->vrf_id); @@ -1403,6 +1413,132 @@ void zserv_nexthop_num_warn(const char *caller, const struct prefix *p, } } +/* + * Create a new nexthop based on a zapi nexthop. + */ +static struct nexthop *nexthop_from_zapi(struct route_entry *re, + const struct zapi_nexthop *api_nh, + const struct zapi_route *api) +{ + struct nexthop *nexthop = NULL; + struct ipaddr vtep_ip; + struct interface *ifp; + char nhbuf[INET6_ADDRSTRLEN] = ""; + + switch (api_nh->type) { + case NEXTHOP_TYPE_IFINDEX: + nexthop = nexthop_from_ifindex(api_nh->ifindex, api_nh->vrf_id); + break; + case NEXTHOP_TYPE_IPV4: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d", __func__, + nhbuf, api_nh->vrf_id); + } + nexthop = nexthop_from_ipv4(&api_nh->gate.ipv4, NULL, + api_nh->vrf_id); + break; + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d, ifindex=%d", + __func__, nhbuf, api_nh->vrf_id, + api_nh->ifindex); + } + + nexthop = nexthop_from_ipv4_ifindex( + &api_nh->gate.ipv4, NULL, api_nh->ifindex, + api_nh->vrf_id); + + ifp = if_lookup_by_index(api_nh->ifindex, api_nh->vrf_id); + if (ifp && connected_is_unnumbered(ifp)) + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); + + /* Special handling for IPv4 routes sourced from EVPN: + * the nexthop and associated MAC need to be installed. 
+ */ + if (CHECK_FLAG(api->flags, ZEBRA_FLAG_EVPN_ROUTE)) { + memset(&vtep_ip, 0, sizeof(struct ipaddr)); + vtep_ip.ipa_type = IPADDR_V4; + memcpy(&(vtep_ip.ipaddr_v4), &(api_nh->gate.ipv4), + sizeof(struct in_addr)); + zebra_vxlan_evpn_vrf_route_add( + api_nh->vrf_id, &api_nh->rmac, + &vtep_ip, &api->prefix); + } + break; + case NEXTHOP_TYPE_IPV6: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET6, &api_nh->gate.ipv6, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d", __func__, + nhbuf, api_nh->vrf_id); + } + nexthop = nexthop_from_ipv6(&api_nh->gate.ipv6, api_nh->vrf_id); + break; + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (IS_ZEBRA_DEBUG_RECV) { + inet_ntop(AF_INET6, &api_nh->gate.ipv6, nhbuf, + sizeof(nhbuf)); + zlog_debug("%s: nh=%s, vrf_id=%d, ifindex=%d", + __func__, nhbuf, api_nh->vrf_id, + api_nh->ifindex); + } + nexthop = nexthop_from_ipv6_ifindex(&api_nh->gate.ipv6, + api_nh->ifindex, + api_nh->vrf_id); + + /* Special handling for IPv6 routes sourced from EVPN: + * the nexthop and associated MAC need to be installed. + */ + if (CHECK_FLAG(api->flags, ZEBRA_FLAG_EVPN_ROUTE)) { + memset(&vtep_ip, 0, sizeof(struct ipaddr)); + vtep_ip.ipa_type = IPADDR_V6; + memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6), + sizeof(struct in6_addr)); + zebra_vxlan_evpn_vrf_route_add( + api_nh->vrf_id, &api_nh->rmac, + &vtep_ip, &api->prefix); + } + break; + case NEXTHOP_TYPE_BLACKHOLE: + if (IS_ZEBRA_DEBUG_RECV) + zlog_debug("%s: nh blackhole %d", + __func__, api_nh->bh_type); + + nexthop = nexthop_from_blackhole(api_nh->bh_type); + break; + } + + /* Return early if we couldn't process the zapi nexthop */ + if (nexthop == NULL) { + goto done; + } + + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK)) + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); + + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_WEIGHT)) + nexthop->weight = api_nh->weight; + + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) { + if (api_nh->backup_idx < api->backup_nexthop_num) { + /* Capture backup info */ + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP); + nexthop->backup_idx = api_nh->backup_idx; + } else { + /* Warn about invalid backup index */ + if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_EVENT) + zlog_debug("%s: invalid backup nh idx %d", + __func__, api_nh->backup_idx); + } + } +done: + return nexthop; +} + static void zread_route_add(ZAPI_HANDLER_ARGS) { struct stream *s; @@ -1411,12 +1547,15 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) afi_t afi; struct prefix_ipv6 *src_p = NULL; struct route_entry *re; - struct nexthop *nexthop = NULL; + struct nexthop *nexthop = NULL, *last_nh; struct nexthop_group *ng = NULL; + struct nhg_backup_info *bnhg = NULL; int i, ret; vrf_id_t vrf_id; - struct ipaddr vtep_ip; - struct interface *ifp; + struct nhg_hash_entry nhe; + enum lsp_types_t label_type; + char nhbuf[NEXTHOP_STRLEN]; + char labelbuf[MPLS_LABEL_STRLEN]; s = msg; if (zapi_route_decode(s, &api) < 0) { @@ -1430,8 +1569,8 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) char buf_prefix[PREFIX_STRLEN]; prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix)); - zlog_debug("%s: p=%s, flags=0x%x", - __func__, buf_prefix, api.flags); + zlog_debug("%s: p=%s, msg flags=0x%x, flags=0x%x", + __func__, buf_prefix, (int)api.message, api.flags); } /* Allocate new route. 
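The new nexthop_from_zapi() helper above only records a backup reference when the advertised index is smaller than the route's backup_nexthop_num, and logs rather than installs an out-of-range index. A compilable sketch of just that validation step; the demo_* structs are simplified stand-ins, not the real zapi/nexthop types.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the fields the check actually uses. */
struct demo_api_nh {
	bool has_backup;          /* "has backup" flag from the sender */
	unsigned int backup_idx;  /* index into the route's backup array */
};

struct demo_nexthop {
	bool has_backup;
	unsigned int backup_idx;
};

/* Accept the backup reference only if it points inside the backup array. */
static void demo_capture_backup(struct demo_nexthop *nh,
				const struct demo_api_nh *api_nh,
				unsigned int backup_nexthop_num)
{
	if (!api_nh->has_backup)
		return;

	if (api_nh->backup_idx < backup_nexthop_num) {
		nh->has_backup = true;
		nh->backup_idx = api_nh->backup_idx;
	} else {
		/* Out-of-range index: warn and ignore instead of installing. */
		fprintf(stderr, "invalid backup nh idx %u (only %u backups)\n",
			api_nh->backup_idx, backup_nexthop_num);
	}
}

int main(void)
{
	struct demo_nexthop nh = { 0 };
	struct demo_api_nh good = { true, 1 }, bad = { true, 5 };

	demo_capture_backup(&nh, &good, 2); /* accepted */
	demo_capture_backup(&nh, &bad, 2);  /* rejected, warning printed */
	return 0;
}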
*/ @@ -1459,6 +1598,15 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) return; } + /* Report misuse of the backup flag */ + if (CHECK_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS) && + api.backup_nexthop_num == 0) { + if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_EVENT) + zlog_debug("%s: client %s: BACKUP flag set but no backup nexthops, prefix %pFX", + __func__, + zebra_route_string(client->proto), &api.prefix); + } + /* Use temporary list of nexthops */ ng = nexthop_group_new(); @@ -1469,130 +1617,138 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) */ for (i = 0; i < api.nexthop_num; i++) { api_nh = &api.nexthops[i]; - ifindex_t ifindex = 0; - nexthop = NULL; + /* Convert zapi nexthop */ + nexthop = nexthop_from_zapi(re, api_nh, &api); + if (!nexthop) { + flog_warn( + EC_ZEBRA_NEXTHOP_CREATION_FAILED, + "%s: Nexthops Specified: %d but we failed to properly create one", + __func__, api.nexthop_num); + nexthop_group_delete(&ng); + XFREE(MTYPE_RE, re); + return; + } - if (IS_ZEBRA_DEBUG_RECV) - zlog_debug("nh type %d", api_nh->type); + /* MPLS labels for BGP-LU or Segment Routing */ + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL) + && api_nh->type != NEXTHOP_TYPE_IFINDEX + && api_nh->type != NEXTHOP_TYPE_BLACKHOLE + && api_nh->label_num > 0) { - switch (api_nh->type) { - case NEXTHOP_TYPE_IFINDEX: - nexthop = nexthop_from_ifindex(api_nh->ifindex, - api_nh->vrf_id); - break; - case NEXTHOP_TYPE_IPV4: - if (IS_ZEBRA_DEBUG_RECV) { - char nhbuf[INET6_ADDRSTRLEN] = {0}; + label_type = lsp_type_from_re_type(client->proto); + nexthop_add_labels(nexthop, label_type, + api_nh->label_num, + &api_nh->labels[0]); + } - inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, - INET6_ADDRSTRLEN); - zlog_debug("%s: nh=%s, vrf_id=%d", __func__, - nhbuf, api_nh->vrf_id); - } - nexthop = nexthop_from_ipv4(&api_nh->gate.ipv4, - NULL, api_nh->vrf_id); - break; - case NEXTHOP_TYPE_IPV4_IFINDEX: + if (IS_ZEBRA_DEBUG_RECV) { + labelbuf[0] = '\0'; + nhbuf[0] = '\0'; - memset(&vtep_ip, 0, sizeof(struct ipaddr)); - ifindex = api_nh->ifindex; - if (IS_ZEBRA_DEBUG_RECV) { - char nhbuf[INET6_ADDRSTRLEN] = {0}; + nexthop2str(nexthop, nhbuf, sizeof(nhbuf)); - inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf, - INET6_ADDRSTRLEN); - zlog_debug( - "%s: nh=%s, vrf_id=%d (re->vrf_id=%d), ifindex=%d", - __func__, nhbuf, api_nh->vrf_id, - re->vrf_id, ifindex); - } - nexthop = nexthop_from_ipv4_ifindex( - &api_nh->gate.ipv4, NULL, ifindex, - api_nh->vrf_id); - - ifp = if_lookup_by_index(ifindex, api_nh->vrf_id); - if (ifp && connected_is_unnumbered(ifp)) - SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); - /* Special handling for IPv4 routes sourced from EVPN: - * the nexthop and associated MAC need to be installed. - */ - if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) { - vtep_ip.ipa_type = IPADDR_V4; - memcpy(&(vtep_ip.ipaddr_v4), - &(api_nh->gate.ipv4), - sizeof(struct in_addr)); - zebra_vxlan_evpn_vrf_route_add( - api_nh->vrf_id, &api_nh->rmac, - &vtep_ip, &api.prefix); - } - break; - case NEXTHOP_TYPE_IPV6: - nexthop = nexthop_from_ipv6(&api_nh->gate.ipv6, - api_nh->vrf_id); - break; - case NEXTHOP_TYPE_IPV6_IFINDEX: - memset(&vtep_ip, 0, sizeof(struct ipaddr)); - ifindex = api_nh->ifindex; - nexthop = nexthop_from_ipv6_ifindex(&api_nh->gate.ipv6, - ifindex, - api_nh->vrf_id); - - /* Special handling for IPv6 routes sourced from EVPN: - * the nexthop and associated MAC need to be installed. 
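zread_route_add() above now pushes every primary nexthop through nexthop_group_add_sorted() so the temporary group stays in canonical order and can be hashed and compared cheaply later. A minimal sketch of sorted insertion into a singly linked list, assuming a toy demo_nh ordered by one integer key (the real code orders full nexthops):

#include <stdio.h>
#include <stdlib.h>

/* Invented minimal nexthop: ordered by a single integer key. */
struct demo_nh {
	int key;
	struct demo_nh *next;
};

/* Insert keeping ascending order, so equal groups end up in identical
 * list order no matter how the sender happened to arrange them. */
static void demo_add_sorted(struct demo_nh **head, struct demo_nh *nh)
{
	struct demo_nh **pp = head;

	while (*pp && (*pp)->key <= nh->key)
		pp = &(*pp)->next;

	nh->next = *pp;
	*pp = nh;
}

int main(void)
{
	int keys[] = { 30, 10, 20 };
	struct demo_nh *head = NULL, *nh;

	for (int i = 0; i < 3; i++) {
		nh = calloc(1, sizeof(*nh));
		nh->key = keys[i];
		demo_add_sorted(&head, nh);
	}

	for (nh = head; nh; nh = nh->next)
		printf("%d ", nh->key);   /* 10 20 30 */
	printf("\n");

	while (head) {
		nh = head->next;
		free(head);
		head = nh;
	}
	return 0;
}

The backup nexthops, by contrast, are appended in arrival order in the backup-handling hunk further down, because primary nexthops refer to them by index.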
- */ - if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) { - vtep_ip.ipa_type = IPADDR_V6; - memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6), - sizeof(struct in6_addr)); - zebra_vxlan_evpn_vrf_route_add( - api_nh->vrf_id, &api_nh->rmac, - &vtep_ip, &api.prefix); + if (nexthop->nh_label && + nexthop->nh_label->num_labels > 0) { + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, + labelbuf, sizeof(labelbuf), + false); } - break; - case NEXTHOP_TYPE_BLACKHOLE: - nexthop = nexthop_from_blackhole(api_nh->bh_type); - break; + + zlog_debug("%s: nh=%s, vrf_id=%d %s", + __func__, nhbuf, api_nh->vrf_id, labelbuf); } + /* Add new nexthop to temporary list. This list is + * canonicalized - sorted - so that it can be hashed later + * in route processing. We expect that the sender has sent + * the list sorted, and the zapi client api attempts to enforce + * that, so this should be inexpensive - but it is necessary + * to support shared nexthop-groups. + */ + nexthop_group_add_sorted(ng, nexthop); + } + + /* Allocate temporary list of backup nexthops, if necessary */ + if (api.backup_nexthop_num > 0) { + if (IS_ZEBRA_DEBUG_RECV) + zlog_debug("%s: adding %d backup nexthops", + __func__, api.backup_nexthop_num); + + bnhg = zebra_nhg_backup_alloc(); + nexthop = NULL; + last_nh = NULL; + } + + /* Copy backup nexthops also, if present */ + for (i = 0; i < api.backup_nexthop_num; i++) { + api_nh = &api.backup_nexthops[i]; + + /* Convert zapi backup nexthop */ + nexthop = nexthop_from_zapi(re, api_nh, &api); if (!nexthop) { flog_warn( EC_ZEBRA_NEXTHOP_CREATION_FAILED, - "%s: Nexthops Specified: %d but we failed to properly create one", - __func__, api.nexthop_num); + "%s: Backup Nexthops Specified: %d but we failed to properly create one", + __func__, api.backup_nexthop_num); nexthop_group_delete(&ng); + zebra_nhg_backup_free(&bnhg); XFREE(MTYPE_RE, re); return; } - if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK)) - SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK); - - if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_WEIGHT)) - nexthop->weight = api_nh->weight; + /* Backup nexthops can't have backups; that's not valid. 
*/ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + if (IS_ZEBRA_DEBUG_RECV) { + nexthop2str(nexthop, nhbuf, sizeof(nhbuf)); + zlog_debug("%s: backup nh %s with BACKUP flag!", + __func__, nhbuf); + } + UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP); + nexthop->backup_idx = 0; + } /* MPLS labels for BGP-LU or Segment Routing */ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL) && api_nh->type != NEXTHOP_TYPE_IFINDEX - && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) { - enum lsp_types_t label_type; + && api_nh->type != NEXTHOP_TYPE_BLACKHOLE + && api_nh->label_num > 0) { label_type = lsp_type_from_re_type(client->proto); - - if (IS_ZEBRA_DEBUG_RECV) { - zlog_debug( - "%s: adding %d labels of type %d (1st=%u)", - __func__, api_nh->label_num, label_type, - api_nh->labels[0]); - } - nexthop_add_labels(nexthop, label_type, api_nh->label_num, &api_nh->labels[0]); } - /* Add new nexthop to temporary list */ - nexthop_group_add_sorted(ng, nexthop); + if (IS_ZEBRA_DEBUG_RECV) { + labelbuf[0] = '\0'; + nhbuf[0] = '\0'; + + nexthop2str(nexthop, nhbuf, sizeof(nhbuf)); + + if (nexthop->nh_label && + nexthop->nh_label->num_labels > 0) { + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, + labelbuf, sizeof(labelbuf), + false); + } + + zlog_debug("%s: backup nh=%s, vrf_id=%d %s", + __func__, nhbuf, api_nh->vrf_id, labelbuf); + } + + /* Note that the order of the backup nexthops is significant, + * so we don't sort this list as we do the primary nexthops, + * we just append. + */ + if (last_nh) + NEXTHOP_APPEND(last_nh, nexthop); + else + bnhg->nhe->nhg.nexthop = nexthop; + + last_nh = nexthop; } if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE)) @@ -1610,6 +1766,7 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) "%s: Received SRC Prefix but afi is not v6", __func__); nexthop_group_delete(&ng); + zebra_nhg_backup_free(&bnhg); XFREE(MTYPE_RE, re); return; } @@ -1621,10 +1778,28 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) "%s: Received safi: %d but we can only accept UNICAST or MULTICAST", __func__, api.safi); nexthop_group_delete(&ng); + zebra_nhg_backup_free(&bnhg); XFREE(MTYPE_RE, re); return; } - ret = rib_add_multipath(afi, api.safi, &api.prefix, src_p, re, ng); + + /* Include backup info with the route. We use a temporary nhe here; + * if this is a new/unknown nhe, a new copy will be allocated + * and stored. + */ + zebra_nhe_init(&nhe, afi, ng->nexthop); + nhe.nhg.nexthop = ng->nexthop; + nhe.backup_info = bnhg; + ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p, + re, &nhe); + + /* At this point, these allocations are not needed: 're' has been + * retained or freed, and if 're' still exists, it is using + * a reference to a shared group object. + */ + nexthop_group_delete(&ng); + if (bnhg) + zebra_nhg_backup_free(&bnhg); /* Stats */ switch (api.prefix.family) { @@ -1740,6 +1915,10 @@ void zsend_capabilities_all_clients(void) zvrf = vrf_info_lookup(VRF_DEFAULT); for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. 
*/ + if (client->synchronous) + continue; + zsend_capabilities(client, zvrf); } } @@ -1751,13 +1930,18 @@ static void zread_hello(ZAPI_HANDLER_ARGS) uint8_t proto; unsigned short instance; uint8_t notify; + uint8_t synchronous; STREAM_GETC(msg, proto); STREAM_GETW(msg, instance); STREAM_GETC(msg, notify); + STREAM_GETC(msg, synchronous); if (notify) client->notify_owner = true; + if (synchronous) + client->synchronous = true; + /* accept only dynamic routing protocols */ if ((proto < ZEBRA_ROUTE_MAX) && (proto > ZEBRA_ROUTE_CONNECT)) { zlog_notice( @@ -1774,8 +1958,10 @@ static void zread_hello(ZAPI_HANDLER_ARGS) zebra_gr_client_reconnect(client); } - zsend_capabilities(client, zvrf); - zebra_vrf_update_all(client); + if (!client->synchronous) { + zsend_capabilities(client, zvrf); + zebra_vrf_update_all(client); + } stream_failure: return; } @@ -2039,17 +2225,6 @@ static void zread_get_label_chunk(struct zserv *client, struct stream *msg, /* call hook to get a chunk using wrapper */ lm_get_chunk_call(&lmc, proto, instance, keep, size, base, vrf_id); - if (!lmc) - flog_err( - EC_ZEBRA_LM_CANNOT_ASSIGN_CHUNK, - "Unable to assign Label Chunk of size %u to %s instance %u", - size, zebra_route_string(proto), instance); - else - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("Assigned Label Chunk %u - %u to %s instance %u", - lmc->start, lmc->end, - zebra_route_string(proto), instance); - stream_failure: return; } @@ -2418,9 +2593,11 @@ static inline void zread_rule(ZAPI_HANDLER_ARGS) } if (!(zpr.rule.filter.dst_ip.family == AF_INET || zpr.rule.filter.dst_ip.family == AF_INET6)) { - zlog_warn("Unsupported PBR IP family: %s (%" PRIu8 ")", - family2str(zpr.rule.filter.dst_ip.family), - zpr.rule.filter.dst_ip.family); + zlog_warn( + "Unsupported PBR destination IP family: %s (%" PRIu8 + ")", + family2str(zpr.rule.filter.dst_ip.family), + zpr.rule.filter.dst_ip.family); return; } @@ -2512,14 +2689,18 @@ static inline void zread_ipset_entry(ZAPI_HANDLER_ARGS) if (!(zpi.dst.family == AF_INET || zpi.dst.family == AF_INET6)) { - zlog_warn("Unsupported PBR IP family: %s (%" PRIu8 ")", - family2str(zpi.dst.family), zpi.dst.family); + zlog_warn( + "Unsupported PBR destination IP family: %s (%" PRIu8 + ")", + family2str(zpi.dst.family), zpi.dst.family); goto stream_failure; } if (!(zpi.src.family == AF_INET || zpi.src.family == AF_INET6)) { - zlog_warn("Unsupported PBR IP family: %s (%" PRIu8 ")", - family2str(zpi.src.family), zpi.src.family); + zlog_warn( + "Unsupported PBR source IP family: %s (%" PRIu8 + ")", + family2str(zpi.src.family), zpi.src.family); goto stream_failure; } diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 459d2bc620..a2365ee76b 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -113,10 +113,15 @@ struct dplane_route_info { struct dplane_nexthop_info nhe; /* Nexthops */ + uint32_t zd_nhg_id; struct nexthop_group zd_ng; + /* Backup nexthops (if present) */ + struct nexthop_group backup_ng; + /* "Previous" nexthops, used only in route updates without netlink */ struct nexthop_group zd_old_ng; + struct nexthop_group old_backup_ng; /* TODO -- use fixed array of nexthops, to avoid mallocs? 
*/ @@ -472,6 +477,14 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) (*pctx)->u.rinfo.zd_ng.nexthop = NULL; } + /* Free backup info also (if present) */ + if ((*pctx)->u.rinfo.backup_ng.nexthop) { + /* This deals with recursive nexthops too */ + nexthops_free((*pctx)->u.rinfo.backup_ng.nexthop); + + (*pctx)->u.rinfo.backup_ng.nexthop = NULL; + } + if ((*pctx)->u.rinfo.zd_old_ng.nexthop) { /* This deals with recursive nexthops too */ nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop); @@ -479,6 +492,13 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx) (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL; } + if ((*pctx)->u.rinfo.old_backup_ng.nexthop) { + /* This deals with recursive nexthops too */ + nexthops_free((*pctx)->u.rinfo.old_backup_ng.nexthop); + + (*pctx)->u.rinfo.old_backup_ng.nexthop = NULL; + } + break; case DPLANE_OP_NH_INSTALL: @@ -1038,6 +1058,12 @@ void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh) nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh); } +uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + return ctx->u.rinfo.zd_nhg_id; +} + const struct nexthop_group *dplane_ctx_get_ng( const struct zebra_dplane_ctx *ctx) { @@ -1046,14 +1072,30 @@ const struct nexthop_group *dplane_ctx_get_ng( return &(ctx->u.rinfo.zd_ng); } -const struct nexthop_group *dplane_ctx_get_old_ng( - const struct zebra_dplane_ctx *ctx) +const struct nexthop_group * +dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + + return &(ctx->u.rinfo.backup_ng); +} + +const struct nexthop_group * +dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); return &(ctx->u.rinfo.zd_old_ng); } +const struct nexthop_group * +dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + + return &(ctx->u.rinfo.old_backup_ng); +} + const struct zebra_dplane_info *dplane_ctx_get_ns( const struct zebra_dplane_ctx *ctx) { @@ -1514,6 +1556,13 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, /* Copy nexthops; recursive info is included too */ copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->nhe->nhg.nexthop, NULL); + ctx->u.rinfo.zd_nhg_id = re->nhe->id; + + /* Copy backup nexthop info, if present */ + if (re->nhe->backup_info && re->nhe->backup_info->nhe) { + copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop), + re->nhe->backup_info->nhe->nhg.nexthop, NULL); + } /* Ensure that the dplane nexthops' flags are clear. 
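The zebra_dplane.c hunks above add backup_ng and old_backup_ng to the route context, fill them with copy_nexthops() when the route carries backup info, and release them in dplane_ctx_free(). A small ownership sketch of that copy-in/free-out pattern, assuming a toy list type instead of FRR's nexthop structures:

#include <stdlib.h>
#include <string.h>

struct demo_nh {
	int key;
	struct demo_nh *next;
};

/* Context owns private copies of both lists, so the dataplane side can keep
 * using them after the originating route entry changes or goes away. */
struct demo_ctx {
	struct demo_nh *ng;        /* primary nexthops */
	struct demo_nh *backup_ng; /* backup nexthops, may stay NULL */
};

static struct demo_nh *demo_copy_list(const struct demo_nh *src)
{
	struct demo_nh *head = NULL, **tail = &head;

	for (; src; src = src->next) {
		*tail = malloc(sizeof(**tail));
		memcpy(*tail, src, sizeof(**tail));
		(*tail)->next = NULL;
		tail = &(*tail)->next;
	}
	return head;
}

static void demo_free_list(struct demo_nh **head)
{
	while (*head) {
		struct demo_nh *next = (*head)->next;

		free(*head);
		*head = next;
	}
}

static void demo_ctx_init(struct demo_ctx *ctx, const struct demo_nh *ng,
			  const struct demo_nh *backup)
{
	ctx->ng = demo_copy_list(ng);
	ctx->backup_ng = backup ? demo_copy_list(backup) : NULL;
}

static void demo_ctx_free(struct demo_ctx *ctx)
{
	demo_free_list(&ctx->ng);
	demo_free_list(&ctx->backup_ng); /* safe when NULL: loop never runs */
}

int main(void)
{
	struct demo_nh a = { 1, NULL }, b = { 2, &a }; /* list: 2 -> 1 */
	struct demo_ctx ctx;

	demo_ctx_init(&ctx, &b, NULL); /* no backups on this route */
	demo_ctx_free(&ctx);
	return 0;
}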
*/ for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) @@ -1532,9 +1581,8 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE)); #ifdef HAVE_NETLINK - if (re->nhe_id) { - struct nhg_hash_entry *nhe = - zebra_nhg_resolve(zebra_nhg_lookup_id(re->nhe_id)); + if (re->nhe) { + struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe); ctx->u.rinfo.nhe.id = nhe->id; /* @@ -1581,7 +1629,6 @@ static int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, { struct zebra_vrf *zvrf = NULL; struct zebra_ns *zns = NULL; - int ret = EINVAL; if (!ctx || !nhe) @@ -1850,6 +1897,17 @@ dplane_route_update_internal(struct route_node *rn, */ copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop), old_re->nhe->nhg.nexthop, NULL); + + if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) { + struct nexthop_group *nhg; + struct nexthop **nh; + + nhg = zebra_nhg_get_backup_nhg(old_re->nhe); + nh = &(ctx->u.rinfo.old_backup_ng.nexthop); + + if (nhg->nexthop) + copy_nexthops(nh, nhg->nexthop, NULL); + } #endif /* !HAVE_NETLINK */ } diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h index c0b04e71b0..9ce4df197c 100644 --- a/zebra/zebra_dplane.h +++ b/zebra/zebra_dplane.h @@ -270,11 +270,19 @@ void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance); uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx); void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh); + +uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx); const struct nexthop_group *dplane_ctx_get_ng( const struct zebra_dplane_ctx *ctx); const struct nexthop_group *dplane_ctx_get_old_ng( const struct zebra_dplane_ctx *ctx); +/* Backup nexthop information (list of nexthops) if present. */ +const struct nexthop_group * +dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx); +const struct nexthop_group * +dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx); + /* Accessors for nexthop information */ uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx); afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx); diff --git a/zebra/zebra_fpm_protobuf.c b/zebra/zebra_fpm_protobuf.c index ade4b636d6..4b31cc0281 100644 --- a/zebra/zebra_fpm_protobuf.c +++ b/zebra/zebra_fpm_protobuf.c @@ -294,7 +294,7 @@ int zfpm_protobuf_encode_route(rib_dest_t *dest, struct route_entry *re, return 0; } - len = fpm__message__pack(msg, (uint8_t *)in_buf); + len = fpm__message__pack(msg, in_buf); assert(len <= in_buf_len); QPB_RESET_STACK_ALLOCATOR(allocator); diff --git a/zebra/zebra_mlag.c b/zebra/zebra_mlag.c index cf2fe26489..8ba7998f50 100644 --- a/zebra/zebra_mlag.c +++ b/zebra/zebra_mlag.c @@ -322,7 +322,7 @@ static int zebra_mlag_post_data_from_main_thread(struct thread *thread) STREAM_GETL(s, msg_type); if (IS_ZEBRA_DEBUG_MLAG) zlog_debug( - "%s: Posting MLAG data for msg_type:0x%x to interested cleints", + "%s: Posting MLAG data for msg_type:0x%x to interested clients", __func__, msg_type); msg_len = s->endp - ZEBRA_MLAG_METADATA_LEN; @@ -364,7 +364,7 @@ stream_failure: /* * Start the MLAG Thread, this will be used to write client data on to - * MLAG Process and to read the data from MLAG and post to cleints. + * MLAG Process and to read the data from MLAG and post to clients. * when all clients are un-registered, this Thread will be * suspended. 
*/ diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c index d373fdf370..999e91486d 100644 --- a/zebra/zebra_mpls.c +++ b/zebra/zebra_mpls.c @@ -98,14 +98,14 @@ static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp); static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size); static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex); + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex); static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex, uint8_t num_labels, - mpls_label_t *labels); + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex, + uint8_t num_labels, mpls_label_t *labels); static int nhlfe_del(zebra_nhlfe_t *snhlfe); static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe, struct mpls_label_stack *nh_label); @@ -117,13 +117,13 @@ static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty); static void lsp_print(zebra_lsp_t *lsp, void *ctxt); static void *slsp_alloc(void *p); static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex, + const union g_addr *gate, ifindex_t ifindex, mpls_label_t out_label); static int snhlfe_del(zebra_snhlfe_t *snhlfe); static int snhlfe_del_all(zebra_slsp_t *slsp); @@ -960,7 +960,7 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data) UNSET_FLAG(lsp->flags, LSP_FLAG_CHANGED); /* We leave the INSTALLED flag set here - * so we know an update in in-flight. + * so we know an update is in-flight. */ /* @@ -1149,7 +1149,7 @@ static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size) * Check if NHLFE matches with search info passed. */ static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { struct nexthop *nhop; int cmp = 1; @@ -1191,8 +1191,8 @@ static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, * Locate NHLFE that matches with passed info. */ static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex) + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex) { zebra_nhlfe_t *nhlfe; @@ -1214,9 +1214,9 @@ static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, * check done. 
*/ static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, union g_addr *gate, - ifindex_t ifindex, uint8_t num_labels, - mpls_label_t labels[]) + enum nexthop_types_t gtype, + const union g_addr *gate, ifindex_t ifindex, + uint8_t num_labels, mpls_label_t labels[]) { zebra_nhlfe_t *nhlfe; struct nexthop *nexthop; @@ -1520,7 +1520,7 @@ static struct list *hash_get_sorted_list(struct hash *hash, void *cmp) /* * Compare two LSPs based on their label values. */ -static int lsp_cmp(zebra_lsp_t *lsp1, zebra_lsp_t *lsp2) +static int lsp_cmp(const zebra_lsp_t *lsp1, const zebra_lsp_t *lsp2) { if (lsp1->ile.in_label < lsp2->ile.in_label) return -1; @@ -1547,7 +1547,7 @@ static void *slsp_alloc(void *p) /* * Compare two static LSPs based on their label values. */ -static int slsp_cmp(zebra_slsp_t *slsp1, zebra_slsp_t *slsp2) +static int slsp_cmp(const zebra_slsp_t *slsp1, const zebra_slsp_t *slsp2) { if (slsp1->ile.in_label < slsp2->ile.in_label) return -1; @@ -1562,7 +1562,7 @@ static int slsp_cmp(zebra_slsp_t *slsp1, zebra_slsp_t *slsp2) * Check if static NHLFE matches with search info passed. */ static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { int cmp = 1; @@ -1593,7 +1593,7 @@ static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, */ static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { zebra_snhlfe_t *snhlfe; @@ -1615,7 +1615,7 @@ static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp, */ static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex, + const union g_addr *gate, ifindex_t ifindex, mpls_label_t out_label) { zebra_snhlfe_t *snhlfe; @@ -2746,7 +2746,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, uint8_t num_out_labels, mpls_label_t out_labels[], enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { struct hash *lsp_table; zebra_ile_t tmp_ile; @@ -2759,11 +2759,12 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, if (!lsp_table) return -1; - /* If entry is present, exit. */ + /* Find or create LSP object */ tmp_ile.in_label = in_label; lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc); if (!lsp) return -1; + nhlfe = nhlfe_find(lsp, type, gtype, gate, ifindex); if (nhlfe) { struct nexthop *nh = nhlfe->nexthop; @@ -2780,8 +2781,8 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, return 0; if (IS_ZEBRA_DEBUG_MPLS) { - char buf2[BUFSIZ]; - char buf3[BUFSIZ]; + char buf2[MPLS_LABEL_STRLEN]; + char buf3[MPLS_LABEL_STRLEN]; nhlfe2str(nhlfe, buf, BUFSIZ); mpls_label2str(num_out_labels, out_labels, buf2, @@ -2842,7 +2843,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, */ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex) { struct hash *lsp_table; zebra_ile_t tmp_ile; @@ -3056,11 +3057,12 @@ int zebra_mpls_static_lsp_add(struct zebra_vrf *zvrf, mpls_label_t in_label, if (!slsp_table) return -1; - /* If entry is present, exit. 
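The comment rewrites above ("If entry is present, exit" becoming "Find or create LSP") reflect that hash_get() with an allocation callback already performs find-or-create in one step. A generic sketch of that idiom, assuming a trivial linked list standing in for the hash table and an invented demo_* API:

#include <stdio.h>
#include <stdlib.h>

struct demo_lsp {
	unsigned int in_label;   /* lookup key */
	int installed;
	struct demo_lsp *next;
};

/* Find-or-create: return the existing entry for the key, or invoke the
 * allocator, link the new entry in and return it.  Callers no longer need
 * separate "lookup, then insert if missing" logic. */
static struct demo_lsp *demo_get(struct demo_lsp **table, unsigned int in_label,
				 struct demo_lsp *(*alloc)(unsigned int))
{
	struct demo_lsp *lsp;

	for (lsp = *table; lsp; lsp = lsp->next)
		if (lsp->in_label == in_label)
			return lsp;

	lsp = alloc(in_label);
	lsp->next = *table;
	*table = lsp;
	return lsp;
}

static struct demo_lsp *demo_alloc(unsigned int in_label)
{
	struct demo_lsp *lsp = calloc(1, sizeof(*lsp));

	lsp->in_label = in_label;
	return lsp;
}

int main(void)
{
	struct demo_lsp *table = NULL;
	struct demo_lsp *a = demo_get(&table, 100, demo_alloc); /* created */
	struct demo_lsp *b = demo_get(&table, 100, demo_alloc); /* found */

	printf("%s\n", (a == b) ? "same entry reused" : "bug");
	free(a);
	return 0;
}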
*/ + /* Find or create LSP. */ tmp_ile.in_label = in_label; slsp = hash_get(slsp_table, &tmp_ile, slsp_alloc); if (!slsp) return -1; + snhlfe = snhlfe_find(slsp, gtype, gate, ifindex); if (snhlfe) { if (snhlfe->out_label == out_label) diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h index 2489e8e510..33cb614346 100644 --- a/zebra/zebra_mpls.h +++ b/zebra/zebra_mpls.h @@ -288,7 +288,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, uint8_t num_out_labels, mpls_label_t out_labels[], enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); /* * Uninstall a particular NHLFE in the forwarding table. If this is @@ -296,7 +296,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, */ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, enum nexthop_types_t gtype, - union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex); /* * Uninstall all NHLFEs for a particular LSP forwarding entry. diff --git a/zebra/zebra_netns_id.c b/zebra/zebra_netns_id.c index ea4b07a87d..77a9a7c368 100644 --- a/zebra/zebra_netns_id.c +++ b/zebra/zebra_netns_id.c @@ -143,7 +143,7 @@ static ns_id_t extract_nsid(struct nlmsghdr *nlh, char *buf) void *tail = (void *)((char *)nlh + NETLINK_ALIGN(nlh->nlmsg_len)); struct nlattr *attr; - for (attr = (struct nlattr *)((char *)buf + offset); + for (attr = (struct nlattr *)(buf + offset); NETLINK_NLATTR_LEN(tail, attr) >= sizeof(struct nlattr) && attr->nla_len >= sizeof(struct nlattr) && attr->nla_len <= NETLINK_NLATTR_LEN(tail, attr); diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index f0d43756b5..fceddcb745 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -63,6 +63,9 @@ static struct nhg_hash_entry * depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id); static void depends_decrement_free(struct nhg_connected_tree_head *head); +static struct nhg_backup_info * +nhg_backup_copy(const struct nhg_backup_info *orig); + static void nhg_connected_free(struct nhg_connected *dep) { @@ -295,7 +298,7 @@ static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp) static void zebra_nhg_connect_depends(struct nhg_hash_entry *nhe, - struct nhg_connected_tree_head nhg_depends) + struct nhg_connected_tree_head *nhg_depends) { struct nhg_connected *rb_node_dep = NULL; @@ -304,31 +307,58 @@ zebra_nhg_connect_depends(struct nhg_hash_entry *nhe, * for now. Otherwise, their might be a time trade-off for repeated * alloc/frees as startup. 
*/ - nhe->nhg_depends = nhg_depends; + nhe->nhg_depends = *nhg_depends; /* Attach backpointer to anything that it depends on */ zebra_nhg_dependents_init(nhe); if (!zebra_nhg_depends_is_empty(nhe)) { frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u), dep %p (%u)", + __func__, nhe, nhe->id, + rb_node_dep->nhe, + rb_node_dep->nhe->id); + zebra_nhg_dependents_add(rb_node_dep->nhe, nhe); } } +} - /* Add the ifp now if its not a group or recursive and has ifindex */ - if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop - && nhe->nhg.nexthop->ifindex) { - struct interface *ifp = NULL; +/* Init an nhe, for use in a hash lookup for example */ +void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi, + const struct nexthop *nh) +{ + memset(nhe, 0, sizeof(struct nhg_hash_entry)); + nhe->vrf_id = VRF_DEFAULT; + nhe->type = ZEBRA_ROUTE_NHG; + nhe->afi = AFI_UNSPEC; - ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex, - nhe->nhg.nexthop->vrf_id); - if (ifp) - zebra_nhg_set_if(nhe, ifp); - else - flog_err( - EC_ZEBRA_IF_LOOKUP_FAILED, - "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u", - nhe->nhg.nexthop->ifindex, - nhe->nhg.nexthop->vrf_id, nhe->id); + /* There are some special rules that apply to groups representing + * a single nexthop. + */ + if (nh && (nh->next == NULL)) { + switch (nh->type) { + case (NEXTHOP_TYPE_IFINDEX): + case (NEXTHOP_TYPE_BLACKHOLE): + /* + * This switch case handles setting the afi different + * for ipv4/v6 routes. Ifindex/blackhole nexthop + * objects cannot be ambiguous, they must be Address + * Family specific. If we get here, we will either use + * the AF of the route, or the one we got passed from + * here from the kernel. + */ + nhe->afi = afi; + break; + case (NEXTHOP_TYPE_IPV4_IFINDEX): + case (NEXTHOP_TYPE_IPV4): + nhe->afi = AFI_IP; + break; + case (NEXTHOP_TYPE_IPV6_IFINDEX): + case (NEXTHOP_TYPE_IPV6): + nhe->afi = AFI_IP6; + break; + } } } @@ -341,7 +371,7 @@ struct nhg_hash_entry *zebra_nhg_alloc(void) return nhe; } -static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy, +static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *orig, uint32_t id) { struct nhg_hash_entry *nhe; @@ -350,14 +380,18 @@ static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy, nhe->id = id; - nexthop_group_copy(&(nhe->nhg), &(copy->nhg)); + nexthop_group_copy(&(nhe->nhg), &(orig->nhg)); - nhe->vrf_id = copy->vrf_id; - nhe->afi = copy->afi; - nhe->type = copy->type ? copy->type : ZEBRA_ROUTE_NHG; + nhe->vrf_id = orig->vrf_id; + nhe->afi = orig->afi; + nhe->type = orig->type ? orig->type : ZEBRA_ROUTE_NHG; nhe->refcnt = 0; nhe->dplane_ref = zebra_router_get_next_sequence(); + /* Copy backup info also, if present */ + if (orig->backup_info) + nhe->backup_info = nhg_backup_copy(orig->backup_info); + return nhe; } @@ -372,7 +406,25 @@ static void *zebra_nhg_hash_alloc(void *arg) /* Mark duplicate nexthops in a group at creation time. 
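The new zebra_nhe_init() above derives the hash entry's address family from a singleton nexthop's type: IPv4 gateway types force AFI_IP, IPv6 types force AFI_IP6, while ifindex and blackhole nexthops inherit whatever AFI the route supplied, since they are otherwise ambiguous. A sketch of that mapping with local enums standing in for FRR's afi_t and nexthop types:

#include <stdio.h>

/* Local stand-ins for the afi and nexthop-type enums. */
enum demo_afi { DEMO_AFI_UNSPEC, DEMO_AFI_IP, DEMO_AFI_IP6 };

enum demo_nh_type {
	DEMO_NH_IFINDEX,
	DEMO_NH_IPV4,
	DEMO_NH_IPV4_IFINDEX,
	DEMO_NH_IPV6,
	DEMO_NH_IPV6_IFINDEX,
	DEMO_NH_BLACKHOLE,
};

/* Singleton groups must not be address-family ambiguous: gateway nexthops
 * dictate the AFI themselves, ifindex/blackhole take the route's AFI. */
static enum demo_afi demo_singleton_afi(enum demo_nh_type type,
					enum demo_afi route_afi)
{
	switch (type) {
	case DEMO_NH_IPV4:
	case DEMO_NH_IPV4_IFINDEX:
		return DEMO_AFI_IP;
	case DEMO_NH_IPV6:
	case DEMO_NH_IPV6_IFINDEX:
		return DEMO_AFI_IP6;
	case DEMO_NH_IFINDEX:
	case DEMO_NH_BLACKHOLE:
	default:
		return route_afi;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_singleton_afi(DEMO_NH_IPV4, DEMO_AFI_UNSPEC),     /* 1 */
	       demo_singleton_afi(DEMO_NH_IPV6_IFINDEX, DEMO_AFI_IP), /* 2 */
	       demo_singleton_afi(DEMO_NH_BLACKHOLE, DEMO_AFI_IP));   /* 1 */
	return 0;
}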
*/ nexthop_group_mark_duplicates(&(nhe->nhg)); - zebra_nhg_connect_depends(nhe, copy->nhg_depends); + zebra_nhg_connect_depends(nhe, &(copy->nhg_depends)); + + /* Add the ifp now if it's not a group or recursive and has ifindex */ + if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop + && nhe->nhg.nexthop->ifindex) { + struct interface *ifp = NULL; + + ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex, + nhe->nhg.nexthop->vrf_id); + if (ifp) + zebra_nhg_set_if(nhe, ifp); + else + flog_err( + EC_ZEBRA_IF_LOOKUP_FAILED, + "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u", + nhe->nhg.nexthop->ifindex, + nhe->nhg.nexthop->vrf_id, nhe->id); + } + zebra_nhg_insert_id(nhe); return nhe; @@ -381,12 +433,17 @@ static void *zebra_nhg_hash_alloc(void *arg) uint32_t zebra_nhg_hash_key(const void *arg) { const struct nhg_hash_entry *nhe = arg; + uint32_t val, key = 0x5a351234; + + val = nexthop_group_hash(&(nhe->nhg)); + if (nhe->backup_info) { + val = jhash_2words(val, + nexthop_group_hash( + &(nhe->backup_info->nhe->nhg)), + key); + } - uint32_t key = 0x5a351234; - - key = jhash_3words(nhe->vrf_id, nhe->afi, - nexthop_group_hash(&(nhe->nhg)), - key); + key = jhash_3words(nhe->vrf_id, nhe->afi, val, key); return key; } @@ -398,6 +455,50 @@ uint32_t zebra_nhg_id_key(const void *arg) return nhe->id; } +/* Helper with common nhg/nhe nexthop comparison logic */ +static bool nhg_compare_nexthops(const struct nexthop *nh1, + const struct nexthop *nh2) +{ + if (nh1 && !nh2) + return false; + + if (!nh1 && nh2) + return false; + + /* + * We have to check the active flag of each individual one, + * not just the overall active_num. This solves the special case + * issue of a route with a nexthop group with one nexthop + * resolving to itself and thus marking it inactive. If we + * have two different routes each wanting to mark a different + * nexthop inactive, they need to hash to two different groups. + * + * If we just hashed on num_active, they would hash the same + * which is incorrect. + * + * ex) + * 1.1.1.0/24 + * -> 1.1.1.1 dummy1 (inactive) + * -> 1.1.2.1 dummy2 + * + * 1.1.2.0/24 + * -> 1.1.1.1 dummy1 + * -> 1.1.2.1 dummy2 (inactive) + * + * Without checking each individual one, they would hash to + * the same group and both have 1.1.1.1 dummy1 marked inactive. + * + */ + if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_ACTIVE) + != CHECK_FLAG(nh2->flags, NEXTHOP_FLAG_ACTIVE)) + return false; + + if (!nexthop_same(nh1, nh2)) + return false; + + return true; +} + bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) { const struct nhg_hash_entry *nhe1 = arg1; @@ -415,45 +516,44 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) if (nhe1->afi != nhe2->afi) return false; - /* Nexthops should be sorted */ + /* Nexthops should be in-order, so we simply compare them in-place */ for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop; nexthop1 || nexthop2; nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { - if (nexthop1 && !nexthop2) - return false; - if (!nexthop1 && nexthop2) + if (!nhg_compare_nexthops(nexthop1, nexthop2)) return false; + } - /* - * We have to check the active flag of each individual one, - * not just the overall active_num. This solves the special case - * issue of a route with a nexthop group with one nexthop - * resolving to itself and thus marking it inactive. If we - * have two different routes each wanting to mark a different - * nexthop inactive, they need to hash to two different groups. 
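zebra_nhg_hash_key() above now folds the backup group's hash into an intermediate value before mixing in vrf_id and afi, so two entries that differ only in their backup nexthops land in different buckets. The sketch below shows only that layering; demo_mix() is an invented 32-bit mixer standing in for the jhash routines, and only the 0x5a351234 seed comes from the patch itself.

#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit mixer; a stand-in, not the real jhash. */
static uint32_t demo_mix(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ (h >> 16)) * 0x45d9f3b;
	h ^= b;
	h = (h ^ (h >> 13)) * 0x45d9f3b;
	return h ^ (h >> 16);
}

/* Key = mix of vrf, afi and the combined primary/backup group hashes. */
static uint32_t demo_nhe_key(uint32_t vrf_id, uint32_t afi,
			     uint32_t primary_hash, int has_backup,
			     uint32_t backup_hash)
{
	const uint32_t seed = 0x5a351234;
	uint32_t val = primary_hash;

	if (has_backup)
		val = demo_mix(val, backup_hash, seed);

	return demo_mix(vrf_id, afi, demo_mix(val, 0, seed));
}

int main(void)
{
	/* Same primary nexthops, different backups -> different keys. */
	printf("0x%08x\n", demo_nhe_key(0, 1, 0xabc123, 1, 0x1111));
	printf("0x%08x\n", demo_nhe_key(0, 1, 0xabc123, 1, 0x2222));
	return 0;
}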
- * - * If we just hashed on num_active, they would hash the same - * which is incorrect. - * - * ex) - * 1.1.1.0/24 - * -> 1.1.1.1 dummy1 (inactive) - * -> 1.1.2.1 dummy2 - * - * 1.1.2.0/24 - * -> 1.1.1.1 dummy1 - * -> 1.1.2.1 dummy2 (inactive) - * - * Without checking each individual one, they would hash to - * the same group and both have 1.1.1.1 dummy1 marked inactive. - * - */ - if (CHECK_FLAG(nexthop1->flags, NEXTHOP_FLAG_ACTIVE) - != CHECK_FLAG(nexthop2->flags, NEXTHOP_FLAG_ACTIVE)) - return false; + /* If there's no backup info, comparison is done. */ + if ((nhe1->backup_info == NULL) && (nhe2->backup_info == NULL)) + return true; - if (!nexthop_same(nexthop1, nexthop2)) + /* Compare backup info also - test the easy things first */ + if (nhe1->backup_info && (nhe2->backup_info == NULL)) + return false; + if (nhe2->backup_info && (nhe1->backup_info == NULL)) + return false; + + /* Compare number of backups before actually comparing any */ + for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop, + nexthop2 = nhe2->backup_info->nhe->nhg.nexthop; + nexthop1 && nexthop2; + nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { + ; + } + + /* Did we find the end of one list before the other? */ + if (nexthop1 || nexthop2) + return false; + + /* Have to compare the backup nexthops */ + for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop, + nexthop2 = nhe2->backup_info->nhe->nhg.nexthop; + nexthop1 || nexthop2; + nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { + + if (!nhg_compare_nexthops(nexthop1, nexthop2)) return false; } @@ -512,29 +612,185 @@ static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends, resolved_ng.nexthop = nh; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: head %p, nh %pNHv", + __func__, nhg_depends, nh); + depend = zebra_nhg_rib_find(0, &resolved_ng, afi); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p (%u)", + __func__, nh, depend, + depend ? depend->id : 0); + if (depend) depends_add(nhg_depends, depend); } +/* + * Lookup an nhe in the global hash, using data from another nhe. If 'lookup' + * has an id value, that's used. Create a new global/shared nhe if not found. + */ +static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */ + struct nhg_hash_entry *lookup, + struct nhg_connected_tree_head *nhg_depends, + afi_t afi) +{ + bool created = false; + bool recursive = false; + struct nhg_hash_entry *newnhe, *backup_nhe; + struct nexthop *nh = NULL; + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: id %u, lookup %p, vrf %d, type %d, depends %p", + __func__, lookup->id, lookup, + lookup->vrf_id, lookup->type, + nhg_depends); + + if (lookup->id) + (*nhe) = zebra_nhg_lookup_id(lookup->id); + else + (*nhe) = hash_lookup(zrouter.nhgs, lookup); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: lookup => %p (%u)", + __func__, (*nhe), + (*nhe) ? (*nhe)->id : 0); + + /* If we found an existing object, we're done */ + if (*nhe) + goto done; + + /* We're going to create/insert a new nhe: + * assign the next global id value if necessary. + */ + if (lookup->id == 0) + lookup->id = ++id_counter; + newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc); + created = true; + + /* Mail back the new object */ + *nhe = newnhe; + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: => created %p (%u)", __func__, newnhe, + newnhe->id); + + /* Only hash/lookup the depends if the first lookup + * fails to find something. This should hopefully save a + * lot of cycles for larger ecmp sizes. 
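The extended zebra_nhg_hash_equal() above first walks both backup lists in lock-step purely to reject a length mismatch, and only then compares the backups element by element. A self-contained sketch of that two-pass comparison on plain singly linked lists, with an invented demo_nh type in place of real nexthops:

#include <stdbool.h>
#include <stdio.h>

struct demo_nh {
	int key;
	struct demo_nh *next;
};

static bool demo_nh_equal(const struct demo_nh *a, const struct demo_nh *b)
{
	return a->key == b->key;
}

static bool demo_lists_equal(const struct demo_nh *l1, const struct demo_nh *l2)
{
	const struct demo_nh *n1, *n2;

	/* Pass 1: walk both lists together; bail out cheaply on a length
	 * mismatch before comparing any elements. */
	for (n1 = l1, n2 = l2; n1 && n2; n1 = n1->next, n2 = n2->next)
		;
	if (n1 || n2)
		return false;

	/* Pass 2: same length, compare element by element. */
	for (n1 = l1, n2 = l2; n1; n1 = n1->next, n2 = n2->next)
		if (!demo_nh_equal(n1, n2))
			return false;

	return true;
}

int main(void)
{
	struct demo_nh a2 = { 2, NULL }, a1 = { 1, &a2 };
	struct demo_nh b2 = { 2, NULL }, b1 = { 1, &b2 };
	struct demo_nh c1 = { 1, NULL };

	printf("%d %d\n", demo_lists_equal(&a1, &b1),  /* 1 */
	       demo_lists_equal(&a1, &c1));            /* 0: lengths differ */
	return 0;
}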
+ */ + if (nhg_depends) { + /* If you don't want to hash on each nexthop in the + * nexthop group struct you can pass the depends + * directly. Kernel-side we do this since it just looks + * them up via IDs. + */ + zebra_nhg_connect_depends(newnhe, nhg_depends); + goto done; + } + + /* Prepare dependency relationships if this is not a + * singleton nexthop. There are two cases: a single + * recursive nexthop, where we need a relationship to the + * resolving nexthop; or a group of nexthops, where we need + * relationships with the corresponding singletons. + */ + zebra_nhg_depends_init(lookup); + + nh = newnhe->nhg.nexthop; + + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)) + SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID); + + if (nh->next == NULL) { + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) { + /* Single recursive nexthop */ + handle_recursive_depend(&newnhe->nhg_depends, + nh->resolved, afi); + recursive = true; + } + } else { + /* List of nexthops */ + for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: depends NH %pNHv %s", + __func__, nh, + CHECK_FLAG(nh->flags, + NEXTHOP_FLAG_RECURSIVE) ? + "(R)" : ""); + + depends_find_add(&newnhe->nhg_depends, nh, afi); + } + } + + if (recursive) + SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE); + + if (zebra_nhg_get_backup_nhg(newnhe) == NULL || + zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL) + goto done; + + /* If there are backup nexthops, add them to the backup + * depends tree. The rules here are a little different. + */ + recursive = false; + backup_nhe = newnhe->backup_info->nhe; + + nh = backup_nhe->nhg.nexthop; + + /* Singleton recursive NH */ + if (nh->next == NULL && + CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: backup depend NH %pNHv (R)", + __func__, nh); + + /* Single recursive nexthop */ + handle_recursive_depend(&backup_nhe->nhg_depends, + nh->resolved, afi); + recursive = true; + } else { + /* One or more backup NHs */ + for (; nh; nh = nh->next) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: backup depend NH %pNHv %s", + __func__, nh, + CHECK_FLAG(nh->flags, + NEXTHOP_FLAG_RECURSIVE) ? + "(R)" : ""); + + depends_find_add(&backup_nhe->nhg_depends, + nh, afi); + } + } + + if (recursive) + SET_FLAG(backup_nhe->flags, NEXTHOP_GROUP_RECURSIVE); + +done: + + return created; +} + +/* + * Lookup or create an nhe, based on an nhg or an nhe id. + */ static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id, struct nexthop_group *nhg, struct nhg_connected_tree_head *nhg_depends, vrf_id_t vrf_id, afi_t afi, int type) { struct nhg_hash_entry lookup = {}; - - uint32_t old_id_counter = id_counter; - bool created = false; - bool recursive = false; - /* - * If it has an id at this point, we must have gotten it from the kernel - */ - lookup.id = id ? id : ++id_counter; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p", + __func__, id, nhg, vrf_id, type, + nhg_depends); + /* Use a temporary nhe and call into the superset/common code */ + lookup.id = id; lookup.type = type ? 
type : ZEBRA_ROUTE_NHG; lookup.nhg = *nhg; @@ -567,53 +823,8 @@ static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id, } } - if (id) - (*nhe) = zebra_nhg_lookup_id(id); - else - (*nhe) = hash_lookup(zrouter.nhgs, &lookup); - - /* If it found an nhe in our tables, this new ID is unused */ - if (*nhe) - id_counter = old_id_counter; - - if (!(*nhe)) { - /* Only hash/lookup the depends if the first lookup - * fails to find something. This should hopefully save a - * lot of cycles for larger ecmp sizes. - */ - if (nhg_depends) - /* If you don't want to hash on each nexthop in the - * nexthop group struct you can pass the depends - * directly. Kernel-side we do this since it just looks - * them up via IDs. - */ - lookup.nhg_depends = *nhg_depends; - else { - if (nhg->nexthop->next) { - zebra_nhg_depends_init(&lookup); - - /* If its a group, create a dependency tree */ - struct nexthop *nh = NULL; - - for (nh = nhg->nexthop; nh; nh = nh->next) - depends_find_add(&lookup.nhg_depends, - nh, afi); - } else if (CHECK_FLAG(nhg->nexthop->flags, - NEXTHOP_FLAG_RECURSIVE)) { - zebra_nhg_depends_init(&lookup); - handle_recursive_depend(&lookup.nhg_depends, - nhg->nexthop->resolved, - afi); - recursive = true; - } - } - - (*nhe) = hash_get(zrouter.nhgs, &lookup, zebra_nhg_hash_alloc); - created = true; + created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi); - if (recursive) - SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE); - } return created; } @@ -629,6 +840,10 @@ zebra_nhg_find_nexthop(uint32_t id, struct nexthop *nh, afi_t afi, int type) zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p (%u)", + __func__, nh, nhe, nhe ? nhe->id : 0); + return nhe; } @@ -807,6 +1022,9 @@ done: static void zebra_nhg_release(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u)", __func__, nhe, nhe->id); + /* Remove it from any lists it may be on */ zebra_nhg_depends_release(nhe); zebra_nhg_dependents_release(nhe); @@ -872,6 +1090,10 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) lookup = zebra_nhg_lookup_id(id); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: id %u, count %d, lookup => %p", + __func__, id, count, lookup); + if (lookup) { /* This is already present in our table, hence an update * that we did not initate. @@ -919,6 +1141,11 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) */ kernel_nhe = zebra_nhg_copy(nhe, id); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: copying kernel nhe (%u), dup of %u", + __func__, id, nhe->id); + zebra_nhg_insert_id(kernel_nhe); zebra_nhg_set_unhashable(kernel_nhe); } else if (zebra_nhg_contains_unhashable(nhe)) { @@ -926,10 +1153,18 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) * depend, so lets mark this group as unhashable as well * and release it from the non-ID hash. 
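The zebra_nhe_find()/zebra_nhg_find() rework above stops provisionally bumping id_counter and rolling it back when the lookup hits; the next id is assigned only once it is certain a new entry will be created. A sketch of that lookup-then-assign ordering, assuming a toy list as the table and simplified id semantics (all demo_* names are invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_nhe {
	unsigned int id;
	unsigned int hash;   /* stands in for the nexthop-group contents */
	struct demo_nhe *next;
};

static struct demo_nhe *demo_table;
static unsigned int demo_id_counter;

/* Return true when a new entry was created; either way *nhe points at the
 * shared object for this key.  The id counter moves only on creation. */
static bool demo_nhe_find(struct demo_nhe **nhe, unsigned int id,
			  unsigned int hash)
{
	struct demo_nhe *p;

	for (p = demo_table; p; p = p->next)
		if ((id && p->id == id) || (!id && p->hash == hash)) {
			*nhe = p;
			return false;	/* reuse existing entry */
		}

	p = calloc(1, sizeof(*p));
	p->id = id ? id : ++demo_id_counter;	/* assign id only now */
	p->hash = hash;
	p->next = demo_table;
	demo_table = p;

	*nhe = p;
	return true;
}

int main(void)
{
	struct demo_nhe *a, *b;

	printf("%d ", demo_nhe_find(&a, 0, 0xfeed)); /* 1: created, id 1 */
	printf("%d ", demo_nhe_find(&b, 0, 0xfeed)); /* 0: found, same id */
	printf("%u %u\n", a->id, b->id);
	return 0;
}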
*/ + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) unhashable", + __func__, nhe, nhe->id); + hash_release(zrouter.nhgs, nhe); zebra_nhg_set_unhashable(nhe); } else { /* It actually created a new nhe */ + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) is new", + __func__, nhe, nhe->id); + SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); } @@ -1038,6 +1273,10 @@ int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, { struct nhg_ctx *ctx = NULL; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv, id %u, count %d", + __func__, nh, id, (int)count); + if (id > id_counter) /* Increase our counter so we don't try to create * an ID that already exists @@ -1111,12 +1350,17 @@ static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh, /* The copy may have allocated labels; free them if necessary. */ nexthop_del_labels(&lookup); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p (%u)", + __func__, nh, nhe, nhe ? nhe->id : 0); + return nhe; } static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi) { struct nhg_hash_entry *nhe = NULL; + char rbuf[10]; if (!nh) goto done; @@ -1124,10 +1368,18 @@ static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi) /* We are separating these functions out to increase handling speed * in the non-recursive case (by not alloc/freeing) */ - if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) + if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) { nhe = depends_find_recursive(nh, afi); - else + strlcpy(rbuf, "(R)", sizeof(rbuf)); + } else { nhe = depends_find_singleton(nh, afi); + rbuf[0] = '\0'; + } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv %s => %p (%u)", + __func__, nh, rbuf, + nhe, nhe ? nhe->id : 0); done: return nhe; @@ -1136,6 +1388,10 @@ done: static void depends_add(struct nhg_connected_tree_head *head, struct nhg_hash_entry *depend) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: head %p nh %pNHv", + __func__, head, depend->nhg.nexthop); + /* If NULL is returned, it was successfully added and * needs to have its refcnt incremented. * @@ -1154,6 +1410,10 @@ depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh, depend = depends_find(nh, afi); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nh %pNHv => %p", + __func__, nh, depend); + if (depend) depends_add(head, depend); @@ -1179,7 +1439,7 @@ static void depends_decrement_free(struct nhg_connected_tree_head *head) nhg_connected_tree_free(head); } -/* Rib-side, you get a nexthop group struct */ +/* Find an nhe based on a list of nexthops */ struct nhg_hash_entry * zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi) { @@ -1195,13 +1455,107 @@ zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi) zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, 0); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: => nhe %p (%u)", + __func__, nhe, nhe ? nhe->id : 0); + + return nhe; +} + +/* Find an nhe based on a route's nhe */ +struct nhg_hash_entry * +zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi) +{ + struct nhg_hash_entry *nhe = NULL; + + if (!(rt_nhe && rt_nhe->nhg.nexthop)) { + flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED, + "No nexthop passed to %s", __func__); + return NULL; + } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: rt_nhe %p (%u)", + __func__, rt_nhe, + rt_nhe ? 
rt_nhe->id : 0); + + zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: => nhe %p (%u)", + __func__, nhe, nhe ? nhe->id : 0); + return nhe; } +/* + * Allocate backup nexthop info object. Typically these are embedded in + * nhg_hash_entry objects. + */ +struct nhg_backup_info *zebra_nhg_backup_alloc(void) +{ + struct nhg_backup_info *p; + + p = XCALLOC(MTYPE_NHG, sizeof(struct nhg_backup_info)); + + p->nhe = zebra_nhg_alloc(); + + /* Identify the embedded group used to hold the list of backups */ + SET_FLAG(p->nhe->flags, NEXTHOP_GROUP_BACKUP); + + return p; +} + +/* + * Free backup nexthop info object, deal with any embedded allocations + */ +void zebra_nhg_backup_free(struct nhg_backup_info **p) +{ + if (p && *p) { + if ((*p)->nhe) + zebra_nhg_free((*p)->nhe); + + XFREE(MTYPE_NHG, (*p)); + } +} + +/* Accessor for backup nexthop group */ +struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe) +{ + struct nexthop_group *p = NULL; + + if (nhe) { + if (nhe->backup_info && nhe->backup_info->nhe) + p = &(nhe->backup_info->nhe->nhg); + } + + return p; +} + +/* + * Helper to return a copy of a backup_info - note that this is a shallow + * copy, meant to be used when creating a new nhe from info passed in with + * a route e.g. + */ +static struct nhg_backup_info * +nhg_backup_copy(const struct nhg_backup_info *orig) +{ + struct nhg_backup_info *b; + + b = zebra_nhg_backup_alloc(); + + /* Copy list of nexthops */ + nexthop_group_copy(&(b->nhe->nhg), &(orig->nhe->nhg)); + + return b; +} + static void zebra_nhg_free_members(struct nhg_hash_entry *nhe) { nexthops_free(nhe->nhg.nexthop); + zebra_nhg_backup_free(&nhe->backup_info); + /* Decrement to remove connection ref */ nhg_connected_tree_decrement_ref(&nhe->nhg_depends); nhg_connected_tree_free(&nhe->nhg_depends); @@ -1210,6 +1564,21 @@ static void zebra_nhg_free_members(struct nhg_hash_entry *nhe) void zebra_nhg_free(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) { + /* Group or singleton? */ + if (nhe->nhg.nexthop && nhe->nhg.nexthop->next) + zlog_debug("%s: nhe %p (%u), refcnt %d", + __func__, nhe, + (nhe ? nhe->id : 0), + (nhe ? nhe->refcnt : 0)); + else + zlog_debug("%s: nhe %p (%u), refcnt %d, NH %pNHv", + __func__, nhe, + (nhe ? nhe->id : 0), + (nhe ? nhe->refcnt : 0), + nhe->nhg.nexthop); + } + if (nhe->refcnt) zlog_debug("nhe_id=%u hash refcnt=%d", nhe->id, nhe->refcnt); @@ -1225,6 +1594,11 @@ void zebra_nhg_hash_free(void *p) void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) %d => %d", + __func__, nhe, nhe->id, nhe->refcnt, + nhe->refcnt - 1); + nhe->refcnt--; if (!zebra_nhg_depends_is_empty(nhe)) @@ -1236,6 +1610,11 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe) void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: nhe %p (%u) %d => %d", + __func__, nhe, nhe->id, nhe->refcnt, + nhe->refcnt + 1); + nhe->refcnt++; if (!zebra_nhg_depends_is_empty(nhe)) @@ -1385,6 +1764,10 @@ static int nexthop_active(afi_t afi, struct route_entry *re, nexthop->resolved = NULL; re->nexthop_mtu = 0; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p, nexthop %pNHv", + __func__, re, nexthop); + /* * If the kernel has sent us a NEW route, then * by golly gee whiz it's a good route. 
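
[Annotation, not part of the diff] To make the new rib-side flow above easier to follow, here is a minimal, hypothetical sketch of how a caller could combine the entry points introduced in this change (zebra_nhe_init(), zebra_nhg_backup_alloc()/zebra_nhg_backup_free(), zebra_nhg_get_backup_nhg(), zebra_nhg_rib_find_nhe(), zebra_nhg_increment_ref()) with lib's nexthop_group_copy(). The wrapper function, its name, and its parameters are invented for illustration only; error handling and the refcount ownership that the real route path manages when it attaches the nhe to a route_entry are simplified.

static struct nhg_hash_entry *
example_rib_find_with_backups(struct nexthop_group *nhg,
			      struct nexthop_group *backup_nhg, afi_t afi)
{
	struct nhg_hash_entry lookup = {};
	struct nhg_hash_entry *nhe;

	/* Seed a temporary, stack-local entry the same way zebra_nhg_find()
	 * does: init from the nexthop, then point at the caller's group.
	 */
	zebra_nhe_init(&lookup, afi, nhg->nexthop);
	lookup.nhg = *nhg;

	/* Optionally attach a backup list inside the embedded backup nhe. */
	if (backup_nhg && backup_nhg->nexthop) {
		lookup.backup_info = zebra_nhg_backup_alloc();
		nexthop_group_copy(zebra_nhg_get_backup_nhg(&lookup),
				   backup_nhg);
	}

	/* Find-or-create the shared hash entry for this nexthop state. */
	nhe = zebra_nhg_rib_find_nhe(&lookup, afi);

	/* Hold a reference while the caller keeps the pointer. */
	if (nhe)
		zebra_nhg_increment_ref(nhe);

	/* Release only our temporary backup container; the shared entry is
	 * expected to hold its own copies (see nhg_backup_copy() above).
	 */
	zebra_nhg_backup_free(&lookup.backup_info);

	return nhe;
}

This mirrors what nexthop_active_update() does later in this diff with its local copy of re->nhe: build a throwaway entry, hand it to zebra_nhg_rib_find_nhe(), then free the temporary state once the shared entry has been found or created.
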
@@ -1411,7 +1794,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (!ifp) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t%s: Onlink and interface: %u[%u] does not exist", + " %s: Onlink and interface: %u[%u] does not exist", __func__, nexthop->ifindex, nexthop->vrf_id); return 0; @@ -1422,14 +1805,14 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t%s: Onlink and interface %s is not operative", + " %s: Onlink and interface %s is not operative", __func__, ifp->name); return 0; } if (!if_is_operative(ifp)) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t%s: Interface %s is not unnumbered", + " %s: Interface %s is not unnumbered", __func__, ifp->name); return 0; } @@ -1441,7 +1824,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re, && memcmp(&nexthop->gate.ipv6, &top->p.u.prefix6, 16) == 0)) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t:%s: Attempting to install a max prefixlength route through itself", + " :%s: Attempting to install a max prefixlength route through itself", __func__); return 0; } @@ -1469,7 +1852,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re, zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id); if (!table || !zvrf) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("\t%s: Table not found", __func__); + zlog_debug(" %s: Table not found", __func__); return 0; } @@ -1487,7 +1870,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re, || ((afi == AFI_IP6) && (rn->p.prefixlen != 128))) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t%s: Matched against ourself and prefix length is not max bit length", + " %s: Matched against ourself and prefix length is not max bit length", __func__); return 0; } @@ -1500,7 +1883,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re, && !rnh_resolve_via_default(zvrf, p.family)) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t:%s: Resolved against default route", + " :%s: Resolved against default route", __func__); return 0; } @@ -1533,6 +1916,12 @@ static int nexthop_active(afi_t afi, struct route_entry *re, || nexthop->type == NEXTHOP_TYPE_IPV6) nexthop->ifindex = newhop->ifindex; } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: CONNECT match %p (%u), newhop %pNHv", + __func__, match, + match->nhe->id, newhop); + return 1; } else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) { resolved = 0; @@ -1543,6 +1932,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (!nexthop_valid_resolve(nexthop, newhop)) continue; + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: RECURSIVE match %p (%u), newhop %pNHv", + __func__, match, + match->nhe->id, newhop); + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE); nexthop_set_resolved(afi, newhop, nexthop); @@ -1552,8 +1946,9 @@ static int nexthop_active(afi_t afi, struct route_entry *re, re->nexthop_mtu = match->mtu; if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("\t%s: Recursion failed to find", - __func__); + zlog_debug( + " %s: Recursion failed to find", + __func__); return resolved; } else if (re->type == ZEBRA_ROUTE_STATIC) { resolved = 0; @@ -1564,6 +1959,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (!nexthop_valid_resolve(nexthop, newhop)) continue; + if (IS_ZEBRA_DEBUG_RIB_DETAILED) + zlog_debug("%s: STATIC match %p (%u), newhop %pNHv", + __func__, match, + match->nhe->id, newhop); + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE); nexthop_set_resolved(afi, newhop, nexthop); @@ 
-1574,24 +1974,25 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "\t%s: Static route unable to resolve", + " %s: Static route unable to resolve", __func__); return resolved; } else { if (IS_ZEBRA_DEBUG_RIB_DETAILED) { zlog_debug( - "\t%s: Route Type %s has not turned on recursion", + " %s: Route Type %s has not turned on recursion", __func__, zebra_route_string(re->type)); if (re->type == ZEBRA_ROUTE_BGP && !CHECK_FLAG(re->flags, ZEBRA_FLAG_IBGP)) zlog_debug( - "\tEBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\""); + " EBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\""); } return 0; } } if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("\t%s: Nexthop did not lookup in table", __func__); + zlog_debug(" %s: Nexthop did not lookup in table", + __func__); return 0; } @@ -1681,9 +2082,10 @@ static unsigned nexthop_active_check(struct route_node *rn, default: break; } + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("\t%s: Unable to find a active nexthop", + zlog_debug(" %s: Unable to find active nexthop", __func__); return 0; } @@ -1713,7 +2115,7 @@ static unsigned nexthop_active_check(struct route_node *rn, zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id); if (!zvrf) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("\t%s: zvrf is NULL", __func__); + zlog_debug(" %s: zvrf is NULL", __func__); return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE); } @@ -1734,46 +2136,68 @@ static unsigned nexthop_active_check(struct route_node *rn, return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE); } +/* Helper function called after resolution to walk nhg rb trees + * and toggle the NEXTHOP_GROUP_VALID flag if the nexthop + * is active on singleton NHEs. + */ +static bool zebra_nhg_set_valid_if_active(struct nhg_hash_entry *nhe) +{ + struct nhg_connected *rb_node_dep = NULL; + bool valid = false; + + if (!zebra_nhg_depends_is_empty(nhe)) { + /* Is at least one depend valid? */ + frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { + if (zebra_nhg_set_valid_if_active(rb_node_dep->nhe)) + valid = true; + } + + goto done; + } + + /* should be fully resolved singleton at this point */ + if (CHECK_FLAG(nhe->nhg.nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + valid = true; + +done: + if (valid) + SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); + + return valid; +} + /* - * Iterate over all nexthops of the given RIB entry and refresh their - * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag, - * the whole re structure is flagged with ROUTE_ENTRY_CHANGED. - * - * Return value is the new number of active nexthops. + * Process a list of nexthops, given the head of the list, determining + * whether each one is ACTIVE/installable at this time. 
*/ -int nexthop_active_update(struct route_node *rn, struct route_entry *re) +static uint32_t nexthop_list_active_update(struct route_node *rn, + struct route_entry *re, + struct nexthop *nexthop) { - struct nexthop_group new_grp = {}; - struct nexthop *nexthop; union g_addr prev_src; unsigned int prev_active, new_active; ifindex_t prev_index; - uint8_t curr_active = 0; + uint32_t counter = 0; - afi_t rt_afi = family2afi(rn->p.family); - - UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED); - - /* Copy over the nexthops in current state */ - nexthop_group_copy(&new_grp, &(re->nhe->nhg)); - - for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next) { + /* Process nexthops one-by-one */ + for ( ; nexthop; nexthop = nexthop->next) { /* No protocol daemon provides src and so we're skipping - * tracking it */ + * tracking it + */ prev_src = nexthop->rmap_src; prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE); prev_index = nexthop->ifindex; /* * We need to respect the multipath_num here * as that what we should be able to install from - * a multipath perpsective should not be a data plane + * a multipath perspective should not be a data plane * decision point. */ new_active = nexthop_active_check(rn, re, nexthop); - if (new_active && curr_active >= zrouter.multipath_num) { + if (new_active && counter >= zrouter.multipath_num) { struct nexthop *nh; /* Set it and its resolved nexthop as inactive. */ @@ -1784,7 +2208,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) } if (new_active) - curr_active++; + counter++; /* Don't allow src setting on IPv6 addr for now */ if (prev_active != new_active || prev_index != nexthop->ifindex @@ -1800,48 +2224,122 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) SET_FLAG(re->status, ROUTE_ENTRY_CHANGED); } + return counter; +} + +/* + * Iterate over all nexthops of the given RIB entry and refresh their + * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag, + * the whole re structure is flagged with ROUTE_ENTRY_CHANGED. + * + * Return value is the new number of active nexthops. + */ +int nexthop_active_update(struct route_node *rn, struct route_entry *re) +{ + struct nhg_hash_entry *curr_nhe; + uint32_t curr_active = 0, backup_active = 0; + + afi_t rt_afi = family2afi(rn->p.family); + + UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED); + + /* Make a local copy of the existing nhe, so we don't work on/modify + * the shared nhe. + */ + curr_nhe = zebra_nhg_copy(re->nhe, re->nhe->id); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p nhe %p (%u), curr_nhe %p", + __func__, re, re->nhe, re->nhe->id, + curr_nhe); + + /* Clear the existing id, if any: this will avoid any confusion + * if the id exists, and will also force the creation + * of a new nhe reflecting the changes we may make in this local copy. + */ + curr_nhe->id = 0; + + /* Process nexthops */ + curr_active = nexthop_list_active_update(rn, re, curr_nhe->nhg.nexthop); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p curr_active %u", __func__, re, + curr_active); + + /* If there are no backup nexthops, we are done */ + if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL) + goto backups_done; + + backup_active = nexthop_list_active_update( + rn, re, zebra_nhg_get_backup_nhg(curr_nhe)->nexthop); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p backup_active %u", __func__, re, + backup_active); + +backups_done: + + /* + * Ref or create an nhe that matches the current state of the + * nexthop(s). 
+ */ if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) { struct nhg_hash_entry *new_nhe = NULL; - new_nhe = zebra_nhg_rib_find(0, &new_grp, rt_afi); + new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: re %p CHANGED: nhe %p (%u) => new_nhe %p (%u)", + __func__, re, re->nhe, + re->nhe->id, new_nhe, new_nhe->id); route_entry_update_nhe(re, new_nhe); } - if (curr_active) { - struct nhg_hash_entry *nhe = NULL; - - nhe = zebra_nhg_lookup_id(re->nhe_id); - if (nhe) - SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); - else - flog_err( - EC_ZEBRA_TABLE_LOOKUP_FAILED, - "Active update on NHE id=%u that we do not have in our tables", - re->nhe_id); - } + /* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID + * flag where appropriate. + */ + if (curr_active) + zebra_nhg_set_valid_if_active(re->nhe); /* - * Do not need these nexthops anymore since they - * were either copied over into an nhe or not + * Do not need the old / copied nhe anymore since it + * was either copied over into a new nhe or not * used at all. */ - nexthops_free(new_grp.nexthop); + zebra_nhg_free(curr_nhe); return curr_active; } -/* Convert a nhe into a group array */ -uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, - int max_num) +/* Recursively construct a grp array of fully resolved IDs. + * + * This function allows us to account for groups within groups, + * by converting them into a flat array of IDs. + * + * nh_grp is modified at every level of recursion to append + * to it the next unique, fully resolved ID from the entire tree. + * + * + * Note: + * I'm pretty sure we only allow ONE level of group within group currently. + * But making this recursive just in case that ever changes. + */ +static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp, + uint8_t curr_index, + struct nhg_hash_entry *nhe, + int max_num) { struct nhg_connected *rb_node_dep = NULL; struct nhg_hash_entry *depend = NULL; - uint8_t i = 0; + uint8_t i = curr_index; frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { bool duplicate = false; + if (i >= max_num) + goto done; + depend = rb_node_dep->nhe; /* @@ -1858,27 +2356,78 @@ uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, } } - /* Check for duplicate IDs, kernel doesn't like that */ - for (int j = 0; j < i; j++) { - if (depend->id == grp[j].id) - duplicate = true; - } + if (!zebra_nhg_depends_is_empty(depend)) { + /* This is a group within a group */ + i = zebra_nhg_nhe2grp_internal(grp, i, depend, max_num); + } else { + if (!CHECK_FLAG(depend->flags, NEXTHOP_GROUP_VALID)) { + if (IS_ZEBRA_DEBUG_RIB_DETAILED + || IS_ZEBRA_DEBUG_NHG) + zlog_debug( + "%s: Nexthop ID (%u) not valid, not appending to dataplane install group", + __func__, depend->id); + continue; + } + + /* If the nexthop not installed/queued for install don't + * put in the ID array. + */ + if (!(CHECK_FLAG(depend->flags, NEXTHOP_GROUP_INSTALLED) + || CHECK_FLAG(depend->flags, + NEXTHOP_GROUP_QUEUED))) { + if (IS_ZEBRA_DEBUG_RIB_DETAILED + || IS_ZEBRA_DEBUG_NHG) + zlog_debug( + "%s: Nexthop ID (%u) not installed or queued for install, not appending to dataplane install group", + __func__, depend->id); + continue; + } + + /* Check for duplicate IDs, ignore if found. 
*/ + for (int j = 0; j < i; j++) { + if (depend->id == grp[j].id) { + duplicate = true; + break; + } + } + + if (duplicate) { + if (IS_ZEBRA_DEBUG_RIB_DETAILED + || IS_ZEBRA_DEBUG_NHG) + zlog_debug( + "%s: Nexthop ID (%u) is duplicate, not appending to dataplane install group", + __func__, depend->id); + continue; + } - if (!duplicate) { grp[i].id = depend->id; - /* We aren't using weights for anything right now */ grp[i].weight = depend->nhg.nexthop->weight; i++; } - - if (i >= max_num) - goto done; } + if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL) + goto done; + + /* TODO -- For now, we are not trying to use or install any + * backup info in this nexthop-id path: we aren't prepared + * to use the backups here yet. We're just debugging what we find. + */ + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: skipping backup nhe", __func__); + done: return i; } +/* Convert a nhe into a group array */ +uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, + int max_num) +{ + /* Call into the recursive function */ + return zebra_nhg_nhe2grp_internal(grp, 0, nhe, max_num); +} + void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe) { struct nhg_connected *rb_node_dep = NULL; @@ -1891,7 +2440,8 @@ void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe) zebra_nhg_install_kernel(rb_node_dep->nhe); } - if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) + if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID) + && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) { /* Change its type to us since we are installing it */ nhe->type = ZEBRA_ROUTE_NHG; @@ -1952,7 +2502,7 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx) id = dplane_ctx_get_nhe_id(ctx); - if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug( "Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s", ctx, dplane_op2str(op), id, dplane_res2str(status)); diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index dc3a47c020..0a9e97ab48 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -50,6 +50,9 @@ struct nhg_hash_entry { struct nexthop_group nhg; + /* If supported, a mapping of backup nexthops. */ + struct nhg_backup_info *backup_info; + /* If this is not a group, it * will be a single nexthop * and must have an interface @@ -72,6 +75,7 @@ struct nhg_hash_entry { * faster with ID's. */ struct nhg_connected_tree_head nhg_depends, nhg_dependents; + /* * Is this nexthop group valid, ie all nexthops are fully resolved. * What is fully resolved? It's a nexthop that is either self contained @@ -102,11 +106,25 @@ struct nhg_hash_entry { * from the kernel. Therefore, it is unhashable. */ #define NEXTHOP_GROUP_UNHASHABLE (1 << 4) + +/* + * Backup nexthop support - identify groups that are backups for + * another group. + */ +#define NEXTHOP_GROUP_BACKUP (1 << 5) + }; /* Was this one we created, either this session or previously? */ #define ZEBRA_NHG_CREATED(NHE) ((NHE->type) == ZEBRA_ROUTE_NHG) +/* + * Backup nexthops: this is a group object itself, so + * that the backup nexthops can use the same code as a normal object. + */ +struct nhg_backup_info { + struct nhg_hash_entry *nhe; +}; enum nhg_ctx_op_e { NHG_CTX_OP_NONE = 0, @@ -162,13 +180,26 @@ bool zebra_nhg_kernel_nexthops_enabled(void); /** * NHE abstracted tree functions. - * Use these where possible instead of the direct ones access ones. + * Use these where possible instead of direct access. 
*/ struct nhg_hash_entry *zebra_nhg_alloc(void); void zebra_nhg_free(struct nhg_hash_entry *nhe); /* In order to clear a generic hash, we need a generic api, sigh. */ void zebra_nhg_hash_free(void *p); +/* Init an nhe, for use in a hash lookup for example. There's some fuzziness + * if the nhe represents only a single nexthop, so we try to capture that + * variant also. + */ +void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi, + const struct nexthop *nh); + +/* Allocate, free backup nexthop info objects */ +struct nhg_backup_info *zebra_nhg_backup_alloc(void); +void zebra_nhg_backup_free(struct nhg_backup_info **p); + +struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe); + extern struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe); extern unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe); @@ -203,10 +234,14 @@ extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, /* Del via kernel */ extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id); -/* Find via route creation */ +/* Find an nhe based on a nexthop_group */ extern struct nhg_hash_entry * zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi); +/* Find an nhe based on a route's nhe, used during route creation */ +struct nhg_hash_entry * +zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi); + /* Reference counter functions */ extern void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe); extern void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe); diff --git a/zebra/zebra_northbound.c b/zebra/zebra_northbound.c new file mode 100644 index 0000000000..9f6514e12f --- /dev/null +++ b/zebra/zebra_northbound.c @@ -0,0 +1,2212 @@ +/* + * Zebra northbound implementation. + * + * Copyright (C) 2019 Network Device Education Foundation, Inc. ("NetDEF") + * Rafael Zalamena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301 USA. + */ + +#include <zebra.h> + +#include "lib/command.h" +#include "lib/log.h" +#include "lib/northbound.h" +#include "lib/routemap.h" + +#include "zebra/rib.h" + +/* + * XPath: /frr-zebra:zebra/mcast-rpf-lookup + */ +static int zebra_mcast_rpf_lookup_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/ip-forwarding + */ +static int zebra_ip_forwarding_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_ip_forwarding_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/ipv6-forwarding + */ +static int zebra_ipv6_forwarding_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_ipv6_forwarding_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/workqueue-hold-timer + */ +static int zebra_workqueue_hold_timer_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/zapi-packets + */ +static int zebra_zapi_packets_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/import-kernel-table/table-id + */ +static int +zebra_import_kernel_table_table_id_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_import_kernel_table_table_id_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/import-kernel-table/distance + */ +static int +zebra_import_kernel_table_distance_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/import-kernel-table/route-map + */ +static int +zebra_import_kernel_table_route_map_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_import_kernel_table_route_map_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/allow-external-route-update + */ +static int +zebra_allow_external_route_update_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_allow_external_route_update_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/dplane-queue-limit + */ +static int zebra_dplane_queue_limit_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/vrf-vni-mapping + */ +static int zebra_vrf_vni_mapping_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_vrf_vni_mapping_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/vrf-vni-mapping/vni-id + */ +static int zebra_vrf_vni_mapping_vni_id_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_vrf_vni_mapping_vni_id_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/vrf-vni-mapping/prefix-only + */ +static int +zebra_vrf_vni_mapping_prefix_only_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_vrf_vni_mapping_prefix_only_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-events + */ +static int zebra_debugs_debug_events_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_events_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-zapi-send + */ +static int zebra_debugs_debug_zapi_send_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_zapi_send_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-zapi-recv + */ +static int zebra_debugs_debug_zapi_recv_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_zapi_recv_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-zapi-detail + */ +static int zebra_debugs_debug_zapi_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_zapi_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-kernel + */ +static int zebra_debugs_debug_kernel_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_kernel_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-kernel-msg-send + */ +static int +zebra_debugs_debug_kernel_msg_send_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_debugs_debug_kernel_msg_send_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-kernel-msg-recv + */ +static int +zebra_debugs_debug_kernel_msg_recv_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_debugs_debug_kernel_msg_recv_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-rib + */ +static int zebra_debugs_debug_rib_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_rib_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-rib-detail + */ +static int zebra_debugs_debug_rib_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_rib_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-fpm + */ +static int zebra_debugs_debug_fpm_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_fpm_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-nht + */ +static int zebra_debugs_debug_nht_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_nht_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-nht-detail + */ +static int zebra_debugs_debug_nht_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_nht_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-mpls + */ +static int zebra_debugs_debug_mpls_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_mpls_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-vxlan + */ +static int zebra_debugs_debug_vxlan_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_vxlan_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-pw + */ +static int zebra_debugs_debug_pw_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_pw_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-dplane + */ +static int zebra_debugs_debug_dplane_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_dplane_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-dplane-detail + */ +static int zebra_debugs_debug_dplane_detail_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +zebra_debugs_debug_dplane_detail_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:zebra/debugs/debug-mlag + */ +static int zebra_debugs_debug_mlag_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int zebra_debugs_debug_mlag_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-route-information + */ +static int get_route_information_rpc(const char *xpath, + const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-v6-mroute-info + */ +static int get_v6_mroute_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-vrf-info + */ +static int get_vrf_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-vrf-vni-info + */ +static int get_vrf_vni_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-info + */ +static int get_evpn_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-vni-info + */ +static int get_vni_info_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-vni-rmac + */ +static int get_evpn_vni_rmac_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-vni-nexthops + */ +static int get_evpn_vni_nexthops_rpc(const char *xpath, + const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:clear-evpn-dup-addr + */ +static int clear_evpn_dup_addr_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-macs + */ +static int get_evpn_macs_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. 
*/ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-evpn-arp-cache + */ +static int get_evpn_arp_cache_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-pbr-ipset + */ +static int get_pbr_ipset_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-pbr-iptable + */ +static int get_pbr_iptable_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-zebra:get-debugs + */ +static int get_debugs_rpc(const char *xpath, const struct list *input, + struct list *output) +{ + /* TODO: implement me. */ + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip4-addr-list + */ +static int +lib_interface_zebra_ip4_addr_list_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +lib_interface_zebra_ip4_addr_list_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip4-addr-list/ip4-peer + */ +static int +lib_interface_zebra_ip4_addr_list_ip4_peer_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +lib_interface_zebra_ip4_addr_list_ip4_peer_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip4-addr-list/label + */ +static int +lib_interface_zebra_ip4_addr_list_label_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +lib_interface_zebra_ip4_addr_list_label_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip6-addr-list + */ +static int +lib_interface_zebra_ip6_addr_list_create(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +lib_interface_zebra_ip6_addr_list_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/ip6-addr-list/label + */ +static int +lib_interface_zebra_ip6_addr_list_label_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int +lib_interface_zebra_ip6_addr_list_label_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/multicast + */ +static int lib_interface_zebra_multicast_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int lib_interface_zebra_multicast_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/link-detect + */ +static int lib_interface_zebra_link_detect_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int lib_interface_zebra_link_detect_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/shutdown + */ +static int lib_interface_zebra_shutdown_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int lib_interface_zebra_shutdown_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: /frr-interface:lib/interface/frr-zebra:zebra/bandwidth + */ +static int lib_interface_zebra_bandwidth_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_ERR_NOT_FOUND; +} + +static int lib_interface_zebra_bandwidth_destroy(enum nb_event event, + const struct lyd_node *dnode) +{ + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_ERR_NOT_FOUND; +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv4-prefix-length + */ +static int lib_route_map_entry_match_condition_ipv4_prefix_length_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *length; + int condition, rv; + + if (event != NB_EV_APPLY) + return NB_OK; + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + length = yang_dnode_get_string(dnode, NULL); + condition = yang_dnode_get_enum(dnode, "../frr-route-map:condition"); + + /* Set destroy information. */ + switch (condition) { + case 100: /* ipv4-prefix-length */ + rhc->rhc_rule = "ip address prefix-len"; + break; + + case 102: /* ipv4-next-hop-prefix-length */ + rhc->rhc_rule = "ip next-hop prefix-len"; + break; + } + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, rhc->rhc_rule, length, + RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +static int lib_route_map_entry_match_condition_ipv4_prefix_length_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv6-prefix-length + */ +static int lib_route_map_entry_match_condition_ipv6_prefix_length_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *length; + int rv; + + if (event != NB_EV_APPLY) + return NB_OK; + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + length = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. */ + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_rule = "ipv6 address prefix-len"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, "ipv6 address prefix-len", + length, RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +static int lib_route_map_entry_match_condition_ipv6_prefix_length_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-protocol + */ +static int lib_route_map_entry_match_condition_source_protocol_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *type; + int rv; + + switch (event) { + case NB_EV_VALIDATE: + type = yang_dnode_get_string(dnode, NULL); + if (proto_name2num(type) == -1) { + zlog_warn("%s: invalid protocol: %s", __func__, type); + return NB_ERR_VALIDATION; + } + return NB_OK; + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + /* NOTHING */ + break; + } + + /* Add configuration. 
*/ + rhc = nb_running_get_entry(dnode, NULL, true); + type = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. */ + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_rule = "source-protocol"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, "source-protocol", type, + RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +static int lib_route_map_entry_match_condition_source_protocol_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-instance + */ +static int lib_route_map_entry_match_condition_source_instance_modify( + enum nb_event event, const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + const char *type; + int rv; + + if (event != NB_EV_APPLY) + return NB_OK; + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + type = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. */ + rhc->rhc_mhook = generic_match_delete; + rhc->rhc_rule = "source-instance"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + rv = generic_match_add(NULL, rhc->rhc_rmi, "source-instance", type, + RMAP_EVENT_MATCH_ADDED); + if (rv != CMD_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + + return NB_OK; +} + +static int lib_route_map_entry_match_condition_source_instance_destroy( + enum nb_event event, const struct lyd_node *dnode) +{ + return lib_route_map_entry_match_destroy(event, dnode); +} + +/* + * XPath: /frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v4 + */ +static int +lib_route_map_entry_set_action_source_v4_modify(enum nb_event event, + const struct lyd_node *dnode, + union nb_resource *resource) +{ + struct routemap_hook_context *rhc; + struct interface *pif = NULL; + const char *source; + struct vrf *vrf; + struct prefix p; + int rv; + + switch (event) { + case NB_EV_VALIDATE: + memset(&p, 0, sizeof(p)); + yang_dnode_get_ipv4p(&p, dnode, NULL); + if (zebra_check_addr(&p) == 0) { + zlog_warn("%s: invalid IPv4 address: %s", __func__, + yang_dnode_get_string(dnode, NULL)); + return NB_ERR_VALIDATION; + } + + RB_FOREACH(vrf, vrf_id_head, &vrfs_by_id) { + pif = if_lookup_exact_address(&p.u.prefix4, AF_INET, + vrf->vrf_id); + if (pif != NULL) + break; + } + if (pif == NULL) { + zlog_warn("%s: is not a local adddress: %s", __func__, + yang_dnode_get_string(dnode, NULL)); + return NB_ERR_VALIDATION; + } + return NB_OK; + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + /* NOTHING */ + break; + } + + /* Add configuration. */ + rhc = nb_running_get_entry(dnode, NULL, true); + source = yang_dnode_get_string(dnode, NULL); + + /* Set destroy information. 
*/
+ rhc->rhc_shook = generic_set_delete;
+ rhc->rhc_rule = "src";
+
+ rv = generic_set_add(NULL, rhc->rhc_rmi, "src", source);
+ if (rv != CMD_SUCCESS) {
+ rhc->rhc_shook = NULL;
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ return NB_OK;
+}
+
+static int
+lib_route_map_entry_set_action_source_v4_destroy(enum nb_event event,
+ const struct lyd_node *dnode)
+{
+ return lib_route_map_entry_set_destroy(event, dnode);
+}
+
+/*
+ * XPath: /frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v6
+ */
+static int
+lib_route_map_entry_set_action_source_v6_modify(enum nb_event event,
+ const struct lyd_node *dnode,
+ union nb_resource *resource)
+{
+ struct routemap_hook_context *rhc;
+ struct interface *pif = NULL;
+ const char *source;
+ struct vrf *vrf;
+ struct prefix p;
+ int rv;
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ memset(&p, 0, sizeof(p));
+ yang_dnode_get_ipv6p(&p, dnode, NULL);
+ if (zebra_check_addr(&p) == 0) {
+ zlog_warn("%s: invalid IPv6 address: %s", __func__,
+ yang_dnode_get_string(dnode, NULL));
+ return NB_ERR_VALIDATION;
+ }
+
+ RB_FOREACH(vrf, vrf_id_head, &vrfs_by_id) {
+ pif = if_lookup_exact_address(&p.u.prefix6, AF_INET6,
+ vrf->vrf_id);
+ if (pif != NULL)
+ break;
+ }
+ if (pif == NULL) {
+ zlog_warn("%s: is not a local address: %s", __func__,
+ yang_dnode_get_string(dnode, NULL));
+ return NB_ERR_VALIDATION;
+ }
+ return NB_OK;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ return NB_OK;
+ case NB_EV_APPLY:
+ /* NOTHING */
+ break;
+ }
+
+ /* Add configuration. */
+ rhc = nb_running_get_entry(dnode, NULL, true);
+ source = yang_dnode_get_string(dnode, NULL);
+
+ /* Set destroy information. */
+ rhc->rhc_shook = generic_set_delete;
+ rhc->rhc_rule = "src";
+
+ rv = generic_set_add(NULL, rhc->rhc_rmi, "src", source);
+ if (rv != CMD_SUCCESS) {
+ rhc->rhc_shook = NULL;
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ return NB_OK;
+}
+
+static int
+lib_route_map_entry_set_action_source_v6_destroy(enum nb_event event,
+ const struct lyd_node *dnode)
+{
+ return lib_route_map_entry_set_destroy(event, dnode);
+}
+
+/* clang-format off */
+const struct frr_yang_module_info frr_zebra_info = {
+ .name = "frr-zebra",
+ .nodes = {
+ {
+ .xpath = "/frr-zebra:zebra/mcast-rpf-lookup",
+ .cbs = {
+ .modify = zebra_mcast_rpf_lookup_modify,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/ip-forwarding",
+ .cbs = {
+ .modify = zebra_ip_forwarding_modify,
+ .destroy = zebra_ip_forwarding_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/ipv6-forwarding",
+ .cbs = {
+ .modify = zebra_ipv6_forwarding_modify,
+ .destroy = zebra_ipv6_forwarding_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/workqueue-hold-timer",
+ .cbs = {
+ .modify = zebra_workqueue_hold_timer_modify,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/zapi-packets",
+ .cbs = {
+ .modify = zebra_zapi_packets_modify,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/import-kernel-table/table-id",
+ .cbs = {
+ .modify = zebra_import_kernel_table_table_id_modify,
+ .destroy = zebra_import_kernel_table_table_id_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/import-kernel-table/distance",
+ .cbs = {
+ .modify = zebra_import_kernel_table_distance_modify,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/import-kernel-table/route-map",
+ .cbs = {
+ .modify = zebra_import_kernel_table_route_map_modify,
+ .destroy = zebra_import_kernel_table_route_map_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-zebra:zebra/allow-external-route-update",
+ .cbs = {
+ .create = zebra_allow_external_route_update_create,
+ .destroy = zebra_allow_external_route_update_destroy,
+ }
+ }, 
+ { + .xpath = "/frr-zebra:zebra/dplane-queue-limit", + .cbs = { + .modify = zebra_dplane_queue_limit_modify, + } + }, + { + .xpath = "/frr-zebra:zebra/vrf-vni-mapping", + .cbs = { + .create = zebra_vrf_vni_mapping_create, + .destroy = zebra_vrf_vni_mapping_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/vrf-vni-mapping/vni-id", + .cbs = { + .modify = zebra_vrf_vni_mapping_vni_id_modify, + .destroy = zebra_vrf_vni_mapping_vni_id_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/vrf-vni-mapping/prefix-only", + .cbs = { + .create = zebra_vrf_vni_mapping_prefix_only_create, + .destroy = zebra_vrf_vni_mapping_prefix_only_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-events", + .cbs = { + .modify = zebra_debugs_debug_events_modify, + .destroy = zebra_debugs_debug_events_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-zapi-send", + .cbs = { + .modify = zebra_debugs_debug_zapi_send_modify, + .destroy = zebra_debugs_debug_zapi_send_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-zapi-recv", + .cbs = { + .modify = zebra_debugs_debug_zapi_recv_modify, + .destroy = zebra_debugs_debug_zapi_recv_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-zapi-detail", + .cbs = { + .modify = zebra_debugs_debug_zapi_detail_modify, + .destroy = zebra_debugs_debug_zapi_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-kernel", + .cbs = { + .modify = zebra_debugs_debug_kernel_modify, + .destroy = zebra_debugs_debug_kernel_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-kernel-msg-send", + .cbs = { + .modify = zebra_debugs_debug_kernel_msg_send_modify, + .destroy = zebra_debugs_debug_kernel_msg_send_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-kernel-msg-recv", + .cbs = { + .modify = zebra_debugs_debug_kernel_msg_recv_modify, + .destroy = zebra_debugs_debug_kernel_msg_recv_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-rib", + .cbs = { + .modify = zebra_debugs_debug_rib_modify, + .destroy = zebra_debugs_debug_rib_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-rib-detail", + .cbs = { + .modify = zebra_debugs_debug_rib_detail_modify, + .destroy = zebra_debugs_debug_rib_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-fpm", + .cbs = { + .modify = zebra_debugs_debug_fpm_modify, + .destroy = zebra_debugs_debug_fpm_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-nht", + .cbs = { + .modify = zebra_debugs_debug_nht_modify, + .destroy = zebra_debugs_debug_nht_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-nht-detail", + .cbs = { + .modify = zebra_debugs_debug_nht_detail_modify, + .destroy = zebra_debugs_debug_nht_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-mpls", + .cbs = { + .modify = zebra_debugs_debug_mpls_modify, + .destroy = zebra_debugs_debug_mpls_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-vxlan", + .cbs = { + .modify = zebra_debugs_debug_vxlan_modify, + .destroy = zebra_debugs_debug_vxlan_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-pw", + .cbs = { + .modify = zebra_debugs_debug_pw_modify, + .destroy = zebra_debugs_debug_pw_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-dplane", + .cbs = { + .modify = zebra_debugs_debug_dplane_modify, + .destroy = zebra_debugs_debug_dplane_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-dplane-detail", + .cbs = { + .modify = zebra_debugs_debug_dplane_detail_modify, + .destroy = 
zebra_debugs_debug_dplane_detail_destroy, + } + }, + { + .xpath = "/frr-zebra:zebra/debugs/debug-mlag", + .cbs = { + .modify = zebra_debugs_debug_mlag_modify, + .destroy = zebra_debugs_debug_mlag_destroy, + } + }, + { + .xpath = "/frr-zebra:get-route-information", + .cbs = { + .rpc = get_route_information_rpc, + } + }, + { + .xpath = "/frr-zebra:get-v6-mroute-info", + .cbs = { + .rpc = get_v6_mroute_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-vrf-info", + .cbs = { + .rpc = get_vrf_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-vrf-vni-info", + .cbs = { + .rpc = get_vrf_vni_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-info", + .cbs = { + .rpc = get_evpn_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-vni-info", + .cbs = { + .rpc = get_vni_info_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-vni-rmac", + .cbs = { + .rpc = get_evpn_vni_rmac_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-vni-nexthops", + .cbs = { + .rpc = get_evpn_vni_nexthops_rpc, + } + }, + { + .xpath = "/frr-zebra:clear-evpn-dup-addr", + .cbs = { + .rpc = clear_evpn_dup_addr_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-macs", + .cbs = { + .rpc = get_evpn_macs_rpc, + } + }, + { + .xpath = "/frr-zebra:get-evpn-arp-cache", + .cbs = { + .rpc = get_evpn_arp_cache_rpc, + } + }, + { + .xpath = "/frr-zebra:get-pbr-ipset", + .cbs = { + .rpc = get_pbr_ipset_rpc, + } + }, + { + .xpath = "/frr-zebra:get-pbr-iptable", + .cbs = { + .rpc = get_pbr_iptable_rpc, + } + }, + { + .xpath = "/frr-zebra:get-debugs", + .cbs = { + .rpc = get_debugs_rpc, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip4-addr-list", + .cbs = { + .create = lib_interface_zebra_ip4_addr_list_create, + .destroy = lib_interface_zebra_ip4_addr_list_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip4-addr-list/ip4-peer", + .cbs = { + .modify = lib_interface_zebra_ip4_addr_list_ip4_peer_modify, + .destroy = lib_interface_zebra_ip4_addr_list_ip4_peer_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip4-addr-list/label", + .cbs = { + .modify = lib_interface_zebra_ip4_addr_list_label_modify, + .destroy = lib_interface_zebra_ip4_addr_list_label_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip6-addr-list", + .cbs = { + .create = lib_interface_zebra_ip6_addr_list_create, + .destroy = lib_interface_zebra_ip6_addr_list_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/ip6-addr-list/label", + .cbs = { + .modify = lib_interface_zebra_ip6_addr_list_label_modify, + .destroy = lib_interface_zebra_ip6_addr_list_label_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/multicast", + .cbs = { + .modify = lib_interface_zebra_multicast_modify, + .destroy = lib_interface_zebra_multicast_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/link-detect", + .cbs = { + .modify = lib_interface_zebra_link_detect_modify, + .destroy = lib_interface_zebra_link_detect_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/shutdown", + .cbs = { + .modify = lib_interface_zebra_shutdown_modify, + .destroy = lib_interface_zebra_shutdown_destroy, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-zebra:zebra/bandwidth", + .cbs = { + .modify = lib_interface_zebra_bandwidth_modify, + .destroy = lib_interface_zebra_bandwidth_destroy, + } + }, + { + .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv4-prefix-length", + .cbs = { + 
.modify = lib_route_map_entry_match_condition_ipv4_prefix_length_modify,
+ .destroy = lib_route_map_entry_match_condition_ipv4_prefix_length_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:ipv6-prefix-length",
+ .cbs = {
+ .modify = lib_route_map_entry_match_condition_ipv6_prefix_length_modify,
+ .destroy = lib_route_map_entry_match_condition_ipv6_prefix_length_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-protocol",
+ .cbs = {
+ .modify = lib_route_map_entry_match_condition_source_protocol_modify,
+ .destroy = lib_route_map_entry_match_condition_source_protocol_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/match-condition/frr-zebra:source-instance",
+ .cbs = {
+ .modify = lib_route_map_entry_match_condition_source_instance_modify,
+ .destroy = lib_route_map_entry_match_condition_source_instance_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v4",
+ .cbs = {
+ .modify = lib_route_map_entry_set_action_source_v4_modify,
+ .destroy = lib_route_map_entry_set_action_source_v4_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/set-action/frr-zebra:source-v6",
+ .cbs = {
+ .modify = lib_route_map_entry_set_action_source_v6_modify,
+ .destroy = lib_route_map_entry_set_action_source_v6_destroy,
+ }
+ },
+ {
+ .xpath = NULL,
+ },
+ }
+};
diff --git a/zebra/zebra_ptm_redistribute.c b/zebra/zebra_ptm_redistribute.c
index 01d5114b9f..eabc2e005e 100644
--- a/zebra/zebra_ptm_redistribute.c
+++ b/zebra/zebra_ptm_redistribute.c
@@ -59,6 +59,9 @@ static int zsend_interface_bfd_update(int cmd, struct zserv *client, stream_put(s, &sp->u.prefix, blen); stream_putc(s, sp->prefixlen);
+ /* c-bit field; always sent as zero here */
+ stream_putc(s, 0);
+
/* Write packet size. */ stream_putw_at(s, 0, stream_get_endp(s));
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 29d59b515f..2dbe907751 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -112,7 +112,7 @@ static const struct { /* no entry/default: 150 */ };
-static void __attribute__((format(printf, 5, 6)))
+static void PRINTFRR(5, 6)
_rnode_zlog(const char *_func, vrf_id_t vrf_id, struct route_node *rn, int priority, const char *msgfmt, ...) {
@@ -213,7 +213,7 @@ static void route_entry_attach_ref(struct route_entry *re, int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new) {
- struct nhg_hash_entry *old = NULL;
+ struct nhg_hash_entry *old;
int ret = 0; if (new == NULL) {
@@ -223,7 +223,7 @@ int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new) goto done; }
- if (re->nhe_id != new->id) {
+ if ((re->nhe_id != 0) && (re->nhe_id != new->id)) {
old = re->nhe; route_entry_attach_ref(re, new);
@@ -261,7 +261,7 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id, p.prefixlen = IPV6_MAX_PREFIXLEN; }
- rn = route_node_match(table, (struct prefix *)&p);
+ rn = route_node_match(table, &p);
while (rn) { rib_dest_t *dest;
@@ -348,8 +348,8 @@ struct route_entry *rib_match_ipv4_multicast(vrf_id_t vrf_id, char buf[BUFSIZ]; inet_ntop(AF_INET, &addr, buf, BUFSIZ);
- zlog_debug("%s: %s: vrf: %u found %s, using %s",
- __func__, buf, vrf_id,
+ zlog_debug("%s: %s: vrf: %s(%u) found %s, using %s", __func__,
+ buf, vrf_id_to_name(vrf_id), vrf_id,
mre ? (ure ? "MRIB+URIB" : "MRIB") : ure ? "URIB" : "nothing", re == ure ? "URIB" : re == mre ? 
"MRIB" : "none"); @@ -659,13 +659,14 @@ void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq) char buf1[PREFIX_STRLEN]; char buf2[PREFIX_STRLEN]; - zlog_debug("%u:%s has Nexthop(%s) Type: %s depending on it, evaluating %u:%u", - zvrf->vrf->vrf_id, - srcdest_rnode2str(rn, buf1, - sizeof(buf1)), - prefix2str(p, buf2, sizeof(buf2)), - rnh_type2str(rnh->type), - seq, rnh->seqno); + zlog_debug( + "%s(%u):%s has Nexthop(%s) Type: %s depending on it, evaluating %u:%u", + zvrf_name(zvrf), zvrf_id(zvrf), + srcdest_rnode2str(rn, buf1, + sizeof(buf1)), + prefix2str(p, buf2, sizeof(buf2)), + rnh_type2str(rnh->type), seq, + rnh->seqno); } /* @@ -753,8 +754,8 @@ static void rib_process_add_fib(struct zebra_vrf *zvrf, struct route_node *rn, if (IS_ZEBRA_DEBUG_RIB) { char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rn, buf, sizeof(buf)); - zlog_debug("%u:%s: Adding route rn %p, re %p (%s)", - zvrf_id(zvrf), buf, rn, new, + zlog_debug("%s(%u):%s: Adding route rn %p, re %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), buf, rn, new, zebra_route_string(new->type)); } @@ -776,8 +777,8 @@ static void rib_process_del_fib(struct zebra_vrf *zvrf, struct route_node *rn, if (IS_ZEBRA_DEBUG_RIB) { char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rn, buf, sizeof(buf)); - zlog_debug("%u:%s: Deleting route rn %p, re %p (%s)", - zvrf_id(zvrf), buf, rn, old, + zlog_debug("%s(%u):%s: Deleting route rn %p, re %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), buf, rn, old, zebra_route_string(old->type)); } @@ -829,15 +830,17 @@ static void rib_process_update_fib(struct zebra_vrf *zvrf, srcdest_rnode2str(rn, buf, sizeof(buf)); if (new != old) zlog_debug( - "%u:%s: Updating route rn %p, re %p (%s) old %p (%s)", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Updating route rn %p, re %p (%s) old %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type), old, zebra_route_string(old->type)); else zlog_debug( - "%u:%s: Updating route rn %p, re %p (%s)", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Updating route rn %p, re %p (%s)", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type)); } @@ -867,15 +870,17 @@ static void rib_process_update_fib(struct zebra_vrf *zvrf, srcdest_rnode2str(rn, buf, sizeof(buf)); if (new != old) zlog_debug( - "%u:%s: Deleting route rn %p, re %p (%s) old %p (%s) - nexthop inactive", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Deleting route rn %p, re %p (%s) old %p (%s) - nexthop inactive", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type), old, zebra_route_string(old->type)); else zlog_debug( - "%u:%s: Deleting route rn %p, re %p (%s) - nexthop inactive", - zvrf_id(zvrf), buf, rn, new, + "%s(%u):%s: Deleting route rn %p, re %p (%s) - nexthop inactive", + zvrf_name(zvrf), zvrf_id(zvrf), + buf, rn, new, zebra_route_string(new->type)); } @@ -990,6 +995,7 @@ static void rib_process(struct route_node *rn) char buf[SRCDEST2STR_BUFFER]; rib_dest_t *dest; struct zebra_vrf *zvrf = NULL; + struct vrf *vrf; const struct prefix *p, *src_p; srcdest_rnode_prefixes(rn, &p, &src_p); @@ -1003,11 +1009,14 @@ static void rib_process(struct route_node *rn) vrf_id = zvrf_id(zvrf); } + vrf = vrf_lookup_by_id(vrf_id); + if (IS_ZEBRA_DEBUG_RIB) srcdest_rnode2str(rn, buf, sizeof(buf)); if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("%u:%s: Processing rn %p", vrf_id, buf, rn); + zlog_debug("%s(%u):%s: Processing rn %p", VRF_LOGNAME(vrf), + vrf_id, buf, rn); /* * we can have rn's that have a NULL info pointer @@ -1021,10 +1030,10 @@ static 
void rib_process(struct route_node *rn) RNODE_FOREACH_RE_SAFE (rn, re, next) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "%u:%s: Examine re %p (%s) status %x flags %x dist %d metric %d", - vrf_id, buf, re, zebra_route_string(re->type), - re->status, re->flags, re->distance, - re->metric); + "%s(%u):%s: Examine re %p (%s) status %x flags %x dist %d metric %d", + VRF_LOGNAME(vrf), vrf_id, buf, re, + zebra_route_string(re->type), re->status, + re->flags, re->distance, re->metric); /* Currently selected re. */ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED)) { @@ -1065,9 +1074,11 @@ static void rib_process(struct route_node *rn) if (re != old_selected) { if (IS_ZEBRA_DEBUG_RIB) zlog_debug( - "%s: %u:%s: imported via import-table but denied " + "%s: %s(%u):%s: imported via import-table but denied " "by the ip protocol table route-map", - __func__, vrf_id, buf); + __func__, + VRF_LOGNAME(vrf), + vrf_id, buf); rib_unlink(rn, re); } else SET_FLAG(re->status, @@ -1118,9 +1129,9 @@ static void rib_process(struct route_node *rn) if (IS_ZEBRA_DEBUG_RIB_DETAILED) { zlog_debug( - "%u:%s: After processing: old_selected %p new_selected %p old_fib %p new_fib %p", - vrf_id, buf, (void *)old_selected, (void *)new_selected, - (void *)old_fib, (void *)new_fib); + "%s(%u):%s: After processing: old_selected %p new_selected %p old_fib %p new_fib %p", + VRF_LOGNAME(vrf), vrf_id, buf, (void *)old_selected, + (void *)new_selected, (void *)old_fib, (void *)new_fib); } /* Buffer ROUTE_ENTRY_CHANGED here, because it will get cleared if @@ -1191,8 +1202,8 @@ static void zebra_rib_evaluate_mpls(struct route_node *rn) if (CHECK_FLAG(dest->flags, RIB_DEST_UPDATE_LSPS)) { if (IS_ZEBRA_DEBUG_MPLS) zlog_debug( - "%u: Scheduling all LSPs upon RIB completion", - zvrf_id(zvrf)); + "%s(%u): Scheduling all LSPs upon RIB completion", + zvrf_name(zvrf), zvrf_id(zvrf)); zebra_mpls_lsp_schedule(zvrf); mpls_unmark_lsps_for_processing(rn); } @@ -1299,6 +1310,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, bool is_selected = false; /* Is 're' currently the selected re? */ bool changed_p = false; /* Change to nexthops? */ rib_dest_t *dest; + struct vrf *vrf; + + vrf = vrf_lookup_by_id(re->vrf_id); /* Note well: only capturing the prefix string if debug is enabled here; * unconditional log messages will have to generate the string. @@ -1311,8 +1325,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, is_selected = (re == dest->selected_fib); if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("update_from_ctx: %u:%s: %sSELECTED", - re->vrf_id, dest_str, (is_selected ? "" : "NOT ")); + zlog_debug("update_from_ctx: %s(%u):%s: %sSELECTED", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (is_selected ? "" : "NOT ")); /* Update zebra's nexthop FIB flag for each nexthop that was installed. * If the installed set differs from the set requested by the rib/owner, @@ -1325,11 +1340,10 @@ static bool rib_update_re_from_ctx(struct route_entry *re, * Let's assume the nexthops are ordered here to save time. */ if (nexthop_group_equal(&re->fib_ng, dplane_ctx_get_ng(ctx)) == false) { - if (IS_ZEBRA_DEBUG_RIB_DETAILED) { + if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - "%u:%s update_from_ctx: notif nh and fib nh mismatch", - re->vrf_id, dest_str); - } + "%s(%u):%s update_from_ctx: notif nh and fib nh mismatch", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); matched = false; } else @@ -1338,8 +1352,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, /* If the new FIB set matches the existing FIB set, we're done. 
*/ if (matched) { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): existing fib nhg, no change", - re->vrf_id, dest_str); + zlog_debug( + "%s(%u):%s update_from_ctx(): existing fib nhg, no change", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); goto done; } else if (re->fib_ng.nexthop) { @@ -1347,8 +1362,9 @@ static bool rib_update_re_from_ctx(struct route_entry *re, * Free stale fib list and move on to check the rib nhg. */ if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): replacing fib nhg", - re->vrf_id, dest_str); + zlog_debug( + "%s(%u):%s update_from_ctx(): replacing fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); nexthops_free(re->fib_ng.nexthop); re->fib_ng.nexthop = NULL; @@ -1356,8 +1372,8 @@ static bool rib_update_re_from_ctx(struct route_entry *re, changed_p = true; } else { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): no fib nhg", - re->vrf_id, dest_str); + zlog_debug("%s(%u):%s update_from_ctx(): no fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); } /* @@ -1437,9 +1453,10 @@ static bool rib_update_re_from_ctx(struct route_entry *re, /* If all nexthops were processed, we're done */ if (matched) { if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): rib nhg matched, changed '%s'", - re->vrf_id, dest_str, - (changed_p ? "true" : "false")); + zlog_debug( + "%s(%u):%s update_from_ctx(): rib nhg matched, changed '%s'", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (changed_p ? "true" : "false")); goto done; } @@ -1449,9 +1466,10 @@ no_nexthops: * create a fib-specific nexthop-group */ if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%u:%s update_from_ctx(): changed %s, adding new fib nhg", - re->vrf_id, dest_str, - (changed_p ? "true" : "false")); + zlog_debug( + "%s(%u):%s update_from_ctx(): changed %s, adding new fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (changed_p ? 
"true" : "false")); ctxnhg = dplane_ctx_get_ng(ctx); @@ -1489,10 +1507,12 @@ rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx) dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx)); if (table == NULL) { if (IS_ZEBRA_DEBUG_DPLANE) { - zlog_debug("Failed to find route for ctx: no table for afi %d, safi %d, vrf %u", - dplane_ctx_get_afi(ctx), - dplane_ctx_get_safi(ctx), - dplane_ctx_get_vrf(ctx)); + zlog_debug( + "Failed to find route for ctx: no table for afi %d, safi %d, vrf %s(%u)", + dplane_ctx_get_afi(ctx), + dplane_ctx_get_safi(ctx), + vrf_id_to_name(dplane_ctx_get_vrf(ctx)), + dplane_ctx_get_vrf(ctx)); } goto done; } @@ -1515,6 +1535,7 @@ done: static void rib_process_result(struct zebra_dplane_ctx *ctx) { struct zebra_vrf *zvrf = NULL; + struct vrf *vrf; struct route_node *rn = NULL; struct route_entry *re = NULL, *old_re = NULL, *rib; bool is_update = false; @@ -1526,6 +1547,7 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) bool fib_changed = false; zvrf = vrf_info_lookup(dplane_ctx_get_vrf(ctx)); + vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); dest_pfx = dplane_ctx_get_dest(ctx); /* Note well: only capturing the prefix string if debug is enabled here; @@ -1538,8 +1560,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) rn = rib_find_rn_from_ctx(ctx); if (rn == NULL) { if (IS_ZEBRA_DEBUG_DPLANE) { - zlog_debug("Failed to process dplane results: no route for %u:%s", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "Failed to process dplane results: no route for %s(%u):%s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); } goto done; } @@ -1550,9 +1574,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) status = dplane_ctx_get_status(ctx); if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s Processing dplane ctx %p, op %s result %s", - dplane_ctx_get_vrf(ctx), dest_str, ctx, - dplane_op2str(op), dplane_res2str(status)); + zlog_debug( + "%s(%u):%s Processing dplane ctx %p, op %s result %s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), dest_str, + ctx, dplane_op2str(op), dplane_res2str(status)); /* * Update is a bit of a special case, where we may have both old and new @@ -1590,9 +1615,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) if (re) { if (re->dplane_sequence != seq) { if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s Stale dplane result for re %p", - dplane_ctx_get_vrf(ctx), - dest_str, re); + zlog_debug( + "%s(%u):%s Stale dplane result for re %p", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, re); } else UNSET_FLAG(re->status, ROUTE_ENTRY_QUEUED); } @@ -1600,9 +1626,11 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) if (old_re) { if (old_re->dplane_sequence != dplane_ctx_get_old_seq(ctx)) { if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s Stale dplane result for old_re %p", - dplane_ctx_get_vrf(ctx), - dest_str, old_re); + zlog_debug( + "%s(%u):%s Stale dplane result for old_re %p", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, + old_re); } else UNSET_FLAG(old_re->status, ROUTE_ENTRY_QUEUED); } @@ -1639,10 +1667,11 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) if (!fib_changed) { if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("%u:%s no fib change for re", - dplane_ctx_get_vrf( - ctx), - dest_str); + zlog_debug( + "%s(%u):%s no fib change for re", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), + dest_str); } /* Redistribute */ @@ -1677,10 +1706,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) 
zsend_route_notify_owner(re, dest_pfx, ZAPI_ROUTE_FAIL_INSTALL); - zlog_warn("%u:%s: Route install failed", - dplane_ctx_get_vrf(ctx), - prefix2str(dest_pfx, - dest_str, sizeof(dest_str))); + zlog_warn("%s(%u):%s: Route install failed", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + prefix2str(dest_pfx, dest_str, + sizeof(dest_str))); } break; case DPLANE_OP_ROUTE_DELETE: @@ -1706,10 +1735,10 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx) zsend_route_notify_owner_ctx(ctx, ZAPI_ROUTE_REMOVE_FAIL); - zlog_warn("%u:%s: Route Deletion failure", - dplane_ctx_get_vrf(ctx), - prefix2str(dest_pfx, - dest_str, sizeof(dest_str))); + zlog_warn("%s(%u):%s: Route Deletion failure", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + prefix2str(dest_pfx, dest_str, + sizeof(dest_str))); } /* @@ -1747,6 +1776,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) { struct route_node *rn = NULL; struct route_entry *re = NULL; + struct vrf *vrf; struct nexthop *nexthop; char dest_str[PREFIX_STRLEN] = ""; const struct prefix *dest_pfx, *src_pfx; @@ -1755,6 +1785,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) bool debug_p = IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_RIB; int start_count, end_count; dest_pfx = dplane_ctx_get_dest(ctx); + vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); /* Note well: only capturing the prefix string if debug is enabled here; * unconditional log messages will have to generate the string. @@ -1766,8 +1797,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) rn = rib_find_rn_from_ctx(ctx); if (rn == NULL) { if (debug_p) { - zlog_debug("Failed to process dplane notification: no routes for %u:%s", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "Failed to process dplane notification: no routes for %s(%u):%s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); } goto done; } @@ -1776,8 +1809,9 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) srcdest_rnode_prefixes(rn, &dest_pfx, &src_pfx); if (debug_p) - zlog_debug("%u:%s Processing dplane notif ctx %p", - dplane_ctx_get_vrf(ctx), dest_str, ctx); + zlog_debug("%s(%u):%s Processing dplane notif ctx %p", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), dest_str, + ctx); /* * Take a pass through the routes, look for matches with the context @@ -1791,10 +1825,11 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) /* No match? Nothing we can do */ if (re == NULL) { if (debug_p) - zlog_debug("%u:%s Unable to process dplane notification: no entry for type %s", - dplane_ctx_get_vrf(ctx), dest_str, - zebra_route_string( - dplane_ctx_get_type(ctx))); + zlog_debug( + "%s(%u):%s Unable to process dplane notification: no entry for type %s", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str, + zebra_route_string(dplane_ctx_get_type(ctx))); goto done; } @@ -1824,17 +1859,21 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED); if (debug_p) - zlog_debug("%u:%s dplane notif, uninstalled type %s route", - dplane_ctx_get_vrf(ctx), dest_str, - zebra_route_string( - dplane_ctx_get_type(ctx))); + zlog_debug( + "%s(%u):%s dplane notif, uninstalled type %s route", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, + zebra_route_string( + dplane_ctx_get_type(ctx))); } else { /* At least report on the event. 
*/ if (debug_p) - zlog_debug("%u:%s dplane notif, but type %s not selected_fib", - dplane_ctx_get_vrf(ctx), dest_str, - zebra_route_string( - dplane_ctx_get_type(ctx))); + zlog_debug( + "%s(%u):%s dplane notif, but type %s not selected_fib", + VRF_LOGNAME(vrf), + dplane_ctx_get_vrf(ctx), dest_str, + zebra_route_string( + dplane_ctx_get_type(ctx))); } goto done; } @@ -1859,8 +1898,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) if (!fib_changed) { if (debug_p) - zlog_debug("%u:%s dplane notification: rib_update returns FALSE", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s dplane notification: rib_update returns FALSE", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); } /* @@ -1879,8 +1920,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) */ if (start_count > 0 && end_count > 0) { if (debug_p) - zlog_debug("%u:%s applied nexthop changes from dplane notification", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s applied nexthop changes from dplane notification", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); /* Changed nexthops - update kernel/others */ dplane_route_notif_update(rn, re, @@ -1888,8 +1931,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) } else if (start_count == 0 && end_count > 0) { if (debug_p) - zlog_debug("%u:%s installed transition from dplane notification", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s installed transition from dplane notification", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); /* We expect this to be the selected route, so we want * to tell others about this transition. @@ -1904,8 +1949,10 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) } else if (start_count > 0 && end_count == 0) { if (debug_p) - zlog_debug("%u:%s un-installed transition from dplane notification", - dplane_ctx_get_vrf(ctx), dest_str); + zlog_debug( + "%s(%u):%s un-installed transition from dplane notification", + VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), + dest_str); /* Transition from _something_ installed to _nothing_ * installed. @@ -1970,8 +2017,8 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex) char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rnode, buf, sizeof(buf)); - zlog_debug("%u:%s: rn %p dequeued from sub-queue %u", - zvrf ? 
zvrf_id(zvrf) : 0, buf, rnode, qindex); + zlog_debug("%s(%u):%s: rn %p dequeued from sub-queue %u", + zvrf_name(zvrf), zvrf_id(zvrf), buf, rnode, qindex); } if (rnode->info) @@ -2338,7 +2385,6 @@ static void rib_addnode(struct route_node *rn, void rib_unlink(struct route_node *rn, struct route_entry *re) { rib_dest_t *dest; - struct nhg_hash_entry *nhe = NULL; assert(rn && re); @@ -2353,11 +2399,10 @@ void rib_unlink(struct route_node *rn, struct route_entry *re) if (dest->selected_fib == re) dest->selected_fib = NULL; - if (re->nhe_id) { - nhe = zebra_nhg_lookup_id(re->nhe_id); - if (nhe) - zebra_nhg_decrement_ref(nhe); - } else if (re->nhe->nhg.nexthop) + if (re->nhe && re->nhe_id) { + assert(re->nhe->id == re->nhe_id); + zebra_nhg_decrement_ref(re->nhe); + } else if (re->nhe && re->nhe->nhg.nexthop) nexthops_free(re->nhe->nhg.nexthop); nexthops_free(re->fib_ng.nexthop); @@ -2385,9 +2430,9 @@ void rib_delnode(struct route_node *rn, struct route_entry *re) if (IS_ZEBRA_DEBUG_RIB) { char buf[SRCDEST2STR_BUFFER]; srcdest_rnode2str(rn, buf, sizeof(buf)); - zlog_debug("%u:%s: Freeing route rn %p, re %p (%s)", - re->vrf_id, buf, rn, re, - zebra_route_string(re->type)); + zlog_debug("%s(%u):%s: Freeing route rn %p, re %p (%s)", + vrf_id_to_name(re->vrf_id), re->vrf_id, buf, + rn, re, zebra_route_string(re->type)); } rib_unlink(rn, re); @@ -2396,11 +2441,75 @@ void rib_delnode(struct route_node *rn, struct route_entry *re) } } +/* + * Helper that debugs a single nexthop within a route-entry + */ +static void _route_entry_dump_nh(const struct route_entry *re, + const char *straddr, + const struct nexthop *nexthop) +{ + char nhname[PREFIX_STRLEN]; + char backup_str[50]; + char wgt_str[50]; + struct interface *ifp; + struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + + switch (nexthop->type) { + case NEXTHOP_TYPE_BLACKHOLE: + sprintf(nhname, "Blackhole"); + break; + case NEXTHOP_TYPE_IFINDEX: + ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id); + sprintf(nhname, "%s", ifp ? ifp->name : "Unknown"); + break; + case NEXTHOP_TYPE_IPV4: + /* fallthrough */ + case NEXTHOP_TYPE_IPV4_IFINDEX: + inet_ntop(AF_INET, &nexthop->gate, nhname, INET6_ADDRSTRLEN); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + inet_ntop(AF_INET6, &nexthop->gate, nhname, INET6_ADDRSTRLEN); + break; + } + + backup_str[0] = '\0'; + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + snprintf(backup_str, sizeof(backup_str), "backup %d,", + (int)nexthop->backup_idx); + } + + wgt_str[0] = '\0'; + if (nexthop->weight) + snprintf(wgt_str, sizeof(wgt_str), "wgt %d,", nexthop->weight); + + zlog_debug("%s: %s %s[%u] vrf %s(%u) %s%s with flags %s%s%s%s%s", + straddr, (nexthop->rparent ? " NH" : "NH"), nhname, + nexthop->ifindex, vrf ? vrf->name : "Unknown", + nexthop->vrf_id, + wgt_str, backup_str, + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) + ? "ACTIVE " + : ""), + (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) + ? "FIB " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE) + ? "RECURSIVE " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK) + ? "ONLINK " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE) + ? "DUPLICATE " + : "")); + +} + /* This function dumps the contents of a given RE entry into * standard debug log. Calling function name and IP prefix in * question are passed as 1st and 2nd arguments. 
*/ - void _route_entry_dump(const char *func, union prefixconstptr pp, union prefixconstptr src_pp, const struct route_entry *re) @@ -2409,9 +2518,9 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, bool is_srcdst = src_p && src_p->prefixlen; char straddr[PREFIX_STRLEN]; char srcaddr[PREFIX_STRLEN]; - char nhname[PREFIX_STRLEN]; struct nexthop *nexthop; struct vrf *vrf = vrf_lookup_by_id(re->vrf_id); + struct nexthop_group *nhg; zlog_debug("%s: dumping RE entry %p for %s%s%s vrf %s(%u)", func, (const void *)re, prefix2str(pp, straddr, sizeof(straddr)), @@ -2422,78 +2531,48 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, zlog_debug("%s: uptime == %lu, type == %u, instance == %d, table == %d", straddr, (unsigned long)re->uptime, re->type, re->instance, re->table); - zlog_debug( - "%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u", - straddr, re->metric, re->mtu, re->distance, re->flags, re->status); + zlog_debug("%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u", + straddr, re->metric, re->mtu, re->distance, re->flags, + re->status); zlog_debug("%s: nexthop_num == %u, nexthop_active_num == %u", straddr, nexthop_group_nexthop_num(&(re->nhe->nhg)), nexthop_group_active_nexthop_num(&(re->nhe->nhg))); - for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) { - struct interface *ifp; - struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* Dump nexthops */ + for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) + _route_entry_dump_nh(re, straddr, nexthop); - switch (nexthop->type) { - case NEXTHOP_TYPE_BLACKHOLE: - sprintf(nhname, "Blackhole"); - break; - case NEXTHOP_TYPE_IFINDEX: - ifp = if_lookup_by_index(nexthop->ifindex, - nexthop->vrf_id); - sprintf(nhname, "%s", ifp ? ifp->name : "Unknown"); - break; - case NEXTHOP_TYPE_IPV4: - /* fallthrough */ - case NEXTHOP_TYPE_IPV4_IFINDEX: - inet_ntop(AF_INET, &nexthop->gate, nhname, - INET6_ADDRSTRLEN); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - inet_ntop(AF_INET6, &nexthop->gate, nhname, - INET6_ADDRSTRLEN); - break; - } - zlog_debug("%s: %s %s[%u] vrf %s(%u) with flags %s%s%s%s%s", - straddr, (nexthop->rparent ? " NH" : "NH"), nhname, - nexthop->ifindex, vrf ? vrf->name : "Unknown", - nexthop->vrf_id, - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) - ? "ACTIVE " - : ""), - (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) - ? "FIB " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE) - ? "RECURSIVE " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK) - ? "ONLINK " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE) - ? "DUPLICATE " - : "")); + if (zebra_nhg_get_backup_nhg(re->nhe)) { + zlog_debug("%s: backup nexthops:", straddr); + + nhg = zebra_nhg_get_backup_nhg(re->nhe); + for (ALL_NEXTHOPS_PTR(nhg, nexthop)) + _route_entry_dump_nh(re, straddr, nexthop); } + zlog_debug("%s: dump complete", straddr); } -/* This is an exported helper to rtm_read() to dump the strange +/* + * This is an exported helper to rtm_read() to dump the strange * RE entry found by rib_lookup_ipv4_route() */ - void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id) { struct route_table *table; struct route_node *rn; struct route_entry *re; + struct vrf *vrf; char prefix_buf[INET_ADDRSTRLEN]; + vrf = vrf_lookup_by_id(vrf_id); + /* Lookup table. 
*/ table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id); if (!table) { flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED, - "%s:%u zebra_vrf_table() returned NULL", __func__, - vrf_id); + "%s:%s(%u) zebra_vrf_table() returned NULL", __func__, + VRF_LOGNAME(vrf), vrf_id); return; } @@ -2502,7 +2581,8 @@ void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id) /* No route for this prefix. */ if (!rn) { - zlog_debug("%s:%u lookup failed for %s", __func__, vrf_id, + zlog_debug("%s:%s(%u) lookup failed for %s", __func__, + VRF_LOGNAME(vrf), vrf_id, prefix2str((struct prefix *)p, prefix_buf, sizeof(prefix_buf))); return; @@ -2513,9 +2593,8 @@ void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id) /* let's go */ RNODE_FOREACH_RE (rn, re) { - zlog_debug("%s:%u rn %p, re %p: %s, %s", - __func__, vrf_id, - (void *)rn, (void *)re, + zlog_debug("%s:%s(%u) rn %p, re %p: %s, %s", __func__, + VRF_LOGNAME(vrf), vrf_id, (void *)rn, (void *)re, (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED) ? "removed" : "NOT removed"), @@ -2538,9 +2617,11 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id) rib_dest_t *dest; if (NULL == (table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id))) { + struct vrf *vrf = vrf_lookup_by_id(vrf_id); + flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED, - "%s:%u zebra_vrf_table() returned NULL", __func__, - vrf_id); + "%s:%s(%u) zebra_vrf_table() returned NULL", __func__, + VRF_LOGNAME(vrf), vrf_id); return; } @@ -2563,10 +2644,13 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id) if (dest->selected_fib) { if (IS_ZEBRA_DEBUG_RIB) { char buf[PREFIX_STRLEN]; + struct vrf *vrf = + vrf_lookup_by_id(dest->selected_fib->vrf_id); - zlog_debug("%u:%s: freeing way for connected prefix", - dest->selected_fib->vrf_id, - prefix2str(&rn->p, buf, sizeof(buf))); + zlog_debug( + "%s(%u):%s: freeing way for connected prefix", + VRF_LOGNAME(vrf), dest->selected_fib->vrf_id, + prefix2str(&rn->p, buf, sizeof(buf))); route_entry_dump(&rn->p, NULL, dest->selected_fib); } rib_uninstall(rn, dest->selected_fib); @@ -2574,9 +2658,16 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id) } } -int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, - struct prefix_ipv6 *src_p, struct route_entry *re, - struct nexthop_group *ng) +/* + * Internal route-add implementation; there are a couple of different public + * signatures. Callers in this path are responsible for the memory they + * allocate: if they allocate a nexthop_group or backup nexthop info, they + * must free those objects. If this returns < 0, an error has occurred and the + * route_entry 're' has not been captured; the caller should free that also. + */ +int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, + struct prefix_ipv6 *src_p, struct route_entry *re, + struct nhg_hash_entry *re_nhe) { struct nhg_hash_entry *nhe = NULL; struct route_table *table; @@ -2584,41 +2675,31 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, struct route_entry *same = NULL; int ret = 0; - if (!re) - return 0; + if (!re || !re_nhe) + return -1; assert(!src_p || !src_p->prefixlen || afi == AFI_IP6); /* Lookup table. 
*/ table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id, re->table); - if (!table) { - if (ng) - nexthop_group_delete(&ng); - XFREE(MTYPE_RE, re); - return 0; - } + if (!table) + return -1; - if (re->nhe_id) { - nhe = zebra_nhg_lookup_id(re->nhe_id); + if (re_nhe->id > 0) { + nhe = zebra_nhg_lookup_id(re_nhe->id); if (!nhe) { flog_err( EC_ZEBRA_TABLE_LOOKUP_FAILED, "Zebra failed to find the nexthop hash entry for id=%u in a route entry", - re->nhe_id); - XFREE(MTYPE_RE, re); + re_nhe->id); + return -1; } } else { - nhe = zebra_nhg_rib_find(0, ng, afi); - - /* - * The nexthops got copied over into an nhe, - * so free them now. - */ - nexthop_group_delete(&ng); - + /* Lookup nhe from route information */ + nhe = zebra_nhg_rib_find_nhe(re_nhe, afi); if (!nhe) { char buf[PREFIX_STRLEN] = ""; char buf2[PREFIX_STRLEN] = ""; @@ -2631,7 +2712,6 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, src_p ? prefix2str(src_p, buf2, sizeof(buf2)) : ""); - XFREE(MTYPE_RE, re); return -1; } } @@ -2709,15 +2789,51 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, ret = 1; /* Free implicit route.*/ - if (same) { + if (same) rib_delnode(rn, same); - ret = -1; - } route_unlock_node(rn); return ret; } +/* + * Add a single route. + */ +int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, + struct prefix_ipv6 *src_p, struct route_entry *re, + struct nexthop_group *ng) +{ + int ret; + struct nhg_hash_entry nhe; + + if (!re) + return -1; + + /* We either need nexthop(s) or an existing nexthop id */ + if (ng == NULL && re->nhe_id == 0) + return -1; + + /* + * Use a temporary nhe to convey info to the common/main api. + */ + zebra_nhe_init(&nhe, afi, (ng ? ng->nexthop : NULL)); + if (ng) + nhe.nhg.nexthop = ng->nexthop; + else if (re->nhe_id > 0) + nhe.id = re->nhe_id; + + ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, &nhe); + + /* In this path, the callers expect memory to be freed. */ + nexthop_group_delete(&ng); + + /* In error cases, free the route also */ + if (ret < 0) + XFREE(MTYPE_RE, re); + + return ret; +} + void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, unsigned short instance, int flags, struct prefix *p, struct prefix_ipv6 *src_p, const struct nexthop *nh, @@ -3030,8 +3146,7 @@ void rib_update_table(struct route_table *table, rib_update_event_t event) table->info ? afi2str( ((rib_table_info_t *)table->info)->afi) : "Unknown", - vrf ? vrf->name : "Unknown", - zvrf ? zvrf->table_id : 0, + VRF_LOGNAME(vrf), zvrf ? zvrf->table_id : 0, rib_update_event2str(event)); } @@ -3188,6 +3303,9 @@ void rib_sweep_table(struct route_table *table) if (!table) return; + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug("%s: starting", __func__); + for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) { RNODE_FOREACH_RE_SAFE (rn, re, next) { @@ -3234,6 +3352,9 @@ void rib_sweep_table(struct route_table *table) rib_delnode(rn, re); } } + + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug("%s: ends", __func__); } /* Sweep all RIB tables. 
*/ diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c index 6cfc68eb74..f9c74c7462 100644 --- a/zebra/zebra_rnh.c +++ b/zebra/zebra_rnh.c @@ -471,7 +471,7 @@ zebra_rnh_resolve_import_entry(struct zebra_vrf *zvrf, afi_t afi, *prn = rn; if (!re && IS_ZEBRA_DEBUG_NHT_DETAILED) - zlog_debug("\tRejected due to removed or is a bgp route"); + zlog_debug(" Rejected due to removed or is a bgp route"); return re; } @@ -656,7 +656,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi, && !rnh_resolve_via_default(zvrf, rn->p.family)) { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tNot allowed to resolve through default prefix"); + " Not allowed to resolve through default prefix"); return NULL; } @@ -665,7 +665,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi, if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tRoute Entry %s removed", + " Route Entry %s removed", zebra_route_string(re->type)); continue; } @@ -673,7 +673,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi, !CHECK_FLAG(re->flags, ZEBRA_FLAG_FIB_OVERRIDE)) { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tRoute Entry %s !selected", + " Route Entry %s !selected", zebra_route_string(re->type)); continue; } @@ -681,7 +681,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi, if (CHECK_FLAG(re->status, ROUTE_ENTRY_QUEUED)) { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tRoute Entry %s queued", + " Route Entry %s queued", zebra_route_string(re->type)); continue; } @@ -697,7 +697,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi, if (nexthop == NULL) { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tRoute Entry %s no nexthops", + " Route Entry %s no nexthops", zebra_route_string(re->type)); continue; } @@ -732,7 +732,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi, else { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tNexthop must be connected, cannot recurse up"); + " Nexthop must be connected, cannot recurse up"); return NULL; } } @@ -1030,7 +1030,7 @@ static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type, default: flog_err(EC_ZEBRA_RNH_UNKNOWN_FAMILY, "%s: Unknown family (%d) notification attempted\n", - __FUNCTION__, rn->p.family); + __func__, rn->p.family); break; } if (re) { diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c index 2963d83828..2b3b3afbb5 100644 --- a/zebra/zebra_routemap.c +++ b/zebra/zebra_routemap.c @@ -30,6 +30,8 @@ #include "filter.h" #include "plist.h" #include "nexthop.h" +#include "northbound_cli.h" +#include "route_types.h" #include "vrf.h" #include "frrstr.h" @@ -58,82 +60,6 @@ struct nh_rmap_obj { static void zebra_route_map_set_delay_timer(uint32_t value); - -/* Add zebra route map rule */ -static int zebra_route_match_add(struct vty *vty, const char *command, - const char *arg, route_map_event_t type) -{ - VTY_DECLVAR_CONTEXT(route_map_index, index); - enum rmap_compile_rets ret; - int retval = CMD_SUCCESS; - - ret = route_map_add_match(index, command, arg, type); - switch (ret) { - case RMAP_RULE_MISSING: - vty_out(vty, "%% Zebra Can't find rule.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_ERROR: - vty_out(vty, "%% Zebra Argument is malformed.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_SUCCESS: - /* - * Nothing to do here - */ - break; - } - - return retval; -} - -/* Delete zebra route map rule. 
*/ -static int zebra_route_match_delete(struct vty *vty, const char *command, - const char *arg, route_map_event_t type) -{ - VTY_DECLVAR_CONTEXT(route_map_index, index); - enum rmap_compile_rets ret; - int retval = CMD_SUCCESS; - char *dep_name = NULL; - const char *tmpstr; - char *rmap_name = NULL; - - if (type != RMAP_EVENT_MATCH_DELETED) { - /* ignore the mundane, the types without any dependency */ - if (arg == NULL) { - if ((tmpstr = route_map_get_match_arg(index, command)) - != NULL) - dep_name = - XSTRDUP(MTYPE_ROUTE_MAP_RULE, tmpstr); - } else { - dep_name = XSTRDUP(MTYPE_ROUTE_MAP_RULE, arg); - } - rmap_name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, index->map->name); - } - - ret = route_map_delete_match(index, command, arg, type); - switch (ret) { - case RMAP_RULE_MISSING: - vty_out(vty, "%% Zebra Can't find rule.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_ERROR: - vty_out(vty, "%% Zebra Argument is malformed.\n"); - retval = CMD_WARNING_CONFIG_FAILED; - break; - case RMAP_COMPILE_SUCCESS: - /* - * Nothing to do here - */ - break; - } - - XFREE(MTYPE_ROUTE_MAP_RULE, dep_name); - XFREE(MTYPE_ROUTE_MAP_NAME, rmap_name); - - return retval; -} - /* 'match tag TAG' * Match function return 1 if match is success else return 0 */ @@ -425,246 +351,227 @@ static int ip_nht_rm_del(struct zebra_vrf *zvrf, const char *rmap, int rtype, return CMD_SUCCESS; } -DEFUN (match_ip_address_prefix_len, - match_ip_address_prefix_len_cmd, - "match ip address prefix-len (0-32)", - MATCH_STR - IP_STR - "Match prefix length of ip address\n" - "Match prefix length of ip address\n" - "Prefix length\n") +DEFPY( + match_ip_address_prefix_len, match_ip_address_prefix_len_cmd, + "match ip address prefix-len (0-32)$length", + MATCH_STR + IP_STR + "Match prefix length of IP address\n" + "Match prefix length of IP address\n" + "Prefix length\n") { - return zebra_route_match_add(vty, "ip address prefix-len", argv[4]->arg, - RMAP_EVENT_MATCH_ADDED); + const char *xpath = "./match-condition[condition='ipv4-prefix-length']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:ipv4-prefix-length", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, length_str); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_ip_address_prefix_len, - no_match_ip_address_prefix_len_cmd, - "no match ip address prefix-len [(0-32)]", - NO_STR - MATCH_STR - IP_STR - "Match prefix length of ip address\n" - "Match prefix length of ip address\n" - "Prefix length\n") +DEFPY( + no_match_ip_address_prefix_len, no_match_ip_address_prefix_len_cmd, + "no match ip address prefix-len [(0-32)]", + NO_STR + MATCH_STR + IP_STR + "Match prefix length of IP address\n" + "Match prefix length of IP address\n" + "Prefix length\n") { - char *plen = (argc == 6) ? 
argv[5]->arg : NULL; - return zebra_route_match_delete(vty, "ip address prefix-len", plen, - RMAP_EVENT_MATCH_DELETED); + const char *xpath = "./match-condition[condition='ipv4-prefix-length']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (match_ipv6_address_prefix_len, - match_ipv6_address_prefix_len_cmd, - "match ipv6 address prefix-len (0-128)", - MATCH_STR - IPV6_STR - "Match prefix length of ipv6 address\n" - "Match prefix length of ipv6 address\n" - "Prefix length\n") +DEFPY( + match_ipv6_address_prefix_len, match_ipv6_address_prefix_len_cmd, + "match ipv6 address prefix-len (0-128)$length", + MATCH_STR + IPV6_STR + "Match prefix length of IPv6 address\n" + "Match prefix length of IPv6 address\n" + "Prefix length\n") { - return zebra_route_match_add(vty, "ipv6 address prefix-len", - argv[4]->arg, RMAP_EVENT_MATCH_ADDED); + const char *xpath = "./match-condition[condition='ipv6-prefix-length']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:ipv6-prefix-length", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, length_str); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_ipv6_address_prefix_len, - no_match_ipv6_address_prefix_len_cmd, - "no match ipv6 address prefix-len [(0-128)]", - NO_STR - MATCH_STR - IPV6_STR - "Match prefix length of ip address\n" - "Match prefix length of ip address\n" - "Prefix length\n") +DEFPY( + no_match_ipv6_address_prefix_len, no_match_ipv6_address_prefix_len_cmd, + "no match ipv6 address prefix-len [(0-128)]", + NO_STR + MATCH_STR + IPV6_STR + "Match prefix length of IPv6 address\n" + "Match prefix length of IPv6 address\n" + "Prefix length\n") { - char *plen = (argc == 6) ? argv[5]->arg : NULL; - return zebra_route_match_delete(vty, "ipv6 address prefix-len", plen, - RMAP_EVENT_MATCH_DELETED); + const char *xpath = "./match-condition[condition='ipv6-prefix-length']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (match_ip_nexthop_prefix_len, - match_ip_nexthop_prefix_len_cmd, - "match ip next-hop prefix-len (0-32)", - MATCH_STR - IP_STR - "Match prefixlen of nexthop ip address\n" - "Match prefixlen of given nexthop\n" - "Prefix length\n") +DEFPY( + match_ip_nexthop_prefix_len, match_ip_nexthop_prefix_len_cmd, + "match ip next-hop prefix-len (0-32)$length", + MATCH_STR + IP_STR + "Match prefixlen of nexthop IP address\n" + "Match prefixlen of given nexthop\n" + "Prefix length\n") { - return zebra_route_match_add(vty, "ip next-hop prefix-len", - argv[4]->arg, RMAP_EVENT_MATCH_ADDED); + const char *xpath = + "./match-condition[condition='ipv4-next-hop-prefix-length']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:ipv4-prefix-length", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, length_str); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_ip_nexthop_prefix_len, - no_match_ip_nexthop_prefix_len_cmd, - "no match ip next-hop prefix-len [(0-32)]", - NO_STR - MATCH_STR - IP_STR - "Match prefixlen of nexthop ip address\n" - "Match prefix length of nexthop\n" - "Prefix length\n") -{ - char *plen = (argc == 6) ? 
argv[5]->arg : NULL; - return zebra_route_match_delete(vty, "ip next-hop prefix-len", plen, - RMAP_EVENT_MATCH_DELETED); -} - -DEFUN (match_source_protocol, - match_source_protocol_cmd, - "match source-protocol <bgp|ospf|rip|ripng|isis|ospf6|pim|nhrp|eigrp|babel|connected|system|kernel|static|sharp>", - MATCH_STR - "Match protocol via which the route was learnt\n" - "BGP protocol\n" - "OSPF protocol\n" - "RIP protocol\n" - "RIPNG protocol\n" - "ISIS protocol\n" - "OSPF6 protocol\n" - "PIM protocol\n" - "NHRP protocol\n" - "EIGRP protocol\n" - "BABEL protocol\n" - "Routes from directly connected peer\n" - "Routes from system configuration\n" - "Routes from kernel\n" - "Statically configured routes\n" - "SHARP process\n") -{ - char *proto = argv[2]->text; - int i; +DEFPY( + no_match_ip_nexthop_prefix_len, no_match_ip_nexthop_prefix_len_cmd, + "no match ip next-hop prefix-len [(0-32)]", + NO_STR + MATCH_STR + IP_STR + "Match prefixlen of nexthop IP address\n" + "Match prefix length of nexthop\n" + "Prefix length\n") +{ + const char *xpath = + "./match-condition[condition='ipv4-next-hop-prefix-length']"; - i = proto_name2num(proto); - if (i < 0) { - vty_out(vty, "invalid protocol name \"%s\"\n", proto); - return CMD_WARNING_CONFIG_FAILED; - } - return zebra_route_match_add(vty, "source-protocol", proto, - RMAP_EVENT_MATCH_ADDED); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_match_source_protocol, - no_match_source_protocol_cmd, - "no match source-protocol [<bgp|ospf|rip|ripng|isis|ospf6|pim|nhrp|eigrp|babel|connected|system|kernel|static|sharp>]", - NO_STR - MATCH_STR - "No match protocol via which the route was learnt\n" - "BGP protocol\n" - "OSPF protocol\n" - "RIP protocol\n" - "RIPNG protocol\n" - "ISIS protocol\n" - "OSPF6 protocol\n" - "PIM protocol\n" - "NHRP protocol\n" - "EIGRP protocol\n" - "BABEL protocol\n" - "Routes from directly connected peer\n" - "Routes from system configuration\n" - "Routes from kernel\n" - "Statically configured routes\n" - "SHARP process\n") -{ - char *proto = (argc == 4) ? argv[3]->text : NULL; - return zebra_route_match_delete(vty, "source-protocol", proto, - RMAP_EVENT_MATCH_DELETED); -} - -DEFUN (match_source_instance, - match_source_instance_cmd, - "match source-instance (0-255)", - MATCH_STR - "Match the protocol's instance number\n" - "The instance number\n") -{ - char *instance = argv[2]->arg; - - return zebra_route_match_add(vty, "source-instance", instance, - RMAP_EVENT_MATCH_ADDED); -} - -DEFUN (no_match_source_instance, - no_match_source_instance_cmd, - "no match source-instance [(0-255)]", - NO_STR MATCH_STR - "Match the protocol's instance number\n" - "The instance number\n") -{ - char *instance = (argc == 4) ? 
argv[3]->arg : NULL; - - return zebra_route_match_delete(vty, "source-instance", instance, - RMAP_EVENT_MATCH_ADDED); +DEFPY( + match_source_protocol, match_source_protocol_cmd, + "match source-protocol " FRR_REDIST_STR_ZEBRA "$proto", + MATCH_STR + "Match protocol via which the route was learnt\n" + FRR_REDIST_HELP_STR_ZEBRA) +{ + const char *xpath = "./match-condition[condition='source-protocol']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-protocol", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, proto); + + return nb_cli_apply_changes(vty, NULL); } -/* set functions */ +DEFPY( + no_match_source_protocol, no_match_source_protocol_cmd, + "no match source-protocol [" FRR_REDIST_STR_ZEBRA "]", + NO_STR + MATCH_STR + "Match protocol via which the route was learnt\n" + FRR_REDIST_HELP_STR_ZEBRA) +{ + const char *xpath = "./match-condition[condition='source-protocol']"; -DEFUN (set_src, - set_src_cmd, - "set src <A.B.C.D|X:X::X:X>", - SET_STR - "src address for route\n" - "IPv4 src address\n" - "IPv6 src address\n") -{ - int idx_ip = 2; - union g_addr src; - struct interface *pif = NULL; - int family; - struct prefix p; - struct vrf *vrf; - - if (inet_pton(AF_INET, argv[idx_ip]->arg, &src.ipv4) != 1) { - if (inet_pton(AF_INET6, argv[idx_ip]->arg, &src.ipv6) != 1) { - vty_out(vty, "%% not a valid IPv4/v6 address\n"); - return CMD_WARNING_CONFIG_FAILED; - } + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - p.family = family = AF_INET6; - p.u.prefix6 = src.ipv6; - p.prefixlen = IPV6_MAX_BITLEN; - } else { - p.family = family = AF_INET; - p.u.prefix4 = src.ipv4; - p.prefixlen = IPV4_MAX_BITLEN; - } + return nb_cli_apply_changes(vty, NULL); +} - if (!zebra_check_addr(&p)) { - vty_out(vty, "%% not a valid source IPv4/v6 address\n"); - return CMD_WARNING_CONFIG_FAILED; - } +DEFPY( + match_source_instance, match_source_instance_cmd, + "match source-instance (0-255)$instance", + MATCH_STR + "Match the protocol's instance number\n" + "The instance number\n") +{ + const char *xpath = "./match-condition[condition='source-instance']"; + char xpath_value[XPATH_MAXLEN]; - RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) { - if (family == AF_INET) - pif = if_lookup_exact_address((void *)&src.ipv4, - AF_INET, vrf->vrf_id); - else if (family == AF_INET6) - pif = if_lookup_exact_address((void *)&src.ipv6, - AF_INET6, vrf->vrf_id); + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-instance", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, instance_str); - if (pif != NULL) - break; - } + return nb_cli_apply_changes(vty, NULL); +} - if (!pif) { - vty_out(vty, "%% not a local address\n"); - return CMD_WARNING_CONFIG_FAILED; +DEFPY( + no_match_source_instance, no_match_source_instance_cmd, + "no match source-instance [(0-255)]", + NO_STR MATCH_STR + "Match the protocol's instance number\n" + "The instance number\n") +{ + const char *xpath = "./match-condition[condition='source-instance']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +/* set functions */ + +DEFPY( + set_src, set_src_cmd, + "set src <A.B.C.D$addrv4|X:X::X:X$addrv6>", + SET_STR + "src address for route\n" + "IPv4 src address\n" + "IPv6 src address\n") +{ + const char *xpath = "./set-action[action='source']"; + char xpath_value[XPATH_MAXLEN]; + + 
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + if (addrv4_str) { + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-v4", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, + addrv4_str); + } else { + snprintf(xpath_value, sizeof(xpath_value), + "%s/frr-zebra:source-v6", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, + addrv6_str); } - VTY_DECLVAR_CONTEXT(route_map_index, index); - return generic_set_add(vty, index, "src", argv[idx_ip]->arg); + return nb_cli_apply_changes(vty, NULL); } -DEFUN (no_set_src, - no_set_src_cmd, - "no set src [<A.B.C.D|X:X::X:X>]", - NO_STR - SET_STR - "Source address for route\n" - "IPv4 address\n" - "IPv6 address\n") +DEFPY( + no_set_src, no_set_src_cmd, + "no set src [<A.B.C.D|X:X::X:X>]", + NO_STR + SET_STR + "Source address for route\n" + "IPv4 address\n" + "IPv6 address\n") { - char *ip = (argc == 4) ? argv[3]->arg : NULL; - VTY_DECLVAR_CONTEXT(route_map_index, index); - return generic_set_delete(vty, index, "src", ip); + const char *xpath = "./set-action[action='source']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); } DEFUN (zebra_route_map_timer, @@ -1793,6 +1700,15 @@ static void zebra_route_map_set_delay_timer(uint32_t value) } } +void zebra_routemap_finish(void) +{ + /* Set zebra_rmap_update_timer to 0 so that it wont schedule again */ + zebra_rmap_update_timer = 0; + /* Thread off if any scheduled already */ + THREAD_TIMER_OFF(zebra_t_rmap_update); + route_map_finish(); +} + void zebra_route_map_write_delay_timer(struct vty *vty) { if (vty && (zebra_rmap_update_timer != ZEBRA_RMAP_DEFAULT_UPDATE_TIMER)) diff --git a/zebra/zebra_routemap.h b/zebra/zebra_routemap.h index 6a630e1ac0..56e805ea03 100644 --- a/zebra/zebra_routemap.h +++ b/zebra/zebra_routemap.h @@ -56,4 +56,5 @@ zebra_nht_route_map_check(afi_t afi, int client_proto, const struct prefix *p, } #endif +extern void zebra_routemap_finish(void); #endif diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c index a891ffb76a..ea2b6752b3 100644 --- a/zebra/zebra_router.c +++ b/zebra/zebra_router.c @@ -223,10 +223,11 @@ void zebra_router_terminate(void) zebra_vxlan_disable(); zebra_mlag_terminate(); - hash_clean(zrouter.nhgs, zebra_nhg_hash_free); - hash_free(zrouter.nhgs); - hash_clean(zrouter.nhgs_id, NULL); + /* Free NHE in ID table only since it has unhashable entries as well */ + hash_clean(zrouter.nhgs_id, zebra_nhg_hash_free); hash_free(zrouter.nhgs_id); + hash_clean(zrouter.nhgs, NULL); + hash_free(zrouter.nhgs); hash_clean(zrouter.rules_hash, zebra_pbr_rules_free); hash_free(zrouter.rules_hash); diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h index 59bd0e55f0..773e5a6415 100644 --- a/zebra/zebra_router.h +++ b/zebra/zebra_router.h @@ -218,6 +218,9 @@ extern void multicast_mode_ipv4_set(enum multicast_mode mode); extern enum multicast_mode multicast_mode_ipv4_get(void); +/* zebra_northbound.c */ +extern const struct frr_yang_module_info frr_zebra_info; + #ifdef __cplusplus } #endif diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c index c392303760..ee1e251a69 100644 --- a/zebra/zebra_vrf.c +++ b/zebra/zebra_vrf.c @@ -60,8 +60,13 @@ static void zebra_vrf_add_update(struct zebra_vrf *zvrf) if (IS_ZEBRA_DEBUG_EVENT) zlog_debug("MESSAGE: ZEBRA_VRF_ADD %s", zvrf_name(zvrf)); - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. 
*/ + if (client->synchronous) + continue; + zsend_vrf_add(client, zvrf); + } } static void zebra_vrf_delete_update(struct zebra_vrf *zvrf) @@ -72,8 +77,13 @@ static void zebra_vrf_delete_update(struct zebra_vrf *zvrf) if (IS_ZEBRA_DEBUG_EVENT) zlog_debug("MESSAGE: ZEBRA_VRF_DELETE %s", zvrf_name(zvrf)); - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) + for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { + /* Do not send unsolicited messages to synchronous clients. */ + if (client->synchronous) + continue; + zsend_vrf_delete(client, zvrf); + } } void zebra_vrf_update_all(struct zserv *client) @@ -168,7 +178,7 @@ static int zebra_vrf_disable(struct vrf *vrf) zebra_vxlan_vrf_disable(zvrf); #if defined(HAVE_RTADV) - rtadv_terminate(zvrf); + rtadv_vrf_terminate(zvrf); #endif /* Inform clients that the VRF is now inactive. This is a diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h index 5448e17073..268ee12a65 100644 --- a/zebra/zebra_vrf.h +++ b/zebra/zebra_vrf.h @@ -193,7 +193,7 @@ struct zebra_vrf { static inline vrf_id_t zvrf_id(struct zebra_vrf *zvrf) { if (!zvrf || !zvrf->vrf) - return VRF_UNKNOWN; + return VRF_DEFAULT; return zvrf->vrf->vrf_id; } @@ -206,6 +206,8 @@ static inline const char *zvrf_ns_name(struct zebra_vrf *zvrf) static inline const char *zvrf_name(struct zebra_vrf *zvrf) { + if (!zvrf || !zvrf->vrf) + return "Unknown"; return zvrf->vrf->name; } diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 79ce43be93..ef3dc9808f 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -164,7 +164,8 @@ DEFUN (show_ip_rpf_addr, return CMD_SUCCESS; } -static char re_status_output_char(struct route_entry *re, struct nexthop *nhop) +static char re_status_output_char(const struct route_entry *re, + const struct nexthop *nhop) { if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) { if (!CHECK_FLAG(nhop->flags, NEXTHOP_FLAG_DUPLICATE) && @@ -187,6 +188,152 @@ static char re_status_output_char(struct route_entry *re, struct nexthop *nhop) return ' '; } +/* + * TODO -- Show backup nexthop info + */ +static void show_nh_backup_helper(struct vty *vty, + const struct nhg_hash_entry *nhe, + const struct nexthop *nexthop) +{ + /* Double-check that there _is_ a backup */ + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + return; + + /* Locate the backup nexthop */ + + /* Format the backup (indented) */ + +} + +/* + * Helper api to format output for a nexthop, used in the 'detailed' + * output path. + */ +static void show_nexthop_detail_helper(struct vty *vty, + const struct route_entry *re, + const struct nexthop *nexthop) +{ + char addrstr[32]; + char buf[MPLS_LABEL_STRLEN]; + + vty_out(vty, " %c%s", + re_status_output_char(re, nexthop), + nexthop->rparent ? 
" " : ""); + + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + vty_out(vty, " %s", + inet_ntoa(nexthop->gate.ipv4)); + if (nexthop->ifindex) + vty_out(vty, ", via %s", + ifindex2ifname( + nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + vty_out(vty, " %s", + inet_ntop(AF_INET6, &nexthop->gate.ipv6, + buf, sizeof(buf))); + if (nexthop->ifindex) + vty_out(vty, ", via %s", + ifindex2ifname( + nexthop->ifindex, + nexthop->vrf_id)); + break; + + case NEXTHOP_TYPE_IFINDEX: + vty_out(vty, " directly connected, %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_BLACKHOLE: + vty_out(vty, " unreachable"); + switch (nexthop->bh_type) { + case BLACKHOLE_REJECT: + vty_out(vty, " (ICMP unreachable)"); + break; + case BLACKHOLE_ADMINPROHIB: + vty_out(vty, + " (ICMP admin-prohibited)"); + break; + case BLACKHOLE_NULL: + vty_out(vty, " (blackhole)"); + break; + case BLACKHOLE_UNSPEC: + break; + } + break; + default: + break; + } + + if ((re->vrf_id != nexthop->vrf_id) + && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { + struct vrf *vrf = + vrf_lookup_by_id(nexthop->vrf_id); + + if (vrf) + vty_out(vty, "(vrf %s)", vrf->name); + else + vty_out(vty, "(vrf UNKNOWN)"); + } + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) + vty_out(vty, " (duplicate nexthop removed)"); + + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + vty_out(vty, " inactive"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) + vty_out(vty, " onlink"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + vty_out(vty, " (recursive)"); + + /* Source specified? */ + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (nexthop->src.ipv4.s_addr) { + if (inet_ntop(AF_INET, &nexthop->src.ipv4, + addrstr, sizeof(addrstr))) + vty_out(vty, ", src %s", + addrstr); + } + break; + + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, + &in6addr_any)) { + if (inet_ntop(AF_INET6, &nexthop->src.ipv6, + addrstr, sizeof(addrstr))) + vty_out(vty, ", src %s", + addrstr); + } + break; + + default: + break; + } + + if (re->nexthop_mtu) + vty_out(vty, ", mtu %u", re->nexthop_mtu); + + /* Label information */ + if (nexthop->nh_label && nexthop->nh_label->num_labels) { + vty_out(vty, ", label %s", + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, buf, + sizeof(buf), 1 /*pretty*/)); + } + + if (nexthop->weight) + vty_out(vty, ", weight %u", nexthop->weight); +} + /* New RIB. Detailed information for IPv4 route. 
*/ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn, int mcast, bool use_fib, bool show_ng) @@ -241,153 +388,134 @@ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn, vty_out(vty, "\n"); time_t uptime; - struct tm tm; uptime = monotime(NULL); uptime -= re->uptime; - gmtime_r(&uptime, &tm); - vty_out(vty, " Last update "); + frrtime_to_interval(uptime, buf, sizeof(buf)); - if (uptime < ONE_DAY_SECOND) - vty_out(vty, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - vty_out(vty, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - vty_out(vty, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), - tm.tm_hour); - vty_out(vty, " ago\n"); + vty_out(vty, " Last update %s ago\n", buf); if (show_ng) vty_out(vty, " Nexthop Group ID: %u\n", re->nhe_id); for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) { - char addrstr[32]; - - vty_out(vty, " %c%s", - re_status_output_char(re, nexthop), - nexthop->rparent ? " " : ""); - - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - vty_out(vty, " %s", - inet_ntoa(nexthop->gate.ipv4)); - if (nexthop->ifindex) - vty_out(vty, ", via %s", - ifindex2ifname( - nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - vty_out(vty, " %s", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, - buf, sizeof(buf))); - if (nexthop->ifindex) - vty_out(vty, ", via %s", - ifindex2ifname( - nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " directly connected, %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - vty_out(vty, " unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - vty_out(vty, " (ICMP unreachable)"); - break; - case BLACKHOLE_ADMINPROHIB: - vty_out(vty, - " (ICMP admin-prohibited)"); - break; - case BLACKHOLE_NULL: - vty_out(vty, " (blackhole)"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; - } - - if ((re->vrf_id != nexthop->vrf_id) - && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { - struct vrf *vrf = - vrf_lookup_by_id(nexthop->vrf_id); - - if (vrf) - vty_out(vty, "(vrf %s)", vrf->name); - else - vty_out(vty, "(vrf UNKNOWN)"); - } + /* Use helper to format each nexthop */ + show_nexthop_detail_helper(vty, re, nexthop); + vty_out(vty, "\n"); - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) - vty_out(vty, " (duplicate nexthop removed)"); + /* Include backup info, if present */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + show_nh_backup_helper(vty, re->nhe, nexthop); + } + vty_out(vty, "\n"); + } +} - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - vty_out(vty, " inactive"); +/* + * Helper for nexthop output, used in the 'show ip route' path + */ +static void show_route_nexthop_helper(struct vty *vty, + const struct route_entry *re, + const struct nexthop *nexthop) +{ + char buf[MPLS_LABEL_STRLEN]; + + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + vty_out(vty, " via %s", inet_ntoa(nexthop->gate.ipv4)); + if (nexthop->ifindex) + vty_out(vty, ", %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + vty_out(vty, " via %s", + inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, + sizeof(buf))); + if (nexthop->ifindex) + vty_out(vty, ", %s", + ifindex2ifname(nexthop->ifindex, + 
nexthop->vrf_id)); + break; - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - vty_out(vty, " onlink"); + case NEXTHOP_TYPE_IFINDEX: + vty_out(vty, " is directly connected, %s", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_BLACKHOLE: + vty_out(vty, " unreachable"); + switch (nexthop->bh_type) { + case BLACKHOLE_REJECT: + vty_out(vty, " (ICMP unreachable)"); + break; + case BLACKHOLE_ADMINPROHIB: + vty_out(vty, " (ICMP admin-prohibited)"); + break; + case BLACKHOLE_NULL: + vty_out(vty, " (blackhole)"); + break; + case BLACKHOLE_UNSPEC: + break; + } + break; + default: + break; + } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - vty_out(vty, " (recursive)"); + if ((re == NULL || (nexthop->vrf_id != re->vrf_id)) && + (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { + struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, - &nexthop->src.ipv4, - addrstr, sizeof(addrstr))) - vty_out(vty, ", src %s", - addrstr); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, - &in6addr_any)) { - if (inet_ntop(AF_INET6, - &nexthop->src.ipv6, - addrstr, sizeof(addrstr))) - vty_out(vty, ", src %s", - addrstr); - } - break; - default: - break; - } + if (vrf) + vty_out(vty, " (vrf %s)", vrf->name); + else + vty_out(vty, " (vrf UNKNOWN)"); + } - if (re->nexthop_mtu) - vty_out(vty, ", mtu %u", re->nexthop_mtu); + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + vty_out(vty, " inactive"); - /* Label information */ - if (nexthop->nh_label - && nexthop->nh_label->num_labels) { - vty_out(vty, ", label %s", - mpls_label2str( - nexthop->nh_label->num_labels, - nexthop->nh_label->label, buf, - sizeof(buf), 1)); - } + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) + vty_out(vty, " onlink"); - if (nexthop->weight) - vty_out(vty, ", weight %u", nexthop->weight); + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + vty_out(vty, " (recursive)"); - vty_out(vty, "\n"); + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (nexthop->src.ipv4.s_addr) { + if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf, + sizeof(buf))) + vty_out(vty, ", src %s", buf); } - vty_out(vty, "\n"); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) { + if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf, + sizeof(buf))) + vty_out(vty, ", src %s", buf); + } + break; + default: + break; } + + /* Label information */ + if (nexthop->nh_label && nexthop->nh_label->num_labels) { + vty_out(vty, ", label %s", + mpls_label2str(nexthop->nh_label->num_labels, + nexthop->nh_label->label, buf, + sizeof(buf), 1)); + } + + if ((re == NULL) && nexthop->weight) + vty_out(vty, ", weight %u", nexthop->weight); } static void vty_show_ip_route(struct vty *vty, struct route_node *rn, @@ -402,14 +530,15 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, json_object *json_route = NULL; json_object *json_labels = NULL; time_t uptime; - struct tm tm; struct vrf *vrf = NULL; rib_dest_t *dest = rib_dest_from_rnode(rn); struct nexthop_group *nhg; + char up_str[MONOTIME_STRLEN]; uptime = monotime(NULL); uptime -= re->uptime; - gmtime_r(&uptime, &tm); + + frrtime_to_interval(uptime, up_str, sizeof(up_str)); /* If showing fib information, use the fib view of the * nexthops. 
@@ -474,18 +603,8 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, json_object_int_add(json_route, "internalNextHopActiveNum", nexthop_group_active_nexthop_num( &(re->nhe->nhg))); - if (uptime < ONE_DAY_SECOND) - sprintf(buf, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - sprintf(buf, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - sprintf(buf, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), - tm.tm_hour); - json_object_string_add(json_route, "uptime", buf); + json_object_string_add(json_route, "uptime", up_str); for (ALL_NEXTHOPS_PTR(nhg, nexthop)) { json_nexthop = json_object_new_object(); @@ -646,6 +765,10 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, json_labels); } + if (nexthop->weight) + json_object_int_add(json_nexthop, "weight", + nexthop->weight); + json_object_array_add(json_nexthops, json_nexthop); } @@ -681,115 +804,46 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, len - 3 + (2 * nexthop_level(nexthop)), ' '); } - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - vty_out(vty, " via %s", inet_ntoa(nexthop->gate.ipv4)); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - vty_out(vty, " via %s", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, - sizeof(buf))); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - - case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " is directly connected, %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - vty_out(vty, " unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - vty_out(vty, " (ICMP unreachable)"); - break; - case BLACKHOLE_ADMINPROHIB: - vty_out(vty, " (ICMP admin-prohibited)"); - break; - case BLACKHOLE_NULL: - vty_out(vty, " (blackhole)"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; - } - - if ((nexthop->vrf_id != re->vrf_id) - && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { - struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + show_route_nexthop_helper(vty, re, nexthop); - if (vrf) - vty_out(vty, "(vrf %s)", vrf->name); - else - vty_out(vty, "(vrf UNKNOWN)"); - } + if (nexthop->weight) + vty_out(vty, ", weight %u", nexthop->weight); - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - vty_out(vty, " inactive"); + vty_out(vty, ", %s\n", up_str); - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - vty_out(vty, " onlink"); + /* Check for backup info */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + struct nexthop *backup; + int i; - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - vty_out(vty, " (recursive)"); + if (re->nhe->backup_info == NULL || + re->nhe->backup_info->nhe == NULL) + continue; - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) { - if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); + i = 0; + for (ALL_NEXTHOPS(re->nhe->backup_info->nhe->nhg, + backup)) { + if (i == 
nexthop->backup_idx) + break; + i++; } - break; - default: - break; - } - /* Label information */ - if (nexthop->nh_label && nexthop->nh_label->num_labels) { - vty_out(vty, ", label %s", - mpls_label2str(nexthop->nh_label->num_labels, - nexthop->nh_label->label, buf, - sizeof(buf), 1)); + /* Print useful backup info */ + if (backup) { + /* TODO -- install state is not accurate */ + vty_out(vty, " %*c [backup %d]", + /*re_status_output_char(re, backup),*/ + len - 3 + (2 * nexthop_level(nexthop)), + ' ', nexthop->backup_idx); + show_route_nexthop_helper(vty, re, backup); + vty_out(vty, "\n"); + } } - - if (uptime < ONE_DAY_SECOND) - vty_out(vty, ", %02d:%02d:%02d", tm.tm_hour, - tm.tm_min, tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - vty_out(vty, ", %dd%02dh%02dm", tm.tm_yday, - tm.tm_hour, tm.tm_min); - else - vty_out(vty, ", %02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), - tm.tm_hour); - vty_out(vty, "\n"); } } static void vty_show_ip_route_detail_json(struct vty *vty, - struct route_node *rn, bool use_fib) + struct route_node *rn, bool use_fib) { json_object *json = NULL; json_object *json_prefix = NULL; @@ -1059,9 +1113,8 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) { struct nexthop *nexthop = NULL; struct nhg_connected *rb_node_dep = NULL; - char buf[SRCDEST2STR_BUFFER]; - struct vrf *nhe_vrf = vrf_lookup_by_id(nhe->vrf_id); + struct nexthop_group *backup_nhg; vty_out(vty, "ID: %u\n", nhe->id); vty_out(vty, " RefCnt: %d\n", nhe->refcnt); @@ -1093,6 +1146,7 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) vty_out(vty, "\n"); } + /* Output nexthops */ for (ALL_NEXTHOPS(nhe->nhg, nexthop)) { if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) vty_out(vty, " "); @@ -1100,100 +1154,56 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) /* Make recursive nexthops a bit more clear */ vty_out(vty, " "); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - vty_out(vty, " %s", inet_ntoa(nexthop->gate.ipv4)); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - vty_out(vty, " %s", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, - sizeof(buf))); - if (nexthop->ifindex) - vty_out(vty, ", %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; + show_route_nexthop_helper(vty, NULL, nexthop); - case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " directly connected %s", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - vty_out(vty, " unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - vty_out(vty, " (ICMP unreachable)"); - break; - case BLACKHOLE_ADMINPROHIB: - vty_out(vty, " (ICMP admin-prohibited)"); - break; - case BLACKHOLE_NULL: - vty_out(vty, " (blackhole)"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; + if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL) { + if (CHECK_FLAG(nexthop->flags, + NEXTHOP_FLAG_HAS_BACKUP)) + vty_out(vty, " [backup %d]", + nexthop->backup_idx); + + vty_out(vty, "\n"); + continue; } - struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id); + /* TODO -- print more useful backup info */ + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { + struct nexthop *backup; + int i; - if (vrf) - vty_out(vty, " (vrf %s)", vrf->name); - else - vty_out(vty, " (vrf UNKNOWN)"); 
+ i = 0; + for (ALL_NEXTHOPS(nhe->backup_info->nhe->nhg, backup)) { + if (i == nexthop->backup_idx) + break; + i++; + } - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - vty_out(vty, " inactive"); + /* TODO */ + if (backup) + vty_out(vty, " [backup %d]", + nexthop->backup_idx); + else + vty_out(vty, " [backup INVALID]"); + } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - vty_out(vty, " onlink"); + vty_out(vty, "\n"); + } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - vty_out(vty, " (recursive)"); + /* Output backup nexthops (if any) */ + backup_nhg = zebra_nhg_get_backup_nhg(nhe); + if (backup_nhg) { + vty_out(vty, " Backups:\n"); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) { - if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf, - sizeof(buf))) - vty_out(vty, ", src %s", buf); - } - break; - default: - break; - } + for (ALL_NEXTHOPS_PTR(backup_nhg, nexthop)) { + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + vty_out(vty, " "); + else + /* Make recursive nexthops a bit more clear */ + vty_out(vty, " "); - /* Label information */ - if (nexthop->nh_label && nexthop->nh_label->num_labels) { - vty_out(vty, ", label %s", - mpls_label2str(nexthop->nh_label->num_labels, - nexthop->nh_label->label, buf, - sizeof(buf), 1)); + show_route_nexthop_helper(vty, NULL, nexthop); + vty_out(vty, "\n"); } - - if (nexthop->weight) - vty_out(vty, ", weight %u", nexthop->weight); - - vty_out(vty, "\n"); } if (!zebra_nhg_dependents_is_empty(nhe)) { diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 61865e5baf..24050d8038 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -121,11 +121,11 @@ static struct interface *zvni_map_to_macvlan(struct interface *br_if, /* l3-vni next-hop neigh related APIs */ static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, - struct ipaddr *ip); + const struct ipaddr *ip); static void *zl3vni_nh_alloc(void *p); static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, - struct ipaddr *vtep_ip, - struct ethaddr *rmac); + const struct ipaddr *vtep_ip, + const struct ethaddr *rmac); static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); @@ -133,10 +133,10 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n); /* l3-vni rmac related APIs */ static void zl3vni_print_rmac_hash(struct hash_bucket *, void *); static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni, - struct ethaddr *rmac); + const struct ethaddr *rmac); static void *zl3vni_rmac_alloc(void *p); static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, - struct ethaddr *rmac); + const struct ethaddr *rmac); static int zl3vni_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac); static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac); static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac); @@ -1515,8 +1515,8 @@ static void zvni_print_mac_hash_all_vni(struct hash_bucket *bucket, void *ctxt) struct mac_walk_ctx *wctx = ctxt; char vni_str[VNI_STR_LEN]; - vty = (struct vty *)wctx->vty; - json = (struct json_object *)wctx->json; + vty = wctx->vty; + 
json = wctx->json; zvni = (zebra_vni_t *)bucket->data; wctx->zvni = zvni; @@ -1586,8 +1586,8 @@ static void zvni_print_mac_hash_all_vni_detail(struct hash_bucket *bucket, struct mac_walk_ctx *wctx = ctxt; char vni_str[VNI_STR_LEN]; - vty = (struct vty *)wctx->vty; - json = (struct json_object *)wctx->json; + vty = wctx->vty; + json = wctx->json; zvni = (zebra_vni_t *)bucket->data; if (!zvni) { @@ -3059,7 +3059,7 @@ static int zvni_local_neigh_update(zebra_vni_t *zvni, zvrf = vrf_info_lookup(zvni->vxlan_if->vrf_id); if (!zvrf) { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug("\tUnable to find vrf for: %d", + zlog_debug(" Unable to find vrf for: %d", zvni->vxlan_if->vrf_id); return -1; } @@ -3094,7 +3094,7 @@ static int zvni_local_neigh_update(zebra_vni_t *zvni, if (!mac_different && is_router == cur_is_router) { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "\tIgnoring entry mac is the same and is_router == cur_is_router"); + " Ignoring entry mac is the same and is_router == cur_is_router"); n->ifindex = ifp->ifindex; return 0; } @@ -3126,7 +3126,7 @@ static int zvni_local_neigh_update(zebra_vni_t *zvni, else { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "\tNeighbor active and frozen"); + " Neighbor active and frozen"); } return 0; } @@ -3271,7 +3271,7 @@ static int zvni_local_neigh_update(zebra_vni_t *zvni, n->flags, n->loc_seq); } else { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug("\tNeighbor on hold not sending"); + zlog_debug(" Neighbor on hold not sending"); } return 0; } @@ -3691,7 +3691,7 @@ static struct interface *zvni_map_to_svi(vlanid_t vid, struct interface *br_if) if (!zif || zif->zif_type != ZEBRA_IF_VLAN || zif->link != br_if) continue; - vl = (struct zebra_l2info_vlan *)&zif->l2info.vl; + vl = &zif->l2info.vl; if (vl->vid == vid) { found = 1; @@ -4434,7 +4434,7 @@ static void zl3vni_cleanup_all(struct hash_bucket *bucket, void *args) } static void rb_find_or_add_host(struct host_rb_tree_entry *hrbe, - struct prefix *host) + const struct prefix *host) { struct host_rb_entry lookup; struct host_rb_entry *hle; @@ -4473,7 +4473,7 @@ static void rb_delete_host(struct host_rb_tree_entry *hrbe, struct prefix *host) * Look up MAC hash entry. */ static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni, - struct ethaddr *rmac) + const struct ethaddr *rmac) { zebra_mac_t tmp; zebra_mac_t *pmac; @@ -4502,7 +4502,8 @@ static void *zl3vni_rmac_alloc(void *p) /* * Add RMAC entry to l3-vni */ -static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, struct ethaddr *rmac) +static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, + const struct ethaddr *rmac) { zebra_mac_t tmp_rmac; zebra_mac_t *zrmac = NULL; @@ -4632,9 +4633,10 @@ static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac) } /* handle rmac add */ -static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni, struct ethaddr *rmac, - struct ipaddr *vtep_ip, - struct prefix *host_prefix) +static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni, + const struct ethaddr *rmac, + const struct ipaddr *vtep_ip, + const struct prefix *host_prefix) { char buf[ETHER_ADDR_STRLEN]; char buf1[INET6_ADDRSTRLEN]; @@ -4709,7 +4711,8 @@ static void zl3vni_remote_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac, /* * Look up nh hash entry on a l3-vni. */ -static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, struct ipaddr *ip) +static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, + const struct ipaddr *ip) { zebra_neigh_t tmp; zebra_neigh_t *n; @@ -4739,8 +4742,9 @@ static void *zl3vni_nh_alloc(void *p) /* * Add neighbor entry. 
*/ -static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, struct ipaddr *ip, - struct ethaddr *mac) +static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, + const struct ipaddr *ip, + const struct ethaddr *mac) { zebra_neigh_t tmp_n; zebra_neigh_t *n = NULL; @@ -4822,9 +4826,10 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n) } /* add remote vtep as a neigh entry */ -static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni, struct ipaddr *vtep_ip, - struct ethaddr *rmac, - struct prefix *host_prefix) +static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni, + const struct ipaddr *vtep_ip, + const struct ethaddr *rmac, + const struct prefix *host_prefix) { char buf[ETHER_ADDR_STRLEN]; char buf1[ETHER_ADDR_STRLEN]; @@ -5960,9 +5965,9 @@ int is_l3vni_for_prefix_routes_only(vni_t vni) } /* handle evpn route in vrf table */ -void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, struct ethaddr *rmac, - struct ipaddr *vtep_ip, - struct prefix *host_prefix) +void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, const struct ethaddr *rmac, + const struct ipaddr *vtep_ip, + const struct prefix *host_prefix) { zebra_l3vni_t *zl3vni = NULL; struct ipaddr ipv4_vtep; @@ -8034,7 +8039,7 @@ int zebra_vxlan_local_mac_add_update(struct interface *ifp, if (!zvni) { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "\tAdd/Update %sMAC %s intf %s(%u) VID %u, could not find VNI", + " Add/Update %sMAC %s intf %s(%u) VID %u, could not find VNI", sticky ? "sticky " : "", prefix_mac2str(macaddr, buf, sizeof(buf)), ifp->name, ifp->ifindex, vid); @@ -8044,7 +8049,7 @@ int zebra_vxlan_local_mac_add_update(struct interface *ifp, if (!zvni->vxlan_if) { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "\tVNI %u hash %p doesn't have intf upon local MAC ADD", + " VNI %u hash %p doesn't have intf upon local MAC ADD", zvni->vni, zvni); return -1; } @@ -8052,7 +8057,7 @@ int zebra_vxlan_local_mac_add_update(struct interface *ifp, zvrf = vrf_info_lookup(zvni->vxlan_if->vrf_id); if (!zvrf) { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug("\tNo Vrf found for vrf_id: %d", + zlog_debug(" No Vrf found for vrf_id: %d", zvni->vxlan_if->vrf_id); return -1; } @@ -8105,7 +8110,7 @@ int zebra_vxlan_local_mac_add_update(struct interface *ifp, && mac->fwd_info.local.vid == vid) { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "\tAdd/Update %sMAC %s intf %s(%u) VID %u -> VNI %u, " + " Add/Update %sMAC %s intf %s(%u) VID %u -> VNI %u, " "entry exists and has not changed ", sticky ? 
"sticky " : "", prefix_mac2str(macaddr, buf, @@ -10254,7 +10259,7 @@ static int zebra_evpn_pim_cfg_clean_up(struct zserv *client) { struct zebra_vrf *zvrf = zebra_vrf_get_evpn(); - if (CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) { + if (zvrf && CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) { if (IS_ZEBRA_DEBUG_VXLAN) zlog_debug("VxLAN SG updates to PIM, stop"); UNSET_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG); diff --git a/zebra/zebra_vxlan.h b/zebra/zebra_vxlan.h index 6ca93f6cb6..a5c13a59e3 100644 --- a/zebra/zebra_vxlan.h +++ b/zebra/zebra_vxlan.h @@ -199,9 +199,9 @@ extern void zebra_vxlan_cleanup_tables(struct zebra_vrf *); extern void zebra_vxlan_init(void); extern void zebra_vxlan_disable(void); extern void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, - struct ethaddr *rmac, - struct ipaddr *ip, - struct prefix *host_prefix); + const struct ethaddr *rmac, + const struct ipaddr *ip, + const struct prefix *host_prefix); extern void zebra_vxlan_evpn_vrf_route_del(vrf_id_t vrf_id, struct ipaddr *vtep_ip, struct prefix *host_prefix); diff --git a/zebra/zserv.c b/zebra/zserv.c index 40aa9010c5..7f806d82c3 100644 --- a/zebra/zserv.c +++ b/zebra/zserv.c @@ -858,7 +858,6 @@ void zserv_event(struct zserv *client, enum zserv_event event) #define ZEBRA_TIME_BUF 32 static char *zserv_time_buf(time_t *time1, char *buf, int buflen) { - struct tm tm; time_t now; assert(buf != NULL); @@ -872,17 +871,9 @@ static char *zserv_time_buf(time_t *time1, char *buf, int buflen) now = monotime(NULL); now -= *time1; - gmtime_r(&now, &tm); - - if (now < ONE_DAY_SECOND) - snprintf(buf, buflen, "%02d:%02d:%02d", tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (now < ONE_WEEK_SECOND) - snprintf(buf, buflen, "%dd%02dh%02dm", tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - snprintf(buf, buflen, "%02dw%dd%02dh", tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) * 7), tm.tm_hour); + + frrtime_to_interval(now, buf, buflen); + return buf; } @@ -1001,8 +992,6 @@ static void zebra_show_stale_client_detail(struct vty *vty, struct zserv *client) { char buf[PREFIX2STR_BUFFER]; - struct tm tm; - struct timeval tv; time_t uptime; struct client_gr_info *info = NULL; struct zserv *s = NULL; @@ -1028,26 +1017,13 @@ static void zebra_show_stale_client_detail(struct vty *vty, if (ZEBRA_CLIENT_GR_ENABLED(info->capabilities)) { if (info->stale_client_ptr) { s = (struct zserv *)(info->stale_client_ptr); - uptime = monotime(&tv); + uptime = monotime(NULL); uptime -= s->restart_time; - gmtime_r(&uptime, &tm); - - vty_out(vty, "Last restart time : "); - if (uptime < ONE_DAY_SECOND) - vty_out(vty, "%02d:%02d:%02d", - tm.tm_hour, tm.tm_min, - tm.tm_sec); - else if (uptime < ONE_WEEK_SECOND) - vty_out(vty, "%dd%02dh%02dm", - tm.tm_yday, tm.tm_hour, - tm.tm_min); - else - vty_out(vty, "%02dw%dd%02dh", - tm.tm_yday / 7, - tm.tm_yday - ((tm.tm_yday / 7) - * 7), - tm.tm_hour); - vty_out(vty, " ago\n"); + + frrtime_to_interval(uptime, buf, sizeof(buf)); + + vty_out(vty, "Last restart time : %s ago\n", + buf); vty_out(vty, "Stalepath removal time: %d sec\n", info->stale_removal_time); diff --git a/zebra/zserv.h b/zebra/zserv.h index 6a075cc9a7..08df664d56 100644 --- a/zebra/zserv.h +++ b/zebra/zserv.h @@ -131,6 +131,9 @@ struct zserv { bool notify_owner; + /* Indicates if client is synchronous. 
*/ + bool synchronous; + /* client's protocol */ uint8_t proto; uint16_t instance; @@ -314,7 +317,7 @@ extern void zserv_read_file(char *input); #endif /* TODO */ -int zebra_finalize(struct thread *event); +__attribute__((__noreturn__)) int zebra_finalize(struct thread *event); /* * Graceful restart functions.
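Review note: every route-map command converted to DEFPY in zebra_routemap.c above follows the same northbound shape: enqueue an NB_OP_CREATE for the match/set list entry plus an NB_OP_MODIFY for its value leaf (the "no" forms collapse to a single NB_OP_DESTROY), then commit with nb_cli_apply_changes(). A condensed sketch of that pattern follows; the "example" command, the 'example-condition' entry and the frr-zebra:example-value leaf are placeholders for illustration, not nodes from the YANG model touched by this patch.

/* Sketch of the DEFPY -> northbound conversion pattern used above.
 * Command string and xpath leaf names are illustrative placeholders. */
DEFPY(match_example, match_example_cmd,
      "match example (0-255)$value",
      MATCH_STR
      "Example match condition\n"
      "Value to match\n")
{
	const char *xpath = "./match-condition[condition='example-condition']";
	char xpath_value[XPATH_MAXLEN];

	/* Create the match-condition list entry... */
	nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);

	/* ...then set its value leaf from the DEFPY-generated string. */
	snprintf(xpath_value, sizeof(xpath_value),
		 "%s/frr-zebra:example-value", xpath);
	nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, value_str);

	/* Apply both queued changes as one candidate-configuration edit. */
	return nb_cli_apply_changes(vty, NULL);
}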
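Review note: the backup-nexthop display added to vty_show_ip_route() and show_nexthop_group_out() open-codes the same walk of the backup nexthop group to locate the entry at nexthop->backup_idx. Factored out, that loop would look roughly like the helper below; the helper name is a placeholder and the patch deliberately keeps the loop inline.

/* Illustrative helper, not part of the patch: return the nexthop at
 * position 'idx' within a nexthop group, or NULL if the group is shorter. */
static struct nexthop *nexthop_at_index(struct nexthop_group *nhg, int idx)
{
	struct nexthop *nexthop;
	int i = 0;

	for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
		if (i == idx)
			return nexthop;
		i++;
	}

	return NULL;
}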
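Review note: the uptime-display changes in zebra_vty.c and zserv.c replace three copies of the same gmtime_r()-based formatting with one frrtime_to_interval(uptime, buf, sizeof(buf)) call. For reference, the formatting rules the removed code implemented are sketched below; this is a minimal illustration assuming ONE_DAY_SECOND and ONE_WEEK_SECOND are the usual 86400- and 604800-second constants, and it is not the libfrr implementation itself.

/* Minimal sketch of the interval formatting the removed code performed:
 * "hh:mm:ss" under one day, "DdHHhMMm" under one week, "WWwDdHHh" beyond.
 * Illustration only, not libfrr's frrtime_to_interval(). */
#include <stdio.h>
#include <time.h>

#define ONE_DAY_SECOND  (24 * 60 * 60)
#define ONE_WEEK_SECOND (7 * ONE_DAY_SECOND)

static void uptime_to_interval(time_t t, char *buf, size_t buflen)
{
	struct tm tm;

	gmtime_r(&t, &tm);

	if (t < ONE_DAY_SECOND)
		snprintf(buf, buflen, "%02d:%02d:%02d",
			 tm.tm_hour, tm.tm_min, tm.tm_sec);
	else if (t < ONE_WEEK_SECOND)
		snprintf(buf, buflen, "%dd%02dh%02dm",
			 tm.tm_yday, tm.tm_hour, tm.tm_min);
	else
		snprintf(buf, buflen, "%02dw%dd%02dh", tm.tm_yday / 7,
			 tm.tm_yday - ((tm.tm_yday / 7) * 7), tm.tm_hour);
}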
